From 87cfd481f31de5463181a19a0f2c117fc469b00e Mon Sep 17 00:00:00 2001
From: ci-penbot-01
Date: Fri, 1 Aug 2025 22:52:00 +0530
Subject: [PATCH 01/21] [GPUOP] Adding docs for DCM Systemd integration (#851)
 (#852)

(cherry picked from commit 5389d38c6a9efbb7680ac74c28a874aef221728a)

Co-authored-by: nikhilsk <47417007+nikhilsk@users.noreply.github.com>
Signed-off-by: yansun1996
---
 docs/dcm/applying-partition-profiles.rst    |  3 +
 docs/dcm/device-config-manager-configmap.md |  7 ++
 docs/dcm/systemd_integration.md             | 98 +++++++++++++++++++++
 docs/sphinx/_toc.yml                        |  1 +
 docs/sphinx/_toc.yml.in                     |  1 +
 example/configManager/config.json           |  3 +
 6 files changed, 113 insertions(+)
 create mode 100644 docs/dcm/systemd_integration.md

diff --git a/docs/dcm/applying-partition-profiles.rst b/docs/dcm/applying-partition-profiles.rst
index dbaf3e19..ab89d547 100644
--- a/docs/dcm/applying-partition-profiles.rst
+++ b/docs/dcm/applying-partition-profiles.rst
@@ -102,6 +102,9 @@ Below is an example of how to create the `config-manager-config.yaml` file that
         }
       ]
     }
+  },
+  "gpuClientSystemdServices": {
+    "names": ["amd-metrics-exporter", "gpuagent"]
   }
 }
 EOF
diff --git a/docs/dcm/device-config-manager-configmap.md b/docs/dcm/device-config-manager-configmap.md
index e831acdd..55013f66 100644
--- a/docs/dcm/device-config-manager-configmap.md
+++ b/docs/dcm/device-config-manager-configmap.md
@@ -43,6 +43,9 @@ data:
           }
         ]
       }
+    },
+    "gpuClientSystemdServices": {
+      "names": ["amd-metrics-exporter", "gpuagent"]
     }
   }
 ```
@@ -57,6 +60,7 @@ Below is an explanation of each field in the ConfigMap:
 | `computePartition` | Compute partition type |
 | `memoryPartition` | Memory partition type |
 | `numGPUsAssigned` | Number of GPUs to be partitioned on the node |
+| `gpuClientSystemdServices` | Defines a list of systemd service units to be stopped/restarted on the node |
 
 ```{note}
 Users can create a heterogeneous partitioning config profile by specifying more than one `computePartition` scheme in the `profiles` array; however, this is not a recommended or supported configuration by AMD. Note that NPS4 memory partition mode does not work with heterogeneous partition schemes and only supports CPX on MI300X systems.
@@ -90,6 +94,9 @@ Users can create a heterogeneous partitioning config profile by specifying more
           }
         ]
       }
+    },
+    "gpuClientSystemdServices": {
+      "names": ["amd-metrics-exporter", "gpuagent"]
     }
   }
 
diff --git a/docs/dcm/systemd_integration.md b/docs/dcm/systemd_integration.md
new file mode 100644
index 00000000..cdb830b0
--- /dev/null
+++ b/docs/dcm/systemd_integration.md
@@ -0,0 +1,98 @@
+# DCM Systemd Integration

## Background

The Device Config Manager (DCM) orchestrates hardware-level tasks such as GPU partitioning. Before initiating partitioning, it gracefully stops specific systemd services, defined in a ConfigMap, to prevent processes (gpuagent, etc.) from interfering with the partitioning operation and to ensure a consistent device state.

## K8S ConfigMap enhancement

The ConfigMap contains a `gpuClientSystemdServices` key, which declares the list of services to manage:

```yaml
"gpuClientSystemdServices": {
    "names": ["amd-metrics-exporter", "gpuagent"]
}
```
- These are the unit names (without the `.service` suffix) of systemd services related to GPU runtime agents. We add the suffix as part of the code.
- Users can add or modify services in this list.

## ConfigMap

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-manager-config
  namespace: kube-amd-gpu
data:
  config.json: |
    {
      "gpu-config-profiles":
      {
        "cpx-profile":
        {
          "skippedGPUs": {
            "ids": []
          },
          "profiles": [
            {
              "computePartition": "CPX",
              "memoryPartition": "NPS4",
              "numGPUsAssigned": 8
            }
          ]
        },
        "spx-profile":
        {
          "skippedGPUs": {
            "ids": []
          },
          "profiles": [
            {
              "computePartition": "SPX",
              "memoryPartition": "NPS1",
              "numGPUsAssigned": 8
            }
          ]
        }
      },
      "gpuClientSystemdServices": {
        "names": ["amd-metrics-exporter", "gpuagent"]
      }
    }
```

## Required Mounts for D-Bus & systemd Integration

| **Mount Name**    | **Mount Path**     | **Purpose**                                                           |
|-------------------|--------------------|-----------------------------------------------------------------------|
| `etc-systemd`     | `/etc/systemd`     | Access to unit files for service definitions                           |
| `run-systemd`     | `/run/systemd`     | Enables access to systemd runtime state                                |
| `usr-lib-systemd` | `/usr/lib/systemd` | Required for systemd libraries and binaries                            |
| `var-run-dbus`    | `/var/run/dbus`    | Allows DCM to communicate via the system D-Bus (`system_bus_socket`)   |

## Workflow

- DCM uses D-Bus APIs to query, stop, and restart systemd services programmatically, ensuring precise service orchestration (a minimal sketch of this flow follows the list).

- Extract Service List: On startup, DCM parses the ConfigMap and retrieves the `names` array under `gpuClientSystemdServices`. Each entry is appended with the `.service` suffix to form the full unit name.

- Capture Pre-State:
  - For each service:
    - DCM checks its status over D-Bus via `org.freedesktop.systemd1.Manager.GetUnit`.
    - It stores the current state (e.g. `active`, `inactive`, `not-loaded`) in PreStateDB.
  - This DB is used to restore service state post-partitioning.

- Stop Services: Services are stopped gracefully using D-Bus APIs, ensuring they release GPU resources and do not disrupt the partitioning operation. DCM first checks that a service is present, using the CheckUnitStatus API, before attempting to stop it.

- Perform Partitioning: With the services temporarily stopped, DCM initiates the partitioning logic (driven by node labels and ConfigMap profiles) and completes the partitioning workflow.

- Restart & Restore State: After partitioning completes:
  - DCM checks PreStateDB to determine which services were previously active.
  - Only those services are restarted, using the D-Bus invocation APIs.
  - Finally, PreStateDB is cleared via a CleanupPreState() function to reset the tracker DB for the next run.
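
The workflow above maps naturally onto the systemd D-Bus bindings for Go. The following is a minimal sketch of the stop/restore cycle, assuming the `github.com/coreos/go-systemd/v22/dbus` package; it is illustrative only and not DCM's actual implementation. The plain map stands in for PreStateDB, the unit names are the defaults from the ConfigMap above, and error handling is reduced to the essentials.

```go
package main

import (
	"context"
	"fmt"

	sd "github.com/coreos/go-systemd/v22/dbus"
)

// stopGPUClients records each unit's ActiveState and then stops the units
// that are currently active. The returned map plays the role of PreStateDB.
func stopGPUClients(ctx context.Context, conn *sd.Conn, units []string) (map[string]string, error) {
	preState := make(map[string]string)
	statuses, err := conn.ListUnitsByNamesContext(ctx, units)
	if err != nil {
		return nil, err
	}
	for _, st := range statuses {
		preState[st.Name] = st.ActiveState // e.g. "active", "inactive"
		if st.ActiveState != "active" {
			continue // nothing to stop; this also covers units that are not loaded
		}
		ch := make(chan string)
		if _, err := conn.StopUnitContext(ctx, st.Name, "replace", ch); err != nil {
			return nil, err
		}
		fmt.Printf("stopped %s: %s\n", st.Name, <-ch) // block until the stop job finishes
	}
	return preState, nil
}

// restoreGPUClients restarts only the units that were active before partitioning.
func restoreGPUClients(ctx context.Context, conn *sd.Conn, preState map[string]string) error {
	for name, state := range preState {
		if state != "active" {
			continue
		}
		ch := make(chan string)
		if _, err := conn.StartUnitContext(ctx, name, "replace", ch); err != nil {
			return err
		}
		fmt.Printf("restarted %s: %s\n", name, <-ch)
	}
	return nil
}

func main() {
	ctx := context.Background()
	// Connects to systemd over the system D-Bus; inside a pod this is why
	// /var/run/dbus (system_bus_socket) must be mounted from the host.
	conn, err := sd.NewWithContext(ctx)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	units := []string{"amd-metrics-exporter.service", "gpuagent.service"}
	preState, err := stopGPUClients(ctx, conn, units)
	if err != nil {
		panic(err)
	}
	// ... GPU partitioning would run here ...
	if err := restoreGPUClients(ctx, conn, preState); err != nil {
		panic(err)
	}
}
```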
+ +# Conclusion + +- Avoids GPU contention during partitioning (device-busy errors aren’t seen during partition) +- Maintains service continuity with minimal downtime \ No newline at end of file diff --git a/docs/sphinx/_toc.yml b/docs/sphinx/_toc.yml index 9b355d73..92d2920d 100644 --- a/docs/sphinx/_toc.yml +++ b/docs/sphinx/_toc.yml @@ -58,6 +58,7 @@ subtrees: - file: dcm/device-config-manager - file: dcm/device-config-manager-configmap - file: dcm/applying-partition-profiles + - file: dcm/systemd_integration - caption: Specialized Networks entries: - file: specialized_networks/airgapped-install diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 9b355d73..92d2920d 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -58,6 +58,7 @@ subtrees: - file: dcm/device-config-manager - file: dcm/device-config-manager-configmap - file: dcm/applying-partition-profiles + - file: dcm/systemd_integration - caption: Specialized Networks entries: - file: specialized_networks/airgapped-install diff --git a/example/configManager/config.json b/example/configManager/config.json index 6b0abaf0..817983df 100644 --- a/example/configManager/config.json +++ b/example/configManager/config.json @@ -27,5 +27,8 @@ } ] } + }, + "gpuClientSystemdServices": { + "names": ["amd-metrics-exporter", "gpuagent"] } } \ No newline at end of file From d68303f88c7f9413c4ffd7f9f221737527a4b426 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Thu, 17 Jul 2025 23:04:44 +0000 Subject: [PATCH 02/21] [Helm] Add Radeon Pro W7800 48GB PCI device ID into default NFD rule --- hack/k8s-patch/template-patch/nfd-default-rule.yaml | 5 +++++ helm-charts-k8s/templates/nfd-default-rule.yaml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/hack/k8s-patch/template-patch/nfd-default-rule.yaml b/hack/k8s-patch/template-patch/nfd-default-rule.yaml index f93c0327..e18f5bba 100644 --- a/hack/k8s-patch/template-patch/nfd-default-rule.yaml +++ b/hack/k8s-patch/template-patch/nfd-default-rule.yaml @@ -147,6 +147,11 @@ spec: matchExpressions: vendor: {op: In, value: ["1002"]} device: {op: In, value: ["744a"]} # W7900 Dual Slot + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["7449"]} # W7800 48GB - matchFeatures: - feature: pci.device matchExpressions: diff --git a/helm-charts-k8s/templates/nfd-default-rule.yaml b/helm-charts-k8s/templates/nfd-default-rule.yaml index f93c0327..e18f5bba 100644 --- a/helm-charts-k8s/templates/nfd-default-rule.yaml +++ b/helm-charts-k8s/templates/nfd-default-rule.yaml @@ -147,6 +147,11 @@ spec: matchExpressions: vendor: {op: In, value: ["1002"]} device: {op: In, value: ["744a"]} # W7900 Dual Slot + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["7449"]} # W7800 48GB - matchFeatures: - feature: pci.device matchExpressions: From 3e7b591d55f9ccdd71e83026d5e2cfd53765a8de Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Tue, 12 Aug 2025 21:30:30 +0000 Subject: [PATCH 03/21] [DOC] Fix invalid YAML format of amd-smi example pod --- docs/usage.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/usage.rst b/docs/usage.rst index c186fe12..79866a2f 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -181,19 +181,19 @@ To run ``amd-smi`` in a pod: apiVersion: v1 kind: Pod metadata: - name: amd-smi - spec: - containers: - - image: docker.io/rocm/rocm-terminal:latest name: amd-smi - command: 
["/bin/bash"] - args: ["-c","amd-smi version && amd-smi monitor -ptum"] - resources: - limits: - amd.com/gpu: 1 - requests: - amd.com/gpu: 1 - restartPolicy: Never + spec: + containers: + - image: docker.io/rocm/rocm-terminal:latest + name: amd-smi + command: ["/bin/bash"] + args: ["-c","amd-smi version && amd-smi monitor -ptum"] + resources: + limits: + amd.com/gpu: 1 + requests: + amd.com/gpu: 1 + restartPolicy: Never - Create the pod: From a00655f8a0d4e8cef109f3a84adc68022ec88b9d Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Fri, 2 May 2025 10:38:33 +0000 Subject: [PATCH 04/21] Automate GIM driver installation on vgpu Host --- api/v1alpha1/deviceconfig_types.go | 18 +++++++ api/v1alpha1/zz_generated.deepcopy.go | 16 ++++++ ...md-gpu-operator.clusterserviceversion.yaml | 16 ++++++ bundle/manifests/amd.com_deviceconfigs.yaml | 19 +++++++ config/crd/bases/amd.com_deviceconfigs.yaml | 19 +++++++ ...md-gpu-operator.clusterserviceversion.yaml | 16 ++++++ .../template-patch/nfd-default-rule.yaml | 20 ++++++- helm-charts-k8s/crds/deviceconfig-crd.yaml | 19 +++++++ .../templates/nfd-default-rule.yaml | 20 ++++++- .../crds/deviceconfig-crd.yaml | 19 +++++++ .../kmmmodule/dockerfiles/vGPUHostGIM.ubuntu | 34 ++++++++++++ internal/kmmmodule/kmmmodule.go | 54 ++++++++++++++----- internal/metricsexporter/metricsexporter.go | 14 +++-- internal/nodelabeller/nodelabeller.go | 36 +++++++++---- internal/testrunner/testrunner.go | 13 ++++- internal/utils.go | 19 +++++++ 16 files changed, 320 insertions(+), 32 deletions(-) create mode 100644 internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index 6b99f789..bbfd352a 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -95,6 +95,16 @@ type DriverSpec struct { // +kubebuilder:default=true Enable *bool `json:"enable,omitempty"` + // specify the type of driver (container/vf-passthrough) to install on the worker node. default value is gpu. + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="DriverType",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:driverType"} + // +kubebuilder:validation:Enum=container;vf-passthrough + // +kubebuilder:default=container + DriverType string `json:"driverType,omitempty"` + + // vf-passthrough host driver specific configs. Only applies when the driverType is vf-passthrough + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="VFPassthrough",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough"} + VFPassthrough VFPassthroughSpec `json:"vfPassthrough,omitempty"` + // blacklist amdgpu drivers on the host. Node reboot is required to apply the baclklist on the worker nodes. // Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. 
// Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module @@ -709,6 +719,14 @@ type CommonConfigSpec struct { UtilsContainer UtilsContainerSpec `json:"utilsContainer,omitempty"` } +// VFPassthroughSpec vf-passthrough host driver specific configs +type VFPassthroughSpec struct { + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="GPUModel",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:gpuModel"} + // +kubebuilder:validation:Enum=mi210;mi300x + // +kubebuilder:default=mi300x + GPUModel string `json:"gpuModel,omitempty"` +} + // DeploymentStatus contains the status for a daemonset deployed during // reconciliation loop type DeploymentStatus struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e55c07f4..2158b917 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -342,6 +342,7 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(bool) **out = **in } + out.VFPassthrough = in.VFPassthrough if in.Blacklist != nil { in, out := &in.Blacklist, &out.Blacklist *out = new(bool) @@ -818,3 +819,18 @@ func (in *UtilsContainerSpec) DeepCopy() *UtilsContainerSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VFPassthroughSpec) DeepCopyInto(out *VFPassthroughSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFPassthroughSpec. +func (in *VFPassthroughSpec) DeepCopy() *VFPassthroughSpec { + if in == nil { + return nil + } + out := new(VFPassthroughSpec) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 761fa2e2..5a439882 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -265,6 +265,12 @@ spec: path: driver.blacklist x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:blacklistDrivers + - description: specify the type of driver (container/vf-passthrough) to install + on the worker node. default value is gpu. + displayName: DriverType + path: driver.driverType + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:driverType - description: enable driver install. default value is true. disable is for skipping driver install/uninstall for dryrun or using in-tree amdgpu kernel module @@ -399,6 +405,16 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version + - description: vf-passthrough host driver specific configs. 
Only applies when + the driverType is vf-passthrough + displayName: VFPassthrough + path: driver.vfPassthrough + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough + - displayName: GPUModel + path: driver.vfPassthrough.gpuModel + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:gpuModel - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index a7849223..acd45a0d 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -368,6 +368,14 @@ spec: Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module type: boolean + driverType: + default: container + description: specify the type of driver (container/vf-passthrough) + to install on the worker node. default value is gpu. + enum: + - container + - vf-passthrough + type: string enable: default: true description: |- @@ -591,6 +599,17 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfPassthrough: + description: vf-passthrough host driver specific configs. Only + applies when the driverType is vf-passthrough + properties: + gpuModel: + default: mi300x + enum: + - mi210 + - mi300x + type: string + type: object type: object metricsExporter: description: metrics exporter diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 40c42d9f..fd37f0c6 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -364,6 +364,14 @@ spec: Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module type: boolean + driverType: + default: container + description: specify the type of driver (container/vf-passthrough) + to install on the worker node. default value is gpu. + enum: + - container + - vf-passthrough + type: string enable: default: true description: |- @@ -587,6 +595,17 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfPassthrough: + description: vf-passthrough host driver specific configs. 
Only + applies when the driverType is vf-passthrough + properties: + gpuModel: + default: mi300x + enum: + - mi210 + - mi300x + type: string + type: object type: object metricsExporter: description: metrics exporter diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index 4c2bbf60..522ef979 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -236,6 +236,12 @@ spec: path: driver.blacklist x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:blacklistDrivers + - description: specify the type of driver (container/vf-passthrough) to install + on the worker node. default value is gpu. + displayName: DriverType + path: driver.driverType + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:driverType - description: enable driver install. default value is true. disable is for skipping driver install/uninstall for dryrun or using in-tree amdgpu kernel module @@ -370,6 +376,16 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version + - description: vf-passthrough host driver specific configs. Only applies when + the driverType is vf-passthrough + displayName: VFPassthrough + path: driver.vfPassthrough + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough + - displayName: GPUModel + path: driver.vfPassthrough.gpuModel + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:gpuModel - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/hack/k8s-patch/template-patch/nfd-default-rule.yaml b/hack/k8s-patch/template-patch/nfd-default-rule.yaml index e18f5bba..f8bf3e70 100644 --- a/hack/k8s-patch/template-patch/nfd-default-rule.yaml +++ b/hack/k8s-patch/template-patch/nfd-default-rule.yaml @@ -198,4 +198,22 @@ spec: matchExpressions: vendor: {op: In, value: ["1002"]} device: {op: In, value: ["73bf"]} # RX 6800 / 6800 XT / 6900 XT -{{- end }} \ No newline at end of file + - name: amd-gpu-mi210 + labels: + feature.node.kubernetes.io/amd-gpu-mi210: "true" + matchAny: + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["740f"]} # MI210 + - name: amd-gpu-mi300x + labels: + feature.node.kubernetes.io/amd-gpu-mi300x: "true" + matchAny: + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["74a1"]} # MI300X +{{- end }} diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 7bad5403..bd45a68f 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -372,6 +372,14 @@ spec: Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module type: boolean + driverType: + default: container + description: specify the type of driver (container/vf-passthrough) + to install on the worker node. default value is gpu. 
+ enum: + - container + - vf-passthrough + type: string enable: default: true description: |- @@ -594,6 +602,17 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfPassthrough: + description: vf-passthrough host driver specific configs. Only applies + when the driverType is vf-passthrough + properties: + gpuModel: + default: mi300x + enum: + - mi210 + - mi300x + type: string + type: object type: object metricsExporter: description: metrics exporter diff --git a/helm-charts-k8s/templates/nfd-default-rule.yaml b/helm-charts-k8s/templates/nfd-default-rule.yaml index e18f5bba..f8bf3e70 100644 --- a/helm-charts-k8s/templates/nfd-default-rule.yaml +++ b/helm-charts-k8s/templates/nfd-default-rule.yaml @@ -198,4 +198,22 @@ spec: matchExpressions: vendor: {op: In, value: ["1002"]} device: {op: In, value: ["73bf"]} # RX 6800 / 6800 XT / 6900 XT -{{- end }} \ No newline at end of file + - name: amd-gpu-mi210 + labels: + feature.node.kubernetes.io/amd-gpu-mi210: "true" + matchAny: + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["740f"]} # MI210 + - name: amd-gpu-mi300x + labels: + feature.node.kubernetes.io/amd-gpu-mi300x: "true" + matchAny: + - matchFeatures: + - feature: pci.device + matchExpressions: + vendor: {op: In, value: ["1002"]} + device: {op: In, value: ["74a1"]} # MI300X +{{- end }} diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 7bad5403..bd45a68f 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -372,6 +372,14 @@ spec: Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module type: boolean + driverType: + default: container + description: specify the type of driver (container/vf-passthrough) + to install on the worker node. default value is gpu. + enum: + - container + - vf-passthrough + type: string enable: default: true description: |- @@ -594,6 +602,17 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfPassthrough: + description: vf-passthrough host driver specific configs. 
Only applies + when the driverType is vf-passthrough + properties: + gpuModel: + default: mi300x + enum: + - mi210 + - mi300x + type: string + type: object type: object metricsExporter: description: metrics exporter diff --git a/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu new file mode 100644 index 00000000..e933ca02 --- /dev/null +++ b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu @@ -0,0 +1,34 @@ +FROM registry.test.pensando.io:5000/host_gim_driver_source:latest AS source +FROM ubuntu:$$VERSION AS builder + +ARG KERNEL_FULL_VERSION + +ARG DRIVERS_VERSION + +ARG REPO_URL + +RUN apt-get update && apt-get install -y automake \ + make \ + autoconf \ + dkms \ + bc \ + gcc-12 \ + dpkg-dev \ + initramfs-tools \ + linux-headers-${KERNEL_FULL_VERSION} && \ + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 60 + +COPY --from=source /gim_drivers/* /gim_drivers/ + +RUN dpkg -i /gim_drivers/$$GPU_MODEL* + +RUN depmod ${KERNEL_FULL_VERSION} + +FROM ubuntu:$$VERSION + +ARG KERNEL_FULL_VERSION + +RUN apt-get update && apt-get install -y kmod +RUN mkdir -p /opt/lib/modules/${KERNEL_FULL_VERSION}/updates/dkms/ +COPY --from=builder /lib/modules/${KERNEL_FULL_VERSION}/updates/dkms/gim* /opt/lib/modules/${KERNEL_FULL_VERSION}/updates/dkms/ +COPY --from=builder /lib/modules/${KERNEL_FULL_VERSION}/modules.* /opt/lib/modules/${KERNEL_FULL_VERSION}/ \ No newline at end of file diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go index 5d7c00c5..502ce6ab 100644 --- a/internal/kmmmodule/kmmmodule.go +++ b/internal/kmmmodule/kmmmodule.go @@ -43,14 +43,10 @@ import ( "strings" "github.com/go-logr/logr" - "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/utils/ptr" - - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" - utils "github.com/ROCm/gpu-operator/internal" kmmv1beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1" + "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" "golang.org/x/exp/maps" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -58,9 +54,13 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -68,6 +68,7 @@ const ( kubeletDevicePluginsPath = "/var/lib/kubelet/device-plugins" nodeVarLibFirmwarePath = "/var/lib/firmware" gpuDriverModuleName = "amdgpu" + vGPUHostDriverModuleName = "gim" ttmModuleName = "amdttm" kclModuleName = "amdkcl" imageFirmwarePath = "firmwareDir/updates" @@ -91,6 +92,8 @@ var ( buildOcDockerfile string //go:embed devdockerfiles/devdockerfile.txt dockerfileDevTemplateUbuntu string + //go:embed dockerfiles/vGPUHostGIM.ubuntu + dockerfileTemplateUbuntuVGPUHost string ) //go:generate mockgen -source=kmmmodule.go -package=kmmmodule -destination=mock_kmmmodule.go KMMModuleAPI @@ -178,13 +181,18 @@ var driverLabels = map[string]string{ } func resolveDockerfile(cmName string, devConfig *amdv1alpha1.DeviceConfig) (string, error) { - splits := strings.SplitN(cmName, "-", 4) + splits := strings.SplitN(cmName, "-", -1) osDistro := splits[0] version := splits[1] var 
dockerfileTemplate string switch osDistro { case "ubuntu": dockerfileTemplate = dockerfileTemplateUbuntu + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + dockerfileTemplate = dockerfileTemplateUbuntuVGPUHost + dockerfileTemplate = strings.Replace(dockerfileTemplate, "$$GPU_MODEL", devConfig.Spec.Driver.VFPassthrough.GPUModel, -1) + } driverLabel, present := driverLabels[version] if !present { return "", fmt.Errorf("invalid ubuntu version, expected to be one of %v", maps.Keys(driverLabels)) @@ -292,6 +300,13 @@ func (km *kmmModule) SetDevicePluginAsDesired(ds *appsv1.DaemonSet, devConfig *a if devConfig.Spec.CommonConfig.InitContainerImage != "" { initContainerImage = devConfig.Spec.CommonConfig.InitContainerImage } + + initContainerCommand := "while [ ! -d /sys/class/kfd ] || [ ! -d /sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done" + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = "while [ ! -d /sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + } + ds.Spec = appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: matchLabels}, Template: v1.PodTemplateSpec{ @@ -303,7 +318,7 @@ func (km *kmmModule) SetDevicePluginAsDesired(ds *appsv1.DaemonSet, devConfig *a { Name: "driver-init", Image: initContainerImage, - Command: []string{"sh", "-c", "while [ ! -d /sys/class/kfd ] || [ ! -d /sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done"}, + Command: []string{"sh", "-c", initContainerCommand}, SecurityContext: &v1.SecurityContext{Privileged: ptr.To(true)}, VolumeMounts: []v1.VolumeMount{ { @@ -431,10 +446,18 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * } } + firmwarePath := imageFirmwarePath + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + moduleName = vGPUHostDriverModuleName + modLoadingOrder = []string{} + firmwarePath = "" + } + mod.Spec.ModuleLoader.Container = kmmv1beta1.ModuleLoaderContainerSpec{ Modprobe: kmmv1beta1.ModprobeSpec{ ModuleName: moduleName, - FirmwarePath: imageFirmwarePath, + FirmwarePath: firmwarePath, Args: &kmmv1beta1.ModprobeArgs{}, Parameters: getModprobeParametersFromNodeInfo(nodes), ModulesLoadingOrder: modLoadingOrder, @@ -509,7 +532,7 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo if driversImage == "" { driversImage = defaultOcDriversImageTemplate } - driversImage = addNodeInfoSuffixToImageTag(driversImage, osName, driversVersion) + driversImage = addNodeInfoSuffixToImageTag(driversImage, osName, driversVersion, devConfig) } else { if driversVersion == "" { driversVersion, err = utils.GetDefaultDriversVersion(node) @@ -520,7 +543,7 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo if driversImage == "" { driversImage = defaultDriversImageTemplate } - driversImage = addNodeInfoSuffixToImageTag(driversImage, osName, driversVersion) + driversImage = addNodeInfoSuffixToImageTag(driversImage, osName, driversVersion, devConfig) } repoURL := defaultInstallerRepoURL @@ -577,7 +600,7 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo kmmBuild.BaseImageRegistryTLS.InsecureSkipTLSVerify = *devConfig.Spec.Driver.ImageBuild.BaseImageRegistryTLS.InsecureSkipTLSVerify } _, isCIEnvSet := os.LookupEnv("CI_ENV") - if isCIEnvSet { + if isCIEnvSet || devConfig.Spec.Driver.DriverType == 
utils.DriverTypeVFPassthrough { kmmBuild.BaseImageRegistryTLS.Insecure = true kmmBuild.BaseImageRegistryTLS.InsecureSkipTLSVerify = true } @@ -592,9 +615,12 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo }, driversVersion, nil } -func addNodeInfoSuffixToImageTag(imgStr string, osName, driversVersion string) string { +func addNodeInfoSuffixToImageTag(imgStr, osName, driversVersion string, devCfg *amdv1alpha1.DeviceConfig) string { + // if driver is vGPU host, different GPU model's driver image would be different + // need to add a suffix to distinguish them + gpuModelSuffix := utils.GetGPUModelSuffix(devCfg) // KMM will render and fulfill the value of ${KERNEL_FULL_VERSION} - tag := osName + "-${KERNEL_FULL_VERSION}-" + driversVersion + tag := osName + "-${KERNEL_FULL_VERSION}-" + driversVersion + gpuModelSuffix // tag cannot be more than 128 chars if len(tag) > 128 { tag = tag[len(tag)-128:] diff --git a/internal/metricsexporter/metricsexporter.go b/internal/metricsexporter/metricsexporter.go index 4960076d..68892526 100644 --- a/internal/metricsexporter/metricsexporter.go +++ b/internal/metricsexporter/metricsexporter.go @@ -37,8 +37,6 @@ import ( "fmt" "os" - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" - utils "github.com/ROCm/gpu-operator/internal" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" appsv1 "k8s.io/api/apps/v1" @@ -48,6 +46,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -374,6 +375,13 @@ func (nl *metricsExporter) SetMetricsExporterAsDesired(ds *appsv1.DaemonSet, dev if devConfig.Spec.CommonConfig.InitContainerImage != "" { initContainerImage = devConfig.Spec.CommonConfig.InitContainerImage } + + initContainerCommand := "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done" + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + } + ds.Spec = appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: matchLabels}, Template: v1.PodTemplateSpec{ @@ -386,7 +394,7 @@ func (nl *metricsExporter) SetMetricsExporterAsDesired(ds *appsv1.DaemonSet, dev { Name: "driver-init", Image: initContainerImage, - Command: []string{"sh", "-c", "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! 
-d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done"}, + Command: []string{"sh", "-c", initContainerCommand}, SecurityContext: &v1.SecurityContext{Privileged: ptr.To(true)}, VolumeMounts: []v1.VolumeMount{ { diff --git a/internal/nodelabeller/nodelabeller.go b/internal/nodelabeller/nodelabeller.go index fd9f22e2..0af1e155 100644 --- a/internal/nodelabeller/nodelabeller.go +++ b/internal/nodelabeller/nodelabeller.go @@ -36,7 +36,6 @@ import ( "fmt" "strings" - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,6 +43,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -137,16 +139,7 @@ func (nl *nodeLabeller) SetNodeLabellerAsDesired(ds *appsv1.DaemonSet, devConfig blackListFileName = openShiftBlacklistFileName } - var initContainerCommand []string - if devConfig.Spec.Driver.Blacklist != nil && *devConfig.Spec.Driver.Blacklist { - // if users want to apply the blacklist, init container will add the amdgpu to the blacklist - initContainerCommand = []string{"sh", "-c", fmt.Sprintf("echo \"# added by gpu operator \nblacklist amdgpu\" > /host-etc/modprobe.d/%v; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done", blackListFileName)} - } else { - // if users disabled the KMM driver, or disabled the blacklist - // init container will remove any hanging amdgpu blacklist entry from the list - initContainerCommand = []string{"sh", "-c", fmt.Sprintf("rm -f /host-etc/modprobe.d/%v; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done", blackListFileName)} - } - + initContainerCommand := getNodeLabellerInitContainerCommand(devConfig, blackListFileName) initContainerImage := defaultInitContainerImage if devConfig.Spec.CommonConfig.InitContainerImage != "" { initContainerImage = devConfig.Spec.CommonConfig.InitContainerImage @@ -265,3 +258,24 @@ func getDevicePluginVersion(devConfig *amdv1alpha1.DeviceConfig) string { } return "" } + +func getNodeLabellerInitContainerCommand(devConfig *amdv1alpha1.DeviceConfig, blackListFileName string) []string { + if devConfig.Spec.Driver.Blacklist != nil && *devConfig.Spec.Driver.Blacklist { + // if users want to apply the blacklist, init container will add the amdgpu to the blacklist + initContainerCommand := []string{"sh", "-c", fmt.Sprintf("echo \"# added by gpu operator \nblacklist amdgpu\" > /host-etc/modprobe.d/%v; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done", blackListFileName)} + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = []string{"sh", "-c", fmt.Sprintf("echo \"# added by gpu operator \nblacklist amdgpu\" > /host-etc/modprobe.d/%v; while [ ! 
-d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done", blackListFileName)} + } + return initContainerCommand + } else { + // if users disabled the KMM driver, or disabled the blacklist + // init container will remove any hanging amdgpu blacklist entry from the list + initContainerCommand := []string{"sh", "-c", fmt.Sprintf("rm -f /host-etc/modprobe.d/%v; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done", blackListFileName)} + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = []string{"sh", "-c", fmt.Sprintf("rm -f /host-etc/modprobe.d/%v; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done", blackListFileName)} + } + return initContainerCommand + } +} diff --git a/internal/testrunner/testrunner.go b/internal/testrunner/testrunner.go index f154e569..7de962de 100644 --- a/internal/testrunner/testrunner.go +++ b/internal/testrunner/testrunner.go @@ -37,7 +37,6 @@ import ( "os" "path/filepath" - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -46,6 +45,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -270,6 +272,13 @@ func (nl *testRunner) SetTestRunnerAsDesired(ds *appsv1.DaemonSet, devConfig *am if devConfig.Spec.CommonConfig.InitContainerImage != "" { initContainerImage = devConfig.Spec.CommonConfig.InitContainerImage } + + initContainerCommand := "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done" + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + } + ds.Spec = appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: matchLabels}, Template: v1.PodTemplateSpec{ @@ -282,7 +291,7 @@ func (nl *testRunner) SetTestRunnerAsDesired(ds *appsv1.DaemonSet, devConfig *am { Name: "driver-init", Image: initContainerImage, - Command: []string{"sh", "-c", "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! 
-d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done"}, + Command: []string{"sh", "-c", initContainerCommand}, SecurityContext: &v1.SecurityContext{Privileged: ptr.To(true)}, VolumeMounts: []v1.VolumeMount{ { diff --git a/internal/utils.go b/internal/utils.go index 36f7efe5..fbf67875 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -45,6 +45,11 @@ const ( computePartitioningSupportedLabel = "amd.com/compute-partitioning-supported" memoryPartitioningSupportedLabel = "amd.com/memory-partitioning-supported" partitionTypeLabel = "amd.com/compute-memory-partition" + // kubevirt + DriverTypeContainer = "container" + DriverTypeVFPassthrough = "vf-passthrough" + VGPUHostModelMI210 = "mi210" + VGPUHostModelMI300X = "mi300x" ) var ( @@ -209,3 +214,17 @@ func IsPrometheusServiceMonitorEnable(devConfig *amdv1alpha1.DeviceConfig) bool } return false } + +func GetGPUModelSuffix(devCfg *amdv1alpha1.DeviceConfig) string { + gpuModelSuffix := "" + switch devCfg.Spec.Driver.DriverType { + case DriverTypeVFPassthrough: + switch devCfg.Spec.Driver.VFPassthrough.GPUModel { + case VGPUHostModelMI210: + gpuModelSuffix = "-" + VGPUHostModelMI210 + case VGPUHostModelMI300X: + gpuModelSuffix = "-" + VGPUHostModelMI300X + } + } + return gpuModelSuffix +} From 0a3341b7de1ba83480645ef459d4b4d1953c706b Mon Sep 17 00:00:00 2001 From: Yan Sun Date: Fri, 2 May 2025 13:59:15 -0700 Subject: [PATCH 05/21] Use opensource GIM driver for automation (#631) --- api/v1alpha1/deviceconfig_types.go | 12 ---------- api/v1alpha1/zz_generated.deepcopy.go | 16 ------------- ...md-gpu-operator.clusterserviceversion.yaml | 10 -------- bundle/manifests/amd.com_deviceconfigs.yaml | 11 --------- config/crd/bases/amd.com_deviceconfigs.yaml | 11 --------- ...md-gpu-operator.clusterserviceversion.yaml | 10 -------- helm-charts-k8s/crds/deviceconfig-crd.yaml | 11 --------- .../crds/deviceconfig-crd.yaml | 11 --------- .../kmmmodule/dockerfiles/vGPUHostGIM.ubuntu | 20 +++++++--------- internal/kmmmodule/kmmmodule.go | 23 +++++++++---------- internal/utils.go | 16 ------------- 11 files changed, 19 insertions(+), 132 deletions(-) diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index bbfd352a..9dfa7277 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -101,10 +101,6 @@ type DriverSpec struct { // +kubebuilder:default=container DriverType string `json:"driverType,omitempty"` - // vf-passthrough host driver specific configs. Only applies when the driverType is vf-passthrough - //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="VFPassthrough",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough"} - VFPassthrough VFPassthroughSpec `json:"vfPassthrough,omitempty"` - // blacklist amdgpu drivers on the host. Node reboot is required to apply the baclklist on the worker nodes. // Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. 
// Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module @@ -719,14 +715,6 @@ type CommonConfigSpec struct { UtilsContainer UtilsContainerSpec `json:"utilsContainer,omitempty"` } -// VFPassthroughSpec vf-passthrough host driver specific configs -type VFPassthroughSpec struct { - //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="GPUModel",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:gpuModel"} - // +kubebuilder:validation:Enum=mi210;mi300x - // +kubebuilder:default=mi300x - GPUModel string `json:"gpuModel,omitempty"` -} - // DeploymentStatus contains the status for a daemonset deployed during // reconciliation loop type DeploymentStatus struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 2158b917..e55c07f4 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -342,7 +342,6 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(bool) **out = **in } - out.VFPassthrough = in.VFPassthrough if in.Blacklist != nil { in, out := &in.Blacklist, &out.Blacklist *out = new(bool) @@ -819,18 +818,3 @@ func (in *UtilsContainerSpec) DeepCopy() *UtilsContainerSpec { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VFPassthroughSpec) DeepCopyInto(out *VFPassthroughSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFPassthroughSpec. -func (in *VFPassthroughSpec) DeepCopy() *VFPassthroughSpec { - if in == nil { - return nil - } - out := new(VFPassthroughSpec) - in.DeepCopyInto(out) - return out -} diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 5a439882..50fcdd55 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -405,16 +405,6 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version - - description: vf-passthrough host driver specific configs. Only applies when - the driverType is vf-passthrough - displayName: VFPassthrough - path: driver.vfPassthrough - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough - - displayName: GPUModel - path: driver.vfPassthrough.gpuModel - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:gpuModel - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index acd45a0d..bb9ca0b0 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -599,17 +599,6 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string - vfPassthrough: - description: vf-passthrough host driver specific configs. 
Only - applies when the driverType is vf-passthrough - properties: - gpuModel: - default: mi300x - enum: - - mi210 - - mi300x - type: string - type: object type: object metricsExporter: description: metrics exporter diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index fd37f0c6..8e1b5da4 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -595,17 +595,6 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string - vfPassthrough: - description: vf-passthrough host driver specific configs. Only - applies when the driverType is vf-passthrough - properties: - gpuModel: - default: mi300x - enum: - - mi210 - - mi300x - type: string - type: object type: object metricsExporter: description: metrics exporter diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index 522ef979..4da92d70 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -376,16 +376,6 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version - - description: vf-passthrough host driver specific configs. Only applies when - the driverType is vf-passthrough - displayName: VFPassthrough - path: driver.vfPassthrough - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:vfPassthrough - - displayName: GPUModel - path: driver.vfPassthrough.gpuModel - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:gpuModel - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index bd45a68f..6d53b1ba 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -602,17 +602,6 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string - vfPassthrough: - description: vf-passthrough host driver specific configs. Only applies - when the driverType is vf-passthrough - properties: - gpuModel: - default: mi300x - enum: - - mi210 - - mi300x - type: string - type: object type: object metricsExporter: description: metrics exporter diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index bd45a68f..6d53b1ba 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -602,17 +602,6 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string - vfPassthrough: - description: vf-passthrough host driver specific configs. 
Only applies - when the driverType is vf-passthrough - properties: - gpuModel: - default: mi300x - enum: - - mi210 - - mi300x - type: string - type: object type: object metricsExporter: description: metrics exporter diff --git a/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu index e933ca02..016647b2 100644 --- a/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu +++ b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu @@ -1,4 +1,3 @@ -FROM registry.test.pensando.io:5000/host_gim_driver_source:latest AS source FROM ubuntu:$$VERSION AS builder ARG KERNEL_FULL_VERSION @@ -7,20 +6,17 @@ ARG DRIVERS_VERSION ARG REPO_URL -RUN apt-get update && apt-get install -y automake \ - make \ - autoconf \ +RUN apt-get update && \ + apt-get install -y build-essential \ dkms \ - bc \ - gcc-12 \ - dpkg-dev \ - initramfs-tools \ + autoconf \ + automake \ linux-headers-${KERNEL_FULL_VERSION} && \ - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 60 - -COPY --from=source /gim_drivers/* /gim_drivers/ + apt-get clean -RUN dpkg -i /gim_drivers/$$GPU_MODEL* +RUN wget https://github.com/amd/MxGPU-Virtualization/releases/download/mainline%2F${DRIVERS_VERSION}/gim-dkms_${DRIVERS_VERSION}_all.deb && \ + dpkg -i gim-dkms_${DRIVERS_VERSION}_all.deb && \ + rm gim-dkms_${DRIVERS_VERSION}_all.deb RUN depmod ${KERNEL_FULL_VERSION} diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go index 502ce6ab..9203e785 100644 --- a/internal/kmmmodule/kmmmodule.go +++ b/internal/kmmmodule/kmmmodule.go @@ -191,7 +191,6 @@ func resolveDockerfile(cmName string, devConfig *amdv1alpha1.DeviceConfig) (stri switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: dockerfileTemplate = dockerfileTemplateUbuntuVGPUHost - dockerfileTemplate = strings.Replace(dockerfileTemplate, "$$GPU_MODEL", devConfig.Spec.Driver.VFPassthrough.GPUModel, -1) } driverLabel, present := driverLabels[version] if !present { @@ -459,7 +458,7 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * ModuleName: moduleName, FirmwarePath: firmwarePath, Args: &kmmv1beta1.ModprobeArgs{}, - Parameters: getModprobeParametersFromNodeInfo(nodes), + Parameters: getModprobeParametersFromNodeInfo(nodes, devConfig), ModulesLoadingOrder: modLoadingOrder, }, Version: devConfig.Spec.Driver.Version, @@ -616,11 +615,8 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo } func addNodeInfoSuffixToImageTag(imgStr, osName, driversVersion string, devCfg *amdv1alpha1.DeviceConfig) string { - // if driver is vGPU host, different GPU model's driver image would be different - // need to add a suffix to distinguish them - gpuModelSuffix := utils.GetGPUModelSuffix(devCfg) // KMM will render and fulfill the value of ${KERNEL_FULL_VERSION} - tag := osName + "-${KERNEL_FULL_VERSION}-" + driversVersion + gpuModelSuffix + tag := osName + "-${KERNEL_FULL_VERSION}-" + driversVersion // tag cannot be more than 128 chars if len(tag) > 128 { tag = tag[len(tag)-128:] @@ -763,12 +759,15 @@ func getNodeSelector(devConfig *amdv1alpha1.DeviceConfig) map[string]string { return ns } -func getModprobeParametersFromNodeInfo(nodes *v1.NodeList) []string { - // if selected nodes have VF device, we need to pass specific argument to modprobe command - // in order to make sure the amdgpu was loaded successfully into guest VM - for _, node := range nodes.Items { - if utils.HasNodeLabelKey(node, utils.NodeFeatureLabelAmdVGpu) { - return 
[]string{"ip_block_mask=0x7f"} +func getModprobeParametersFromNodeInfo(nodes *v1.NodeList, devConfig *amdv1alpha1.DeviceConfig) []string { + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeContainer: + // if selected nodes have VF device and the driver type is container, we need to pass specific argument to modprobe command + // in order to make sure the amdgpu was loaded successfully into guest VM + for _, node := range nodes.Items { + if utils.HasNodeLabelKey(node, utils.NodeFeatureLabelAmdVGpu) { + return []string{"ip_block_mask=0x7f"} + } } } return nil diff --git a/internal/utils.go b/internal/utils.go index fbf67875..656929bc 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -48,8 +48,6 @@ const ( // kubevirt DriverTypeContainer = "container" DriverTypeVFPassthrough = "vf-passthrough" - VGPUHostModelMI210 = "mi210" - VGPUHostModelMI300X = "mi300x" ) var ( @@ -214,17 +212,3 @@ func IsPrometheusServiceMonitorEnable(devConfig *amdv1alpha1.DeviceConfig) bool } return false } - -func GetGPUModelSuffix(devCfg *amdv1alpha1.DeviceConfig) string { - gpuModelSuffix := "" - switch devCfg.Spec.Driver.DriverType { - case DriverTypeVFPassthrough: - switch devCfg.Spec.Driver.VFPassthrough.GPUModel { - case VGPUHostModelMI210: - gpuModelSuffix = "-" + VGPUHostModelMI210 - case VGPUHostModelMI300X: - gpuModelSuffix = "-" + VGPUHostModelMI300X - } - } - return gpuModelSuffix -} From c6ef4a3975a2b03b79e0cd13e3556e6c6aea0521 Mon Sep 17 00:00:00 2001 From: Yan Sun Date: Tue, 6 May 2025 17:00:04 -0700 Subject: [PATCH 06/21] Add workerMgr module to enable vfio post process after installing GIM driver (#583) --- Makefile | 2 +- api/v1alpha1/deviceconfig_types.go | 2 +- ...md-gpu-operator.clusterserviceversion.yaml | 3 + cmd/main.go | 7 +- ...md-gpu-operator.clusterserviceversion.yaml | 3 + .../controllers/device_config_reconciler.go | 305 +++++++++--------- .../device_config_reconciler_test.go | 14 +- .../mock_device_config_reconciler.go | 14 - .../controllers/mock_pod_event_handler.go | 107 ------ internal/controllers/node_event_handler.go | 66 ---- internal/controllers/watchers/daemonset.go | 230 +++++++++++++ .../controllers/watchers/device_config.go | 57 ++++ .../controllers/watchers/mock_daemonset.go | 106 ++++++ internal/controllers/watchers/mock_node.go | 106 ++++++ internal/controllers/watchers/mock_pod.go | 106 ++++++ internal/controllers/watchers/node.go | 252 +++++++++++++++ .../{pod_event_handler.go => watchers/pod.go} | 83 ++++- .../controllers/workermgr/mock_workermgr.go | 140 ++++++++ .../workermgr/scripts/vfio_bind.sh | 42 +++ .../workermgr/scripts/vfio_unbind.sh | 33 ++ internal/controllers/workermgr/workermgr.go | 284 ++++++++++++++++ .../kmmmodule/dockerfiles/vGPUHostGIM.ubuntu | 1 + internal/kmmmodule/kmmmodule.go | 38 ++- internal/utils.go | 49 +++ internal/utils_container/Dockerfile | 2 +- internal/utils_test.go | 51 +++ 26 files changed, 1719 insertions(+), 384 deletions(-) delete mode 100644 internal/controllers/mock_pod_event_handler.go delete mode 100644 internal/controllers/node_event_handler.go create mode 100644 internal/controllers/watchers/daemonset.go create mode 100644 internal/controllers/watchers/device_config.go create mode 100644 internal/controllers/watchers/mock_daemonset.go create mode 100644 internal/controllers/watchers/mock_node.go create mode 100644 internal/controllers/watchers/mock_pod.go create mode 100644 internal/controllers/watchers/node.go rename internal/controllers/{pod_event_handler.go => watchers/pod.go} (52%) create mode 100644 
internal/controllers/workermgr/mock_workermgr.go create mode 100644 internal/controllers/workermgr/scripts/vfio_bind.sh create mode 100644 internal/controllers/workermgr/scripts/vfio_unbind.sh create mode 100644 internal/controllers/workermgr/workermgr.go diff --git a/Makefile b/Makefile index f817337c..617a33c6 100644 --- a/Makefile +++ b/Makefile @@ -229,7 +229,7 @@ fmt: ## Run go fmt against code. vet: ## Run go vet against code. go vet ./... -UNIT_TEST ?= ./internal/controllers ./internal/kmmmodule ./internal +UNIT_TEST ?= ./internal ./internal/controllers ./internal/kmmmodule .PHONY: unit-test unit-test: vet ## Run the unit tests. diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index 9dfa7277..d0a1858d 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -763,7 +763,7 @@ type DeviceConfigStatus struct { //+kubebuilder:subresource:status // DeviceConfig describes how to enable AMD GPU device -// +operator-sdk:csv:customresourcedefinitions:displayName="DeviceConfig",resources={{Module,v1beta1,modules.kmm.sigs.x-k8s.io},{Daemonset,v1,apps}, {services,v1,core}} +// +operator-sdk:csv:customresourcedefinitions:displayName="DeviceConfig",resources={{Module,v1beta1,modules.kmm.sigs.x-k8s.io},{Daemonset,v1,apps},{services,v1,core},{Pod,v1,core}} type DeviceConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 50fcdd55..ddbca3d7 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -67,6 +67,9 @@ spec: - kind: Daemonset name: apps version: v1 + - kind: Pod + name: core + version: v1 - kind: services name: core version: v1 diff --git a/cmd/main.go b/cmd/main.go index 10b3756f..abc38477 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -53,6 +53,7 @@ import ( "github.com/ROCm/gpu-operator/internal/config" "github.com/ROCm/gpu-operator/internal/configmanager" "github.com/ROCm/gpu-operator/internal/controllers" + "github.com/ROCm/gpu-operator/internal/controllers/workermgr" "github.com/ROCm/gpu-operator/internal/kmmmodule" "github.com/ROCm/gpu-operator/internal/metricsexporter" "github.com/ROCm/gpu-operator/internal/nodelabeller" @@ -111,6 +112,7 @@ func main() { cmd.FatalError(setupLogger, err, "unable to create manager") } + // Use manager's client, it may read from a cache. 
client := mgr.GetClient() isOpenShift := utils.IsOpenShift(setupLogger) kmmHandler := kmmmodule.NewKMMModule(client, scheme, isOpenShift) @@ -118,6 +120,7 @@ func main() { metricsHandler := metricsexporter.NewMetricsExporter(scheme) testrunnerHandler := testrunner.NewTestRunner(scheme) configmanagerHandler := configmanager.NewConfigManager(scheme) + workerMgr := workermgr.NewWorkerMgr(client, scheme) dcr := controllers.NewDeviceConfigReconciler( mgr.GetConfig(), client, @@ -125,7 +128,9 @@ func main() { nlHandler, metricsHandler, testrunnerHandler, - configmanagerHandler) + configmanagerHandler, + workerMgr, + isOpenShift) if err = dcr.SetupWithManager(mgr); err != nil { cmd.FatalError(setupLogger, err, "unable to create controller", "name", controllers.DeviceConfigReconcilerName) } diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index 4da92d70..ef135117 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -38,6 +38,9 @@ spec: - kind: Daemonset name: apps version: v1 + - kind: Pod + name: core + version: v1 - kind: services name: core version: v1 diff --git a/internal/controllers/device_config_reconciler.go b/internal/controllers/device_config_reconciler.go index 78f0afcb..6b4928dc 100644 --- a/internal/controllers/device_config_reconciler.go +++ b/internal/controllers/device_config_reconciler.go @@ -40,37 +40,38 @@ import ( "strings" "sync" - "github.com/ROCm/gpu-operator/internal/configmanager" - "github.com/ROCm/gpu-operator/internal/metricsexporter" - "github.com/ROCm/gpu-operator/internal/testrunner" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "k8s.io/client-go/rest" - "k8s.io/client-go/util/retry" - - "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" - - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" - utils "github.com/ROCm/gpu-operator/internal" - "github.com/ROCm/gpu-operator/internal/conditions" - "github.com/ROCm/gpu-operator/internal/kmmmodule" - "github.com/ROCm/gpu-operator/internal/nodelabeller" - "github.com/ROCm/gpu-operator/internal/validator" kmmv1beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1" + kmmLabels "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - event "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" + "github.com/ROCm/gpu-operator/internal/conditions" + "github.com/ROCm/gpu-operator/internal/configmanager" + "github.com/ROCm/gpu-operator/internal/controllers/watchers" + "github.com/ROCm/gpu-operator/internal/controllers/workermgr" + 
"github.com/ROCm/gpu-operator/internal/kmmmodule" + "github.com/ROCm/gpu-operator/internal/metricsexporter" + "github.com/ROCm/gpu-operator/internal/nodelabeller" + "github.com/ROCm/gpu-operator/internal/testrunner" + "github.com/ROCm/gpu-operator/internal/validator" ) const ( @@ -81,10 +82,13 @@ const ( // ModuleReconciler reconciles a Module object type DeviceConfigReconciler struct { - once sync.Once - initErr error - helper deviceConfigReconcilerHelperAPI - podEventHandler podEventHandlerAPI + client.Client + once sync.Once + initErr error + helper deviceConfigReconcilerHelperAPI + podEventHandler watchers.PodEventHandlerAPI + nodeEventHandler watchers.NodeEventHandlerAPI + daemonsetEventHandler watchers.DaemonsetEventHandlerAPI } func NewDeviceConfigReconciler( @@ -94,56 +98,53 @@ func NewDeviceConfigReconciler( nlHandler nodelabeller.NodeLabeller, metricsHandler metricsexporter.MetricsExporter, testrunnerHandler testrunner.TestRunner, - configmanagerHandler configmanager.ConfigManager) *DeviceConfigReconciler { + configmanagerHandler configmanager.ConfigManager, + workerMgr workermgr.WorkerMgrAPI, + isOpenShift bool) *DeviceConfigReconciler { upgradeMgrHandler := newUpgradeMgrHandler(client, k8sConfig) - helper := newDeviceConfigReconcilerHelper(client, kmmHandler, nlHandler, upgradeMgrHandler, metricsHandler, testrunnerHandler, configmanagerHandler) - podEventHandler := newPodEventHandler(client) + helper := newDeviceConfigReconcilerHelper(client, kmmHandler, nlHandler, upgradeMgrHandler, metricsHandler, testrunnerHandler, configmanagerHandler, workerMgr) + podEventHandler := watchers.NewPodEventHandler(client, workerMgr) + nodeEventHandler := watchers.NewNodeEventHandler(client, workerMgr) + daemonsetEventHandler := watchers.NewDaemonsetEventHandler(client) return &DeviceConfigReconciler{ - helper: helper, - podEventHandler: podEventHandler, + Client: client, + helper: helper, + podEventHandler: podEventHandler, + nodeEventHandler: nodeEventHandler, + daemonsetEventHandler: daemonsetEventHandler, } } // SetupWithManager sets up the controller with the Manager. -// 1. Owns() will tell the manager that if any Module or Daemonset object or their status got updated -// the DeviceConfig object in their ref field need to be reconciled -// 2. findDeviceConfigsForNMC: when a NMC changed, only trigger reconcile for related DeviceConfig func (r *DeviceConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&amdv1alpha1.DeviceConfig{}). - Owns(&kmmv1beta1.Module{}). - Owns(&appsv1.DaemonSet{}). - Owns(&v1.Service{}). Named(DeviceConfigReconcilerName). - Watches( // watch NMC for updating the DeviceConfigs CR status - &kmmv1beta1.NodeModulesConfig{}, - handler.EnqueueRequestsFromMapFunc(r.helper.findDeviceConfigsForNMC), - builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + // just reconcile the spec change or deletion + For(&amdv1alpha1.DeviceConfig{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})). + Owns(&v1.Service{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})). + Owns(&kmmv1beta1.Module{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})). + Watches( // watch for owned daemonset, only update status + &appsv1.DaemonSet{}, + r.daemonsetEventHandler, + builder.WithPredicates(watchers.DaemonsetPredicate{}), ). 
- Watches(&v1.Secret{}, // watch for KMM build/sign/install related secrets + Watches( // watch for KMM build/sign/install related secrets event, reconcile corresponding DeviceConfig + &v1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.helper.findDeviceConfigsForSecret), - builder.WithPredicates( - predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return true - }, - }, - ), ). Watches(&v1.Node{}, // watch for Node resource to get latest kernel mapping for KMM CR - handler.EnqueueRequestsFromMapFunc(r.helper.findDeviceConfigsWithKMM), - builder.WithPredicates(NodeKernelVersionPredicate{}), + r.nodeEventHandler, + builder.WithPredicates(watchers.NodePredicate{}), + ). + Watches( // watch NMC for upgrademgr + &kmmv1beta1.NodeModulesConfig{}, + handler.EnqueueRequestsFromMapFunc(r.helper.findDeviceConfigsForNMC), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), ). - Watches( // watch for KMM builder pod event to auto-clean unknown status builder pod + Watches( // watch pod event to auto-clean unknown status builder pod and cleanup post-process worker pod &v1.Pod{}, r.podEventHandler, - builder.WithPredicates(PodLabelPredicate{}), // only watch for event from kmm builder pod + builder.WithPredicates(watchers.PodLabelPredicate{}), ).Complete(r) } @@ -208,7 +209,7 @@ func (r *DeviceConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request return res, fmt.Errorf("failed to get the requested %s CR: %v", req.NamespacedName, err) } - nodes, err := kmmmodule.GetK8SNodes(kmmmodule.MapToLabelSelector(devConfig.Spec.Selector)) + nodes, err := kmmmodule.GetK8SNodes(ctx, r.Client, labels.SelectorFromSet(labels.Set(devConfig.Spec.Selector))) if err != nil { return res, fmt.Errorf("failed to list Node for DeviceConfig %s: %v", req.NamespacedName, err) } @@ -332,9 +333,8 @@ type deviceConfigReconcilerHelperAPI interface { buildDeviceConfigStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error updateDeviceConfigStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error finalizeDeviceConfig(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error - findDeviceConfigsForNMC(ctx context.Context, nmc client.Object) []reconcile.Request findDeviceConfigsForSecret(ctx context.Context, secret client.Object) []reconcile.Request - findDeviceConfigsWithKMM(ctx context.Context, node client.Object) []reconcile.Request + findDeviceConfigsForNMC(ctx context.Context, nmc client.Object) []reconcile.Request setFinalizer(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error handleKMMModule(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error handleDevicePlugin(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error @@ -360,6 +360,7 @@ type deviceConfigReconcilerHelper struct { nodeAssignments map[string]string conditionUpdater conditions.ConditionUpdater validator validator.ValidatorAPI + kmmPostProcessor workermgr.WorkerMgrAPI upgradeMgrHandler upgradeMgrAPI namespace string } @@ -370,7 +371,8 @@ func newDeviceConfigReconcilerHelper(client client.Client, upgradeMgrHandler upgradeMgrAPI, metricsHandler metricsexporter.MetricsExporter, testrunnerHandler testrunner.TestRunner, - configmanagerHandler configmanager.ConfigManager) deviceConfigReconcilerHelperAPI { + configmanagerHandler configmanager.ConfigManager, + 
workerMgr workermgr.WorkerMgrAPI) deviceConfigReconcilerHelperAPI { conditionUpdater := conditions.NewDeviceConfigConditionMgr() validator := validator.NewValidator() return &deviceConfigReconcilerHelper{ @@ -383,6 +385,7 @@ func newDeviceConfigReconcilerHelper(client client.Client, nodeAssignments: make(map[string]string), conditionUpdater: conditionUpdater, validator: validator, + kmmPostProcessor: workerMgr, upgradeMgrHandler: upgradeMgrHandler, namespace: os.Getenv("OPERATOR_NAMESPACE"), } @@ -407,28 +410,6 @@ func (dcrh *deviceConfigReconcilerHelper) getRequestedDeviceConfig(ctx context.C return &devConfig, nil } -// findDeviceConfigsForNMC when a NMC changed, only trigger reconcile for related DeviceConfig -func (drch *deviceConfigReconcilerHelper) findDeviceConfigsForNMC(ctx context.Context, nmc client.Object) []reconcile.Request { - reqs := []reconcile.Request{} - logger := log.FromContext(ctx) - nmcObj, ok := nmc.(*kmmv1beta1.NodeModulesConfig) - if !ok { - logger.Error(fmt.Errorf("failed to convert object %+v to NodeModulesConfig", nmc), "") - return reqs - } - if len(nmcObj.Status.Modules) > 0 { - for _, module := range nmcObj.Status.Modules { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: module.Namespace, - Name: module.Name, - }, - }) - } - } - return reqs -} - // findDeviceConfigsForSecret when a secret changed, only trigger reconcile for related DeviceConfig func (drch *deviceConfigReconcilerHelper) findDeviceConfigsForSecret(ctx context.Context, secret client.Object) []reconcile.Request { reqs := []reconcile.Request{} @@ -478,31 +459,6 @@ func (dcrh *deviceConfigReconcilerHelper) hasSecretReference(secretName string, return false } -// findDeviceConfigsWithKMM only reconcile deviceconfigs with KMM enabled to manage out-of-tree kernel module -func (drch *deviceConfigReconcilerHelper) findDeviceConfigsWithKMM(ctx context.Context, node client.Object) []reconcile.Request { - reqs := []reconcile.Request{} - logger := log.FromContext(ctx) - deviceConfigList, err := drch.listDeviceConfigs(ctx) - if err != nil || deviceConfigList == nil { - logger.Error(err, "failed to list deviceconfigs") - return reqs - } - for _, dcfg := range deviceConfigList.Items { - if dcfg.Namespace == drch.namespace && - dcfg.Spec.Driver.Enable != nil && - *dcfg.Spec.Driver.Enable { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: dcfg.Namespace, - Name: dcfg.Name, - }, - }) - } - } - - return reqs -} - func (dcrh *deviceConfigReconcilerHelper) buildDeviceConfigStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { // fetch DeviceConfig-owned custom resource // then retrieve its status and put it to DeviceConfig's status fields @@ -514,49 +470,15 @@ func (dcrh *deviceConfigReconcilerHelper) buildDeviceConfigStatus(ctx context.Co } if kmmModuleObj != nil { devConfig.Status.Drivers = amdv1alpha1.DeploymentStatus{ - NodesMatchingSelectorNumber: kmmModuleObj.Status.ModuleLoader.DesiredNumber, + NodesMatchingSelectorNumber: kmmModuleObj.Status.ModuleLoader.NodesMatchingSelectorNumber, DesiredNumber: kmmModuleObj.Status.ModuleLoader.DesiredNumber, AvailableNumber: kmmModuleObj.Status.ModuleLoader.AvailableNumber, } } } - devPlDs := appsv1.DaemonSet{} - dsName := types.NamespacedName{ - Namespace: devConfig.Namespace, - Name: devConfig.Name + "-device-plugin", - } - - if err := dcrh.client.Get(ctx, dsName, &devPlDs); err == nil { - devConfig.Status.DevicePlugin = 
amdv1alpha1.DeploymentStatus{ - NodesMatchingSelectorNumber: devPlDs.Status.NumberAvailable, - DesiredNumber: devPlDs.Status.DesiredNumberScheduled, - AvailableNumber: devPlDs.Status.NumberAvailable, - } - } else { - return fmt.Errorf("failed to fetch device-plugin %+v: %+v", dsName, err) - } - - if devConfig.Spec.MetricsExporter.Enable != nil && *devConfig.Spec.MetricsExporter.Enable { - metricsDS := appsv1.DaemonSet{} - dsName := types.NamespacedName{ - Namespace: devConfig.Namespace, - Name: devConfig.Name + "-" + metricsexporter.ExporterName, - } - - if err := dcrh.client.Get(ctx, dsName, &metricsDS); err == nil { - devConfig.Status.MetricsExporter = amdv1alpha1.DeploymentStatus{ - NodesMatchingSelectorNumber: metricsDS.Status.NumberAvailable, - DesiredNumber: metricsDS.Status.DesiredNumberScheduled, - AvailableNumber: metricsDS.Status.NumberAvailable, - } - } else { - return fmt.Errorf("failed to fetch metricsExporter %+v: %+v", dsName, err) - } - } - // fetch latest node modules config, push their status back to DeviceConfig's status fields - if err := dcrh.updateDeviceConfigNodeStatus(ctx, devConfig, nodes); err != nil { + if err := dcrh.buildDeviceConfigNodeStatus(ctx, devConfig, nodes); err != nil { return err } @@ -576,8 +498,9 @@ func (dcrh *deviceConfigReconcilerHelper) updateDeviceConfigStatus(ctx context.C if err != nil { return err } + originalObj := latestObj.DeepCopy() devConfig.Status.DeepCopyInto(&latestObj.Status) - if err := dcrh.client.Status().Update(ctx, latestObj); err != nil { + if err := dcrh.client.Status().Patch(ctx, latestObj, client.MergeFrom(originalObj)); err != nil { return err } return nil @@ -593,7 +516,7 @@ func (dcrh *deviceConfigReconcilerHelper) getDeviceConfigOwnedKMMModule(ctx cont return &module, nil } -func (dcrh *deviceConfigReconcilerHelper) updateDeviceConfigNodeStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { +func (dcrh *deviceConfigReconcilerHelper) buildDeviceConfigNodeStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { logger := log.FromContext(ctx) previousUpgradeTimes := make(map[string]string) previousBootIds := make(map[string]string) @@ -648,7 +571,6 @@ func (dcrh *deviceConfigReconcilerHelper) updateDeviceConfigNodeStatus(ctx conte } } } - return nil } @@ -882,7 +804,14 @@ func (dcrh *deviceConfigReconcilerHelper) finalizeDeviceConfig(ctx context.Conte if k8serrors.IsNotFound(err) { // if KMM module CR is not found if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { - logger.Info("module already deleted, removing finalizer", "module", namespacedName) + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + if !dcrh.checkPostProcessFinalizeCondition(ctx, devConfig, nodes) { + return errors.New("waiting for post-process finalize condition") + } + default: + logger.Info("module already deleted, removing finalizer", "module", namespacedName) + } } else { // driver disabled mode won't have KMM CR created // but it still requries the removal of node labels @@ -913,6 +842,65 @@ func (dcrh *deviceConfigReconcilerHelper) finalizeDeviceConfig(ctx context.Conte return nil } +func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) bool { + logger := log.FromContext(ctx) + for _, node := range nodes.Items { + pod, err := dcrh.kmmPostProcessor.GetWorkerPod(ctx, devConfig, &node) + if err == nil { + 
logger.Info(fmt.Sprintf("post-process worker pod %+v still exist on node %+v", pod.Name, node.Name)) + return false + } + if !k8serrors.IsNotFound(err) { + logger.Error(err, fmt.Sprintf("failed to get post-process worker pod on node %+v", node.Name)) + return false + } + if _, ok := node.Labels[dcrh.kmmPostProcessor.GetWorkReadyLabel(types.NamespacedName{ + Namespace: devConfig.Namespace, + Name: devConfig.Name, + })]; ok { + logger.Info(fmt.Sprintf("post-process label still exist on node %+v", node.Name)) + return false + } + } + return true +} + +// findDeviceConfigsForNMC when a NMC changed, only trigger reconcile for related DeviceConfig +func (drch *deviceConfigReconcilerHelper) findDeviceConfigsForNMC(ctx context.Context, nmc client.Object) []reconcile.Request { + reqs := []reconcile.Request{} + logger := log.FromContext(ctx) + nmcObj, ok := nmc.(*kmmv1beta1.NodeModulesConfig) + if !ok { + logger.Error(fmt.Errorf("failed to convert object %+v to NodeModulesConfig", nmc), "") + return reqs + } + reconcileDeviceConfig := func(moduleConfig kmmv1beta1.ModuleConfig, nsn types.NamespacedName) { + switch moduleConfig.Modprobe.ModuleName { + // only reconcile for the kernel module whose name is amdgpu or gim + case kmmmodule.ContainerDriverModuleName, + kmmmodule.VFPassthroughDriverModuleName: + reqs = append(reqs, reconcile.Request{ + NamespacedName: nsn, + }) + } + } + // reconcile for all the detected DeviceConfig in both NMC Spec and Status + // the external function EnqueueRequestsFromMapFunc is already deduplicating the reconcile request to the same DeviceConfig + for _, module := range nmcObj.Spec.Modules { + reconcileDeviceConfig(module.Config, types.NamespacedName{ + Namespace: module.Namespace, + Name: module.Name, + }) + } + for _, module := range nmcObj.Status.Modules { + reconcileDeviceConfig(module.Config, types.NamespacedName{ + Namespace: module.Namespace, + Name: module.Name, + }) + } + return reqs +} + func (dcrh *deviceConfigReconcilerHelper) handleBuildConfigMap(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { logger := log.FromContext(ctx) if devConfig.Spec.Driver.Enable == nil || !*devConfig.Spec.Driver.Enable { @@ -993,7 +981,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleKMMModule(ctx context.Context, d func (dcrh *deviceConfigReconcilerHelper) handleDevicePlugin(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { logger := log.FromContext(ctx) ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + "-device-plugin"}, + ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + utils.DevicePluginNameSuffix}, } opRes, err := controllerutil.CreateOrPatch(ctx, dcrh.client, ds, func() error { @@ -1027,7 +1015,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleNodeLabeller(ctx context.Context existingDS := &appsv1.DaemonSet{} existingDSMetadata := types.NamespacedName{ Namespace: devConfig.Namespace, - Name: devConfig.Name + "-node-labeller", + Name: devConfig.Name + utils.NodeLabellerNameSuffix, } if err := dcrh.client.Get(ctx, existingDSMetadata, existingDS); err == nil { logger.Info("disabling node labeller, deleting existing node labeller daemonset", "daemonset", existingDSMetadata.Name) @@ -1045,7 +1033,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleNodeLabeller(ctx context.Context } ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + 
"-node-labeller"}, + ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + utils.NodeLabellerNameSuffix}, } opRes, err := controllerutil.CreateOrPatch(ctx, dcrh.client, ds, func() error { return dcrh.nlHandler.SetNodeLabellerAsDesired(ds, devConfig) @@ -1059,7 +1047,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleNodeLabeller(ctx context.Context // todo: temp. cleanup labels set by node-labeller // not required once label cleanup is added in node-labeller - nodeLabels := func() string { + labelSelector, err := func() (labels.Selector, error) { // nodes without gpu, kmm, dev-plugin sel := []string{ "! " + utils.NodeFeatureLabelAmdGpu, @@ -1068,8 +1056,8 @@ func (dcrh *deviceConfigReconcilerHelper) handleNodeLabeller(ctx context.Context if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { sel = append(sel, - "! "+labels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name), - "! "+labels.GetDevicePluginNodeLabel(devConfig.Namespace, devConfig.Name), + "! "+kmmLabels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name), + "! "+kmmLabels.GetDevicePluginNodeLabel(devConfig.Namespace, devConfig.Name), ) } @@ -1080,15 +1068,22 @@ func (dcrh *deviceConfigReconcilerHelper) handleNodeLabeller(ctx context.Context } sel = append(sel, fmt.Sprintf("%s=%s", k, v)) } - return strings.Join(sel, ",") + selector, err := labels.Parse(strings.Join(sel, ",")) + if err != nil { + return nil, err + } + return selector, nil }() + if err != nil { + return err + } - its, err := kmmmodule.GetK8SNodes(nodeLabels) + its, err := kmmmodule.GetK8SNodes(ctx, dcrh.client, labelSelector) if err != nil { logger.Info("failed to get node list ", err) return nil } - logger.Info(fmt.Sprintf("select (%v) found %v nodes", nodeLabels, len(its.Items))) + logger.Info(fmt.Sprintf("select (%v) found %v nodes", labelSelector, len(its.Items))) if err := dcrh.updateNodeLabels(ctx, devConfig, its, false); err != nil { logger.Error(err, "failed to update node labels") @@ -1106,7 +1101,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleModuleUpgrade(ctx context.Contex func (dcrh *deviceConfigReconcilerHelper) handleMetricsExporter(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error { logger := log.FromContext(ctx) ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + "-" + metricsexporter.ExporterName}, + ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + utils.MetricsExporterNameSuffix}, } // delete if disabled @@ -1197,7 +1192,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleMetricsExporter(ctx context.Cont func (dcrh *deviceConfigReconcilerHelper) handleTestRunner(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error { logger := log.FromContext(ctx) ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + "-" + testrunner.TestRunnerName}, + ObjectMeta: metav1.ObjectMeta{Namespace: devConfig.Namespace, Name: devConfig.Name + utils.TestRunnerNameSuffix}, } // delete if disabled diff --git a/internal/controllers/device_config_reconciler_test.go b/internal/controllers/device_config_reconciler_test.go index 8d3c0f4e..437cb6a9 100644 --- a/internal/controllers/device_config_reconciler_test.go +++ b/internal/controllers/device_config_reconciler_test.go @@ -194,7 +194,7 @@ var _ = Describe("getLabelsPerModules", func() { BeforeEach(func() { ctrl := 
gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -239,7 +239,7 @@ var _ = Describe("setFinalizer", func() { BeforeEach(func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -275,7 +275,7 @@ var _ = Describe("finalizeDeviceConfig", func() { BeforeEach(func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -486,7 +486,7 @@ var _ = Describe("handleKMMModule", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) kmmHelper = kmmmodule.NewMockKMMModuleAPI(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -554,7 +554,7 @@ var _ = Describe("handleBuildConfigMap", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) kmmHelper = kmmmodule.NewMockKMMModuleAPI(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -621,7 +621,7 @@ var _ = Describe("handleNodeLabeller", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) nodeLabellerHelper = nodelabeller.NewMockNodeLabeller(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nodeLabellerHelper, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nodeLabellerHelper, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -647,6 +647,7 @@ var _ = Describe("handleNodeLabeller", func() { kubeClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(k8serrors.NewNotFound(schema.GroupResource{}, "whatever")), nodeLabellerHelper.EXPECT().SetNodeLabellerAsDesired(newDS, devConfig).Return(nil), kubeClient.EXPECT().Create(ctx, gomock.Any()).Return(nil), + kubeClient.EXPECT().List(ctx, gomock.Any(), gomock.Any()).Return(nil), ) err := dcrh.handleNodeLabeller(ctx, devConfig, testNodeList) @@ -666,6 +667,7 @@ var _ = Describe("handleNodeLabeller", func() { }, ), nodeLabellerHelper.EXPECT().SetNodeLabellerAsDesired(existingDS, devConfig).Return(nil), + kubeClient.EXPECT().List(ctx, gomock.Any(), gomock.Any()).Return(nil), ) err := dcrh.handleNodeLabeller(ctx, devConfig, testNodeList) diff --git a/internal/controllers/mock_device_config_reconciler.go b/internal/controllers/mock_device_config_reconciler.go index 969363d2..750ad697 100644 --- a/internal/controllers/mock_device_config_reconciler.go +++ b/internal/controllers/mock_device_config_reconciler.go @@ -146,20 +146,6 @@ func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) findDeviceConfigsForS return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "findDeviceConfigsForSecret", 
reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).findDeviceConfigsForSecret), ctx, secret) } -// findDeviceConfigsWithKMM mocks base method. -func (m *MockdeviceConfigReconcilerHelperAPI) findDeviceConfigsWithKMM(ctx context.Context, node client.Object) []reconcile.Request { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "findDeviceConfigsWithKMM", ctx, node) - ret0, _ := ret[0].([]reconcile.Request) - return ret0 -} - -// findDeviceConfigsWithKMM indicates an expected call of findDeviceConfigsWithKMM. -func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) findDeviceConfigsWithKMM(ctx, node any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "findDeviceConfigsWithKMM", reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).findDeviceConfigsWithKMM), ctx, node) -} - // getDeviceConfigOwnedKMMModule mocks base method. func (m *MockdeviceConfigReconcilerHelperAPI) getDeviceConfigOwnedKMMModule(ctx context.Context, devConfig *v1alpha1.DeviceConfig) (*v1beta1.Module, error) { m.ctrl.T.Helper() diff --git a/internal/controllers/mock_pod_event_handler.go b/internal/controllers/mock_pod_event_handler.go deleted file mode 100644 index 93ef5559..00000000 --- a/internal/controllers/mock_pod_event_handler.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright (c) Advanced Micro Devices, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the \"License\"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an \"AS IS\" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by MockGen. DO NOT EDIT. -// Source: pod_event_handler.go -// -// Generated by this command: -// -// mockgen -source=pod_event_handler.go -package=controllers -destination=mock_pod_event_handler.go podEventHandlerAPI -// -// Package controllers is a generated GoMock package. -package controllers - -import ( - context "context" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" - workqueue "k8s.io/client-go/util/workqueue" - client "sigs.k8s.io/controller-runtime/pkg/client" - event "sigs.k8s.io/controller-runtime/pkg/event" - reconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// MockpodEventHandlerAPI is a mock of podEventHandlerAPI interface. -type MockpodEventHandlerAPI struct { - ctrl *gomock.Controller - recorder *MockpodEventHandlerAPIMockRecorder -} - -// MockpodEventHandlerAPIMockRecorder is the mock recorder for MockpodEventHandlerAPI. -type MockpodEventHandlerAPIMockRecorder struct { - mock *MockpodEventHandlerAPI -} - -// NewMockpodEventHandlerAPI creates a new mock instance. -func NewMockpodEventHandlerAPI(ctrl *gomock.Controller) *MockpodEventHandlerAPI { - mock := &MockpodEventHandlerAPI{ctrl: ctrl} - mock.recorder = &MockpodEventHandlerAPIMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockpodEventHandlerAPI) EXPECT() *MockpodEventHandlerAPIMockRecorder { - return m.recorder -} - -// Create mocks base method. 
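The file being deleted here, like the mock_*.go files added under internal/controllers/watchers/, is mockgen output driven by the `//go:generate mockgen ...` directives visible later in this diff. Each mock satisfies the same four-callback event-handler contract (Create/Update/Delete/Generic against a typed workqueue) that controller-runtime feeds from a watch. For orientation, a hand-written handler with the same shape might look like the following (an illustrative stand-in, not code from the patch):

```go
package watchers

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// exampleEnqueueHandler mirrors the contract the generated mocks implement:
// four event callbacks, each free to enqueue reconcile requests.
type exampleEnqueueHandler struct{}

func (exampleEnqueueHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
	// Enqueue the created object itself for reconciliation.
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Namespace: evt.Object.GetNamespace(), Name: evt.Object.GetName(),
	}})
}

func (exampleEnqueueHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
	// Enqueue based on the new version of the object.
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Namespace: evt.ObjectNew.GetNamespace(), Name: evt.ObjectNew.GetName(),
	}})
}

// Delete and Generic are no-ops in this sketch.
func (exampleEnqueueHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
}

func (exampleEnqueueHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
}
```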
-func (m *MockpodEventHandlerAPI) Create(arg0 context.Context, arg1 event.TypedCreateEvent[client.Object], arg2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Create", arg0, arg1, arg2) -} - -// Create indicates an expected call of Create. -func (mr *MockpodEventHandlerAPIMockRecorder) Create(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockpodEventHandlerAPI)(nil).Create), arg0, arg1, arg2) -} - -// Delete mocks base method. -func (m *MockpodEventHandlerAPI) Delete(arg0 context.Context, arg1 event.TypedDeleteEvent[client.Object], arg2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Delete", arg0, arg1, arg2) -} - -// Delete indicates an expected call of Delete. -func (mr *MockpodEventHandlerAPIMockRecorder) Delete(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockpodEventHandlerAPI)(nil).Delete), arg0, arg1, arg2) -} - -// Generic mocks base method. -func (m *MockpodEventHandlerAPI) Generic(arg0 context.Context, arg1 event.TypedGenericEvent[client.Object], arg2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Generic", arg0, arg1, arg2) -} - -// Generic indicates an expected call of Generic. -func (mr *MockpodEventHandlerAPIMockRecorder) Generic(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generic", reflect.TypeOf((*MockpodEventHandlerAPI)(nil).Generic), arg0, arg1, arg2) -} - -// Update mocks base method. -func (m *MockpodEventHandlerAPI) Update(arg0 context.Context, arg1 event.TypedUpdateEvent[client.Object], arg2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Update", arg0, arg1, arg2) -} - -// Update indicates an expected call of Update. -func (mr *MockpodEventHandlerAPIMockRecorder) Update(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockpodEventHandlerAPI)(nil).Update), arg0, arg1, arg2) -} diff --git a/internal/controllers/node_event_handler.go b/internal/controllers/node_event_handler.go deleted file mode 100644 index 4eaa1845..00000000 --- a/internal/controllers/node_event_handler.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2024. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Copyright (c) Advanced Micro Devices, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the \"License\"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an \"AS IS\" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - v1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -type NodeKernelVersionPredicate struct { - predicate.Funcs -} - -func (NodeKernelVersionPredicate) Create(e event.CreateEvent) bool { - return true -} - -func (NodeKernelVersionPredicate) Update(e event.UpdateEvent) bool { - oldNode, okOld := e.ObjectOld.(*v1.Node) - newNode, okNew := e.ObjectNew.(*v1.Node) - if !okOld || !okNew { - return false - } - - oldKernelVersion := oldNode.Status.NodeInfo.KernelVersion - newKernelVersion := newNode.Status.NodeInfo.KernelVersion - - // if kernel version changed - // reconcile the deviceconfig to update kernel mapping of KMM CR - return oldKernelVersion != newKernelVersion -} - -func (NodeKernelVersionPredicate) Delete(e event.DeleteEvent) bool { - return true -} diff --git a/internal/controllers/watchers/daemonset.go b/internal/controllers/watchers/daemonset.go new file mode 100644 index 00000000..fadabad5 --- /dev/null +++ b/internal/controllers/watchers/daemonset.go @@ -0,0 +1,230 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watchers + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/apps/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + workqueue "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" +) + +type DaemonsetPredicate struct { + predicate.Funcs +} + +func ownedByDeviceConfig(obj client.Object) bool { + for _, owner := range obj.GetOwnerReferences() { + if owner.Kind == utils.KindDeviceConfig { + return true + } + } + return false +} + +func (DaemonsetPredicate) Update(e event.UpdateEvent) bool { + return ownedByDeviceConfig(e.ObjectNew) +} + +func (DaemonsetPredicate) Generic(e event.GenericEvent) bool { + return ownedByDeviceConfig(e.Object) +} + +func (DaemonsetPredicate) Delete(e event.DeleteEvent) bool { + return ownedByDeviceConfig(e.Object) +} + +//go:generate mockgen -source=daemonset.go -package=watchers -destination=mock_daemonset.go DaemonsetEventHandlerAPI +type DaemonsetEventHandlerAPI interface { + Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) +} + +func NewDaemonsetEventHandler(client client.Client) DaemonsetEventHandlerAPI { + return &DaemonsetEventHandler{ + client: client, + } +} + +type DaemonsetEventHandler struct { + client client.Client +} + +// Create handle create event +func (h *DaemonsetEventHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + devConfigName := h.fetchOwnerDeviceConfigName(evt.Object) + if devConfigName == "" { + // if there is no DeviceConfig owner, stop processing event for this daemonset + return + } + h.patchDeviceConfigNodeStatus(ctx, evt.Object, devConfigName) +} + +// Create handle generic event +func (h *DaemonsetEventHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + devConfigName := h.fetchOwnerDeviceConfigName(evt.Object) + if devConfigName == "" { + // if there is no DeviceConfig owner, stop processing event for this daemonset + return + } + h.patchDeviceConfigNodeStatus(ctx, evt.Object, devConfigName) +} + +// Delete handle delete event +func (h *DaemonsetEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + devConfigName := h.fetchOwnerDeviceConfigName(evt.Object) + if devConfigName == "" { + // if there is no DeviceConfig owner, stop processing event for this daemonset + return + } + h.patchDeviceConfigNodeStatus(ctx, evt.Object, devConfigName) +} + +// Update handle update event +func (h *DaemonsetEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + devConfigName := h.fetchOwnerDeviceConfigName(evt.ObjectNew) + if devConfigName == "" { + // if there is no 
DeviceConfig owner, stop processing event for this daemonset + return + } + h.patchDeviceConfigNodeStatus(ctx, evt.ObjectNew, devConfigName) + // if the managed daemonset got spec changed or deleted, reconcile the owner DeviceConfig + if evt.ObjectOld.GetGeneration() != evt.ObjectNew.GetGeneration() || + (evt.ObjectOld.GetDeletionTimestamp() == nil && evt.ObjectNew.GetDeletionTimestamp() != nil) || + (evt.ObjectOld.GetDeletionTimestamp() != nil && evt.ObjectNew.GetDeletionTimestamp() == nil) { + q.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: evt.ObjectNew.GetNamespace(), + Name: devConfigName, + }, + }) + } +} + +func (h *DaemonsetEventHandler) patchDeviceConfigNodeStatus(ctx context.Context, obj client.Object, devConfigName string) { + logger := log.FromContext(ctx) + // whenever NMC object get updated + // push the NMC status information to corresponding DeviceConfig status + ds, ok := obj.(*v1.DaemonSet) + if !ok { + return + } + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + devConfig := &v1alpha1.DeviceConfig{} + err := h.client.Get(ctx, types.NamespacedName{Name: devConfigName, Namespace: ds.Namespace}, devConfig) + if err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "cannot get DeviceConfig for handling daemonset event", + "namesace", ds.Namespace, "name", ds.Name) + return err + } + + latestDS := &v1.DaemonSet{} + err = h.client.Get(ctx, types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}, latestDS) + if err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "cannot fetch daemonset for handling daemonset event", + "namesace", ds.Namespace, "name", ds.Name) + return err + } + // if err == nil the latest status counter will be pushed to DeviceConfig + // OR if err == NotFound, zero counter values will be pushed to DeviceConfig + + devConfigCopy := devConfig.DeepCopy() + update := false + switch { + case strings.HasSuffix(latestDS.Name, utils.MetricsExporterNameSuffix): + update = h.handleMetricsExporterStatus(latestDS, devConfig) + case strings.HasSuffix(latestDS.Name, utils.DevicePluginNameSuffix): + update = h.handleDevicePluginStatus(latestDS, devConfig) + } + if update { + err = h.client.Status().Patch(ctx, devConfig, client.MergeFrom(devConfigCopy)) + if err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "cannot patch DeviceConfig status") + } + return err + } + return nil + }); err != nil { + logger.Error(err, fmt.Sprintf("failed to patch device config status for daemonset %+v", ds.Name)) + } +} + +func (h *DaemonsetEventHandler) handleMetricsExporterStatus(ds *v1.DaemonSet, devConfig *v1alpha1.DeviceConfig) bool { + if devConfig.Status.MetricsExporter.AvailableNumber == ds.Status.NumberAvailable && + devConfig.Status.MetricsExporter.NodesMatchingSelectorNumber == ds.Status.NumberAvailable && + devConfig.Status.MetricsExporter.DesiredNumber == ds.Status.DesiredNumberScheduled { + // if there is nothing to update, skip the patch operation + return false + } + devConfig.Status.MetricsExporter.AvailableNumber = ds.Status.NumberAvailable + devConfig.Status.MetricsExporter.NodesMatchingSelectorNumber = ds.Status.NumberAvailable + devConfig.Status.MetricsExporter.DesiredNumber = ds.Status.DesiredNumberScheduled + return true +} + +func (h *DaemonsetEventHandler) handleDevicePluginStatus(ds *v1.DaemonSet, devConfig *v1alpha1.DeviceConfig) bool { + if devConfig.Status.DevicePlugin.AvailableNumber == ds.Status.NumberAvailable && + devConfig.Status.DevicePlugin.NodesMatchingSelectorNumber == 
ds.Status.NumberAvailable && + devConfig.Status.DevicePlugin.DesiredNumber == ds.Status.DesiredNumberScheduled { + // if there is nothing to update, skip the patch operation + return false + } + devConfig.Status.DevicePlugin.AvailableNumber = ds.Status.NumberAvailable + devConfig.Status.DevicePlugin.NodesMatchingSelectorNumber = ds.Status.NumberAvailable + devConfig.Status.DevicePlugin.DesiredNumber = ds.Status.DesiredNumberScheduled + return true +} + +func (h *DaemonsetEventHandler) fetchOwnerDeviceConfigName(obj client.Object) string { + for _, owner := range obj.GetOwnerReferences() { + if owner.Kind == utils.KindDeviceConfig { + return owner.Name + } + } + return "" +} diff --git a/internal/controllers/watchers/device_config.go b/internal/controllers/watchers/device_config.go new file mode 100644 index 00000000..20290aa9 --- /dev/null +++ b/internal/controllers/watchers/device_config.go @@ -0,0 +1,57 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchers + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// SpecChangedOrDeletionPredicate implements predicate.Predicate interface. +// triggering reconciliation +// only if the Spec field has changed or the DeletionTimestamp has been updated. +type SpecChangedOrDeletionPredicate struct { + predicate.Funcs +} + +// Update implements the update event filter for Spec or DeletionTimestamp changes. +func (SpecChangedOrDeletionPredicate) Update(e event.UpdateEvent) bool { + // 1. Check if the DeletionTimestamp has changed. + // This catches the case where the object is marked for deletion. + if (e.ObjectOld.GetDeletionTimestamp() == nil && e.ObjectNew.GetDeletionTimestamp() != nil) || + (e.ObjectOld.GetDeletionTimestamp() != nil && e.ObjectNew.GetDeletionTimestamp() == nil) { + return true // Reconcile when deletion starts + } + + // 2. Check if the Spec has changed. + if e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() { + return true + } + + // If neither DeletionTimestamp nor Spec changed, don't reconcile. + return false +} + +// Create returns true, allowing reconciliation when a new resource is created. +func (SpecChangedOrDeletionPredicate) Create(e event.CreateEvent) bool { + return true +} + +// Delete returns true, allowing reconciliation when a resource is deleted. +func (SpecChangedOrDeletionPredicate) Delete(e event.DeleteEvent) bool { + return true +} diff --git a/internal/controllers/watchers/mock_daemonset.go b/internal/controllers/watchers/mock_daemonset.go new file mode 100644 index 00000000..66187911 --- /dev/null +++ b/internal/controllers/watchers/mock_daemonset.go @@ -0,0 +1,106 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: daemonset.go +// +// Generated by this command: +// +// mockgen -source=daemonset.go -package=watchers -destination=mock_daemonset.go DaemonsetEventHandlerAPI +// +// Package watchers is a generated GoMock package. +package watchers + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + workqueue "k8s.io/client-go/util/workqueue" + event "sigs.k8s.io/controller-runtime/pkg/event" + reconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// MockDaemonsetEventHandlerAPI is a mock of DaemonsetEventHandlerAPI interface. +type MockDaemonsetEventHandlerAPI struct { + ctrl *gomock.Controller + recorder *MockDaemonsetEventHandlerAPIMockRecorder +} + +// MockDaemonsetEventHandlerAPIMockRecorder is the mock recorder for MockDaemonsetEventHandlerAPI. +type MockDaemonsetEventHandlerAPIMockRecorder struct { + mock *MockDaemonsetEventHandlerAPI +} + +// NewMockDaemonsetEventHandlerAPI creates a new mock instance. +func NewMockDaemonsetEventHandlerAPI(ctrl *gomock.Controller) *MockDaemonsetEventHandlerAPI { + mock := &MockDaemonsetEventHandlerAPI{ctrl: ctrl} + mock.recorder = &MockDaemonsetEventHandlerAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemonsetEventHandlerAPI) EXPECT() *MockDaemonsetEventHandlerAPIMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockDaemonsetEventHandlerAPI) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Create", ctx, evt, q) +} + +// Create indicates an expected call of Create. +func (mr *MockDaemonsetEventHandlerAPIMockRecorder) Create(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDaemonsetEventHandlerAPI)(nil).Create), ctx, evt, q) +} + +// Delete mocks base method. +func (m *MockDaemonsetEventHandlerAPI) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Delete", ctx, evt, q) +} + +// Delete indicates an expected call of Delete. +func (mr *MockDaemonsetEventHandlerAPIMockRecorder) Delete(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDaemonsetEventHandlerAPI)(nil).Delete), ctx, evt, q) +} + +// Generic mocks base method. +func (m *MockDaemonsetEventHandlerAPI) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Generic", ctx, evt, q) +} + +// Generic indicates an expected call of Generic. 
+func (mr *MockDaemonsetEventHandlerAPIMockRecorder) Generic(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generic", reflect.TypeOf((*MockDaemonsetEventHandlerAPI)(nil).Generic), ctx, evt, q) +} + +// Update mocks base method. +func (m *MockDaemonsetEventHandlerAPI) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Update", ctx, evt, q) +} + +// Update indicates an expected call of Update. +func (mr *MockDaemonsetEventHandlerAPIMockRecorder) Update(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDaemonsetEventHandlerAPI)(nil).Update), ctx, evt, q) +} diff --git a/internal/controllers/watchers/mock_node.go b/internal/controllers/watchers/mock_node.go new file mode 100644 index 00000000..2edb1e2f --- /dev/null +++ b/internal/controllers/watchers/mock_node.go @@ -0,0 +1,106 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: node.go +// +// Generated by this command: +// +// mockgen -source=node.go -package=watchers -destination=mock_node.go NodeEventHandlerAPI +// +// Package watchers is a generated GoMock package. +package watchers + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + workqueue "k8s.io/client-go/util/workqueue" + event "sigs.k8s.io/controller-runtime/pkg/event" + reconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// MockNodeEventHandlerAPI is a mock of NodeEventHandlerAPI interface. +type MockNodeEventHandlerAPI struct { + ctrl *gomock.Controller + recorder *MockNodeEventHandlerAPIMockRecorder +} + +// MockNodeEventHandlerAPIMockRecorder is the mock recorder for MockNodeEventHandlerAPI. +type MockNodeEventHandlerAPIMockRecorder struct { + mock *MockNodeEventHandlerAPI +} + +// NewMockNodeEventHandlerAPI creates a new mock instance. +func NewMockNodeEventHandlerAPI(ctrl *gomock.Controller) *MockNodeEventHandlerAPI { + mock := &MockNodeEventHandlerAPI{ctrl: ctrl} + mock.recorder = &MockNodeEventHandlerAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNodeEventHandlerAPI) EXPECT() *MockNodeEventHandlerAPIMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockNodeEventHandlerAPI) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Create", ctx, evt, q) +} + +// Create indicates an expected call of Create. 
+func (mr *MockNodeEventHandlerAPIMockRecorder) Create(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockNodeEventHandlerAPI)(nil).Create), ctx, evt, q) +} + +// Delete mocks base method. +func (m *MockNodeEventHandlerAPI) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Delete", ctx, evt, q) +} + +// Delete indicates an expected call of Delete. +func (mr *MockNodeEventHandlerAPIMockRecorder) Delete(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockNodeEventHandlerAPI)(nil).Delete), ctx, evt, q) +} + +// Generic mocks base method. +func (m *MockNodeEventHandlerAPI) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Generic", ctx, evt, q) +} + +// Generic indicates an expected call of Generic. +func (mr *MockNodeEventHandlerAPIMockRecorder) Generic(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generic", reflect.TypeOf((*MockNodeEventHandlerAPI)(nil).Generic), ctx, evt, q) +} + +// Update mocks base method. +func (m *MockNodeEventHandlerAPI) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Update", ctx, evt, q) +} + +// Update indicates an expected call of Update. +func (mr *MockNodeEventHandlerAPIMockRecorder) Update(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockNodeEventHandlerAPI)(nil).Update), ctx, evt, q) +} diff --git a/internal/controllers/watchers/mock_pod.go b/internal/controllers/watchers/mock_pod.go new file mode 100644 index 00000000..1c3b74fd --- /dev/null +++ b/internal/controllers/watchers/mock_pod.go @@ -0,0 +1,106 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: pod.go +// +// Generated by this command: +// +// mockgen -source=pod.go -package=watchers -destination=mock_pod.go PodEventHandlerAPI +// +// Package watchers is a generated GoMock package. +package watchers + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + workqueue "k8s.io/client-go/util/workqueue" + event "sigs.k8s.io/controller-runtime/pkg/event" + reconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// MockPodEventHandlerAPI is a mock of PodEventHandlerAPI interface. +type MockPodEventHandlerAPI struct { + ctrl *gomock.Controller + recorder *MockPodEventHandlerAPIMockRecorder +} + +// MockPodEventHandlerAPIMockRecorder is the mock recorder for MockPodEventHandlerAPI. 
+type MockPodEventHandlerAPIMockRecorder struct { + mock *MockPodEventHandlerAPI +} + +// NewMockPodEventHandlerAPI creates a new mock instance. +func NewMockPodEventHandlerAPI(ctrl *gomock.Controller) *MockPodEventHandlerAPI { + mock := &MockPodEventHandlerAPI{ctrl: ctrl} + mock.recorder = &MockPodEventHandlerAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPodEventHandlerAPI) EXPECT() *MockPodEventHandlerAPIMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockPodEventHandlerAPI) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Create", ctx, evt, q) +} + +// Create indicates an expected call of Create. +func (mr *MockPodEventHandlerAPIMockRecorder) Create(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockPodEventHandlerAPI)(nil).Create), ctx, evt, q) +} + +// Delete mocks base method. +func (m *MockPodEventHandlerAPI) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Delete", ctx, evt, q) +} + +// Delete indicates an expected call of Delete. +func (mr *MockPodEventHandlerAPIMockRecorder) Delete(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockPodEventHandlerAPI)(nil).Delete), ctx, evt, q) +} + +// Generic mocks base method. +func (m *MockPodEventHandlerAPI) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Generic", ctx, evt, q) +} + +// Generic indicates an expected call of Generic. +func (mr *MockPodEventHandlerAPIMockRecorder) Generic(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generic", reflect.TypeOf((*MockPodEventHandlerAPI)(nil).Generic), ctx, evt, q) +} + +// Update mocks base method. +func (m *MockPodEventHandlerAPI) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Update", ctx, evt, q) +} + +// Update indicates an expected call of Update. +func (mr *MockPodEventHandlerAPIMockRecorder) Update(ctx, evt, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockPodEventHandlerAPI)(nil).Update), ctx, evt, q) +} diff --git a/internal/controllers/watchers/node.go b/internal/controllers/watchers/node.go new file mode 100644 index 00000000..d6cfc473 --- /dev/null +++ b/internal/controllers/watchers/node.go @@ -0,0 +1,252 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchers + +import ( + "context" + "fmt" + "reflect" + + "github.com/go-logr/logr" + kmmv1beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1" + v1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + workqueue "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" + "github.com/ROCm/gpu-operator/internal/controllers/workermgr" + "github.com/ROCm/gpu-operator/internal/kmmmodule" +) + +type NodePredicate struct { + predicate.Funcs +} + +func (NodePredicate) Create(e event.CreateEvent) bool { + return true +} + +func (NodePredicate) Update(e event.UpdateEvent) bool { + oldNode, okOld := e.ObjectOld.(*v1.Node) + newNode, okNew := e.ObjectNew.(*v1.Node) + if !okOld || !okNew { + return false + } + + // send the event to node event handler if Node has the following update + // 1. kernel upgrade + // 2. spec change like podCIDR or taints + // 3. bootID for tracking node reboot + // 4. 
node labels change, which may affect the DeviceConfigs node selector
+ if oldNode.Status.NodeInfo.KernelVersion != newNode.Status.NodeInfo.KernelVersion ||
+ oldNode.Generation != newNode.Generation ||
+ oldNode.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID ||
+ !reflect.DeepEqual(oldNode.Labels, newNode.Labels) {
+ return true
+ }
+
+ return false
+}
+
+func (NodePredicate) Generic(e event.GenericEvent) bool {
+ return true
+}
+
+func (NodePredicate) Delete(e event.DeleteEvent) bool {
+ return true
+}
+
+//go:generate mockgen -source=node.go -package=watchers -destination=mock_node.go NodeEventHandlerAPI
+type NodeEventHandlerAPI interface {
+ Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request])
+ Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request])
+ Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request])
+ Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request])
+}
+
+func NewNodeEventHandler(client client.Client, workerMgr workermgr.WorkerMgrAPI) NodeEventHandlerAPI {
+ return &NodeEventHandler{
+ client: client,
+ workerMgr: workerMgr,
+ }
+}
+
+type NodeEventHandler struct {
+ client client.Client
+ workerMgr workermgr.WorkerMgrAPI
+}
+
+// Create handle create event
+func (h *NodeEventHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+ // if a Node was created, the kernel mapping may be updated
+ // any DeviceConfig would be possible to manage this new Node
+ // trigger the reconcile on all existing DeviceConfigs
+ h.reconcileAllDeviceConfigs(ctx, q)
+}
+
+// Generic handle generic event
+func (h *NodeEventHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+ h.reconcileRelatedDeviceConfig(ctx, evt.Object, q)
+}
+
+// Delete handle delete event
+func (h *NodeEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+ // if a Node was deleted
+ // trigger the reconcile when there exists a DeviceConfig managing the node
+ h.reconcileRelatedDeviceConfig(ctx, evt.Object, q)
+}
+
+// Update handle update event
+func (h *NodeEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+ oldNode, okOld := evt.ObjectOld.(*v1.Node)
+ newNode, okNew := evt.ObjectNew.(*v1.Node)
+ logger := log.FromContext(ctx)
+ if !okOld || !okNew {
+ return
+ }
+
+ // send the event to node event handler if Node has the following update
+ // 1. kernel upgrade
+ // 2. spec change like podCIDR or taints
+ // 3. bootID for tracking node reboot
+ // 4. 
node labels change, which may affect the DeviceConfigs node selector + if oldNode.Status.NodeInfo.KernelVersion != newNode.Status.NodeInfo.KernelVersion || + oldNode.Generation != newNode.Generation || + oldNode.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID || + !reflect.DeepEqual(oldNode.Labels, newNode.Labels) { + h.reconcileAllDeviceConfigs(ctx, q) + } + h.handlePostProcess(ctx, logger, newNode) +} + +func (h *NodeEventHandler) reconcileAllDeviceConfigs(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + logger := log.FromContext(ctx) + devConfigList := &amdv1alpha1.DeviceConfigList{} + err := h.client.List(ctx, devConfigList) + if err != nil { + logger.Error(err, "failed to list deviceconfigs") + } + for _, dcfg := range devConfigList.Items { + if dcfg.Spec.Driver.Enable != nil && + *dcfg.Spec.Driver.Enable { + q.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: dcfg.Namespace, + Name: dcfg.Name, + }, + }) + } + } +} + +func (h *NodeEventHandler) reconcileRelatedDeviceConfig(ctx context.Context, obj client.Object, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + logger := log.FromContext(ctx) + nmc := &kmmv1beta1.NodeModulesConfig{} + err := h.client.Get(ctx, types.NamespacedName{Name: obj.GetName()}, nmc) + if err != nil { + if !k8serrors.IsNotFound(err) { + logger.Error(err, "failed to get NMC for node") + } + return + } + foundDeviceConfig := false + for _, module := range nmc.Spec.Modules { + switch module.Config.Modprobe.ModuleName { + case kmmmodule.ContainerDriverModuleName, + kmmmodule.VFPassthroughDriverModuleName: + q.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: module.Namespace, + Name: module.Name, + }, + }) + foundDeviceConfig = true + } + // once amdgpu related kernel module was found on this NMC + // no need to continue for loop + if foundDeviceConfig { + break + } + } +} + +func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Logger, node *v1.Node) { + hasVFIOReadyLabel, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.VFIOMountReadyLabelTemplate) + hasModuleReadyLabel, moduleLabel, moduleDevConfigNamespace, moduleDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.KMMModuleReadyLabelTemplate) + if hasModuleReadyLabel && !hasVFIOReadyLabel { + // trigger VFIO worker pod + devConfig := &amdv1alpha1.DeviceConfig{} + err := h.client.Get(ctx, types.NamespacedName{ + Namespace: moduleDevConfigNamespace, + Name: moduleDevConfigName, + }, devConfig) + if err != nil { + if !k8serrors.IsNotFound(err) { + logger.Error(err, "failed to get DeviceConfig") + } + return + } + if devConfig.Spec.Driver.DriverType == utils.DriverTypeVFPassthrough { + logger.Info(fmt.Sprintf("node %v with configured VFPassthrough driver only has KMM module label %v %v %v, launching VFIO worker pod", + node.Name, moduleLabel, moduleDevConfigNamespace, moduleDevConfigName)) + if err := h.workerMgr.Work(ctx, devConfig, node); err != nil { + logger.Error(err, "failed to create worker pod") + } + } + } else if !hasModuleReadyLabel && hasVFIOReadyLabel { + logger.Info(fmt.Sprintf("node %v with configured VFPassthrough driver only has VFIO label %v %v %v, launching VFIO cleanup worker pod", + node.Name, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName)) + // trigger VFIO cleanup worker pod + devConfig := &amdv1alpha1.DeviceConfig{} + err := h.client.Get(ctx, types.NamespacedName{ + Namespace: 
vfioDevConfigNamespace, + Name: vfioDevConfigName, + }, devConfig) + if err != nil { + if !k8serrors.IsNotFound(err) { + logger.Error(err, "failed to get DeviceConfig") + } + return + } + if err := h.workerMgr.Cleanup(ctx, devConfig, node); err != nil { + logger.Error(err, "failed to create cleanup worker pod") + } + } +} diff --git a/internal/controllers/pod_event_handler.go b/internal/controllers/watchers/pod.go similarity index 52% rename from internal/controllers/pod_event_handler.go rename to internal/controllers/watchers/pod.go index de927ebd..24c9efdc 100644 --- a/internal/controllers/pod_event_handler.go +++ b/internal/controllers/watchers/pod.go @@ -30,36 +30,45 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package watchers import ( "context" "fmt" + utils "github.com/ROCm/gpu-operator/internal" + "github.com/ROCm/gpu-operator/internal/controllers/workermgr" + "github.com/go-logr/logr" v1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -//go:generate mockgen -source=pod_event_handler.go -package=controllers -destination=mock_pod_event_handler.go podEventHandlerAPI -type podEventHandlerAPI interface { - handler.TypedEventHandler[client.Object, reconcile.Request] +//go:generate mockgen -source=pod.go -package=watchers -destination=mock_pod.go PodEventHandlerAPI +type PodEventHandlerAPI interface { + Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) + Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) } -func newPodEventHandler(client client.Client) podEventHandlerAPI { +func NewPodEventHandler(client client.Client, workerMgr workermgr.WorkerMgrAPI) PodEventHandlerAPI { return &PodEventHandler{ - client: client, + client: client, + workerMgr: workerMgr, } } type PodEventHandler struct { - client client.Client + client client.Client + workerMgr workermgr.WorkerMgrAPI } // Create handle pod create event @@ -117,6 +126,54 @@ func (h *PodEventHandler) Update( break } } + + // if the pod is workerMgr pod, do proper handling based on pod state + if action, ok := pod.Labels[utils.WorkerActionLabelKey]; ok { + h.handleWorkerMgrPodEvt(ctx, logger, pod, action) + } +} + +func (h *PodEventHandler) handleWorkerMgrPodEvt(ctx context.Context, logger logr.Logger, pod *v1.Pod, action string) { + foundDeviceConfigOwner := false + var nsn types.NamespacedName + for _, owner := range pod.OwnerReferences { + if owner.Kind == utils.KindDeviceConfig { + nsn = types.NamespacedName{ + Namespace: pod.Namespace, + Name: owner.Name, + } + foundDeviceConfigOwner = true + } + } + if !foundDeviceConfigOwner { + logger.Info(fmt.Sprintf("cannot find DeviceConfig owner for worker pod %+v", pod.GetObjectMeta())) + return + } + switch pod.Status.Phase { + case v1.PodSucceeded: + // if the worker pod 
already succeed + // modify the node label based on action + switch action { + case utils.LoadVFIOAction: + h.workerMgr.AddWorkReadyLabel(ctx, logger, nsn, pod) + case utils.UnloadVFIOAction: + h.workerMgr.RemoveWorkReadyLabel(ctx, logger, nsn, pod) + } + // remove the completed pod + logger.Info(fmt.Sprintf("remove worker pod %v after its completion", pod.Name)) + err := h.client.Delete(ctx, pod) + if err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, fmt.Sprintf("failed to delete completed worker pod %v", pod.Name)) + return + } + case v1.PodFailed, v1.PodUnknown: + logger.Info(fmt.Sprintf("remove worker pod %v due to its %v status", pod.Name, pod.Status.Phase)) + err := h.client.Delete(ctx, pod) + if err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, fmt.Sprintf("failed to delete stale worker pod %v", pod.Name)) + return + } + } } type PodLabelPredicate struct { @@ -124,14 +181,16 @@ type PodLabelPredicate struct { } func (PodLabelPredicate) Update(e event.UpdateEvent) bool { - return hasBuilderLabel(e.ObjectNew) + return hasExpectedPodLabel(e.ObjectNew) } -func hasBuilderLabel(obj metav1.Object) bool { +func hasExpectedPodLabel(obj metav1.Object) bool { labels := obj.GetLabels() if labels == nil { return false } - value, exists := labels["kmm.node.kubernetes.io/pod-type"] - return exists && value == "builder" + value := labels["kmm.node.kubernetes.io/pod-type"] + isKMMBuilder := value == "builder" + _, isWorkerMgrPod := labels[utils.WorkerActionLabelKey] + return isKMMBuilder || isWorkerMgrPod } diff --git a/internal/controllers/workermgr/mock_workermgr.go b/internal/controllers/workermgr/mock_workermgr.go new file mode 100644 index 00000000..16b2e838 --- /dev/null +++ b/internal/controllers/workermgr/mock_workermgr.go @@ -0,0 +1,140 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: workermgr.go +// +// Generated by this command: +// +// mockgen -source=workermgr.go -package=workermgr -destination=mock_workermgr.go WorkerMgrAPI +// +// Package workermgr is a generated GoMock package. +package workermgr + +import ( + context "context" + reflect "reflect" + + v1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + logr "github.com/go-logr/logr" + gomock "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" + types "k8s.io/apimachinery/pkg/types" +) + +// MockWorkerMgrAPI is a mock of WorkerMgrAPI interface. +type MockWorkerMgrAPI struct { + ctrl *gomock.Controller + recorder *MockWorkerMgrAPIMockRecorder +} + +// MockWorkerMgrAPIMockRecorder is the mock recorder for MockWorkerMgrAPI. +type MockWorkerMgrAPIMockRecorder struct { + mock *MockWorkerMgrAPI +} + +// NewMockWorkerMgrAPI creates a new mock instance. 
+func NewMockWorkerMgrAPI(ctrl *gomock.Controller) *MockWorkerMgrAPI { + mock := &MockWorkerMgrAPI{ctrl: ctrl} + mock.recorder = &MockWorkerMgrAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWorkerMgrAPI) EXPECT() *MockWorkerMgrAPIMockRecorder { + return m.recorder +} + +// AddWorkReadyLabel mocks base method. +func (m *MockWorkerMgrAPI) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddWorkReadyLabel", ctx, logger, nsn, pod) +} + +// AddWorkReadyLabel indicates an expected call of AddWorkReadyLabel. +func (mr *MockWorkerMgrAPIMockRecorder) AddWorkReadyLabel(ctx, logger, nsn, pod any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).AddWorkReadyLabel), ctx, logger, nsn, pod) +} + +// Cleanup mocks base method. +func (m *MockWorkerMgrAPI) Cleanup(ctx context.Context, devConfig *v1alpha1.DeviceConfig, node *v1.Node) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cleanup", ctx, devConfig, node) + ret0, _ := ret[0].(error) + return ret0 +} + +// Cleanup indicates an expected call of Cleanup. +func (mr *MockWorkerMgrAPIMockRecorder) Cleanup(ctx, devConfig, node any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockWorkerMgrAPI)(nil).Cleanup), ctx, devConfig, node) +} + +// GetWorkReadyLabel mocks base method. +func (m *MockWorkerMgrAPI) GetWorkReadyLabel(nsn types.NamespacedName) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkReadyLabel", nsn) + ret0, _ := ret[0].(string) + return ret0 +} + +// GetWorkReadyLabel indicates an expected call of GetWorkReadyLabel. +func (mr *MockWorkerMgrAPIMockRecorder) GetWorkReadyLabel(nsn any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).GetWorkReadyLabel), nsn) +} + +// GetWorkerPod mocks base method. +func (m *MockWorkerMgrAPI) GetWorkerPod(ctx context.Context, devConfig *v1alpha1.DeviceConfig, node *v1.Node) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkerPod", ctx, devConfig, node) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkerPod indicates an expected call of GetWorkerPod. +func (mr *MockWorkerMgrAPIMockRecorder) GetWorkerPod(ctx, devConfig, node any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerPod", reflect.TypeOf((*MockWorkerMgrAPI)(nil).GetWorkerPod), ctx, devConfig, node) +} + +// RemoveWorkReadyLabel mocks base method. +func (m *MockWorkerMgrAPI) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveWorkReadyLabel", ctx, logger, nsn, pod) +} + +// RemoveWorkReadyLabel indicates an expected call of RemoveWorkReadyLabel. +func (mr *MockWorkerMgrAPIMockRecorder) RemoveWorkReadyLabel(ctx, logger, nsn, pod any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).RemoveWorkReadyLabel), ctx, logger, nsn, pod) +} + +// Work mocks base method. 
+func (m *MockWorkerMgrAPI) Work(ctx context.Context, devConfig *v1alpha1.DeviceConfig, node *v1.Node) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Work", ctx, devConfig, node)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Work indicates an expected call of Work.
+func (mr *MockWorkerMgrAPIMockRecorder) Work(ctx, devConfig, node any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Work", reflect.TypeOf((*MockWorkerMgrAPI)(nil).Work), ctx, devConfig, node)
+}
diff --git a/internal/controllers/workermgr/scripts/vfio_bind.sh b/internal/controllers/workermgr/scripts/vfio_bind.sh
new file mode 100644
index 00000000..d1dfa4ac
--- /dev/null
+++ b/internal/controllers/workermgr/scripts/vfio_bind.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+PRODUCT_CODES=("7410" "74b5" "74b9") # 74b5 - MI300X, 7410 - MI210, 74b9 - MI325X
+
+for PRODUCT_CODE in "${PRODUCT_CODES[@]}"; do
+ COUNTER=0
+ DEVICES_PATHS=""
+
+ # Load VFIO PCI driver on GPU VF devices, if not done already
+ LSPCI_OUTPUT=$(lspci -nn -d 1002:${PRODUCT_CODE})
+
+ # Check if LSPCI_OUTPUT is empty
+ if [ -z "$LSPCI_OUTPUT" ]; then
+ continue
+ fi
+
+ while IFS= read -r LINE; do
+ PCI_ADDRESS=$(echo "$LINE" | awk '{print $1}')
+ VFIO_DRIVER=$(lspci -k -s "$PCI_ADDRESS" | grep -i vfio-pci | awk '{print $5}')
+ VFIO_DEVICE="0000:$PCI_ADDRESS"
+ if [ "$VFIO_DRIVER" != "vfio-pci" ]; then
+ if [ $COUNTER -eq 0 ]; then
+ # Load the VFIO PCI driver for all GPUs
+ modprobe vfio_iommu_type1 allow_unsafe_interrupts
+ modprobe vfio_pci disable_idle_d3=1
+ bash -c "echo 1 > /sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts"
+ bash -c "echo 1002 ${PRODUCT_CODE} > /sys/bus/pci/drivers/vfio-pci/new_id"
+ fi
+ fi
+ # Check if IOMMU entry found for each GPU (VFIO device)
+ IOMMU_GROUP=$(readlink -f /sys/bus/pci/devices/${VFIO_DEVICE}/iommu_group | awk -F '/' '{print $NF}')
+ if [ -e "/dev/vfio/$IOMMU_GROUP" ]; then
+ chown "$UID:$UID" /dev/vfio/$IOMMU_GROUP
+ else
+ echo "Error: IOMMU entry not found for GPU VF Device: $VFIO_DEVICE, IOMMU Group: $IOMMU_GROUP"
+ exit 1
+ fi
+ DEVICES_PATHS+="path=/sys/bus/pci/devices/$VFIO_DEVICE "
+ ((COUNTER++))
+ echo "Group_ID=${IOMMU_GROUP} BUS_ID=${VFIO_DEVICE}"
+ done <<< "$LSPCI_OUTPUT"
+done
\ No newline at end of file
diff --git a/internal/controllers/workermgr/scripts/vfio_unbind.sh b/internal/controllers/workermgr/scripts/vfio_unbind.sh
new file mode 100644
index 00000000..ae27ca24
--- /dev/null
+++ b/internal/controllers/workermgr/scripts/vfio_unbind.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+PRODUCT_CODES=("7410" "74b5" "74b9") # 74b5 - MI300X, 7410 - MI210, 74b9 - MI325X
+
+for PRODUCT_CODE in "${PRODUCT_CODES[@]}"; do
+ COUNTER=0
+ DEVICES_PATHS=""
+
+ # Unbind GPU VF devices from the VFIO PCI driver, if currently bound
+ LSPCI_OUTPUT=$(lspci -nn -d 1002:${PRODUCT_CODE})
+
+ # Check if LSPCI_OUTPUT is empty
+ if [ -z "$LSPCI_OUTPUT" ]; then
+ continue
+ fi
+
+ while IFS= read -r LINE; do
+ PCI_ADDRESS=$(echo "$LINE" | awk '{print $1}')
+ VFIO_DRIVER=$(lspci -k -s "$PCI_ADDRESS" | grep -i vfio-pci | awk '{print $5}')
+ VFIO_DEVICE="0000:$PCI_ADDRESS"
+ if [ "$VFIO_DRIVER" == "vfio-pci" ]; then
+ if [ $COUNTER -eq 0 ]; then
+ # Unload the VFIO PCI driver for all GPUs
+ bash -c "echo 1002 ${PRODUCT_CODE} > /sys/bus/pci/drivers/vfio-pci/remove_id"
+ bash -c "echo ${VFIO_DEVICE} > /sys/bus/pci/drivers/vfio-pci/unbind"
+ fi
+ fi
+ DEVICES_PATHS+="path=/sys/bus/pci/devices/$VFIO_DEVICE "
+ ((COUNTER++))
+ IOMMU_GROUP=$(readlink -f 
/sys/bus/pci/devices/${VFIO_DEVICE}/iommu_group | awk -F '/' '{print $NF}') + echo "Group_ID=${IOMMU_GROUP} BUS_ID=${VFIO_DEVICE}" + done <<< "$LSPCI_OUTPUT" +done \ No newline at end of file diff --git a/internal/controllers/workermgr/workermgr.go b/internal/controllers/workermgr/workermgr.go new file mode 100644 index 00000000..0caa98fc --- /dev/null +++ b/internal/controllers/workermgr/workermgr.go @@ -0,0 +1,284 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workermgr + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + + "github.com/go-logr/logr" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" +) + +const ( + workerContainerName = "worker" +) + +var ( + //go:embed scripts/vfio_bind.sh + vfioBindScript string + //go:embed scripts/vfio_unbind.sh + vfioUnbindScript string +) + +//go:generate mockgen -source=workermgr.go -package=workermgr -destination=mock_workermgr.go WorkerMgrAPI +type WorkerMgrAPI interface { + // Work executes the work on given node via worker pod + Work(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error + // Cleanup cleanup the work on given node + Cleanup(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error + // GetWorkerPod fetches the worker pod info from cluster + GetWorkerPod(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) (*v1.Pod, error) + // Add a node label to mark that the work is completed + AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) + // GetWorkReadyLabel get the label key to mark that the work is completed + GetWorkReadyLabel(nsn types.NamespacedName) string + // Remove the node label that indicates the work is completed + RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) +} + +type workerMgr struct { + client client.Client + scheme *runtime.Scheme +} + +// NewWorkerMgr creates a new worker manager +func NewWorkerMgr(client client.Client, scheme *runtime.Scheme) WorkerMgrAPI { + processor := &workerMgr{ + client: client, + scheme: scheme, + } + return processor +} + +// Work executes the work on given node +func (w *workerMgr) Work(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error { + logger := log.FromContext(ctx) + loadWorker := w.getPodDef(devConfig, node.Name, utils.LoadVFIOAction) + opRes, err := controllerutil.CreateOrPatch(ctx, w.client, loadWorker, func() error { + return controllerutil.SetControllerReference(devConfig, loadWorker, w.scheme) + }) + if err == nil { + 
logger.Info("Reconciled worker", + "name", loadWorker.Name, "action", utils.LoadVFIOAction, "result", opRes) + } + return err +} + +// Cleanup cleanup the work on given node +func (w *workerMgr) Cleanup(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error { + logger := log.FromContext(ctx) + unloadWorker := w.getPodDef(devConfig, node.Name, utils.UnloadVFIOAction) + opRes, err := controllerutil.CreateOrPatch(ctx, w.client, unloadWorker, func() error { + return controllerutil.SetControllerReference(devConfig, unloadWorker, w.scheme) + }) + if err == nil { + logger.Info("Reconciled cleaner", + "name", unloadWorker.Name, "action", utils.UnloadVFIOAction, "result", opRes) + } + return err +} + +func (w *workerMgr) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { + node := v1.Node{} + err := w.client.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, &node) + if err != nil { + logger.Error(err, fmt.Sprintf("failed to get node resource %+v", pod.Spec.NodeName)) + return + } + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]string{ + w.GetWorkReadyLabel(nsn): "", + }, + }, + } + w.patchNode(ctx, patch, &node, logger) +} + +func (w *workerMgr) GetWorkReadyLabel(nsn types.NamespacedName) string { + return fmt.Sprintf(utils.VFIOMountReadyLabelTemplate, nsn.Namespace, nsn.Name) +} + +func (w *workerMgr) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { + node := v1.Node{} + err := w.client.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, &node) + if err != nil { + logger.Error(err, fmt.Sprintf("failed to get node resource %+v", pod.Spec.NodeName)) + return + } + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + w.GetWorkReadyLabel(nsn): nil, + }, + }, + } + w.patchNode(ctx, patch, &node, logger) +} + +func (w *workerMgr) patchNode(ctx context.Context, patch map[string]interface{}, node *v1.Node, logger logr.Logger) { + patchBytes, err := json.Marshal(patch) + if err != nil { + logger.Error(err, fmt.Sprintf("Failed to marshal node label patch: %+v", err)) + return + } + rawPatch := client.RawPatch(types.StrategicMergePatchType, patchBytes) + if err := w.client.Patch(ctx, node, rawPatch); err != nil { + logger.Error(err, fmt.Sprintf("Failed to patch node label: %+v", err)) + return + } +} + +func (w *workerMgr) GetWorkerPod(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) (*v1.Pod, error) { + // get the existing post process worker pod + // based on the pod status, determine to do proper action + pod := &v1.Pod{} + err := w.client.Get(ctx, types.NamespacedName{ + Namespace: devConfig.Namespace, + Name: w.getPodName(devConfig, node.Name), + }, pod) + if err != nil { + return nil, err + } + return pod, nil +} + +func (w *workerMgr) getPodName(devConfig *amdv1alpha1.DeviceConfig, nodeName string) string { + return fmt.Sprintf("worker-%v-%v", devConfig.Name, nodeName) +} + +// getPodSpec generate the pod definition for worker +func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, action string) *v1.Pod { + // pod name + podName := w.getPodName(devConfig, nodeName) + // worker image + utilsContainerImage := utils.DefaultUtilsImage + if devConfig.Spec.CommonConfig.UtilsContainer.Image != "" { + utilsContainerImage = devConfig.Spec.CommonConfig.UtilsContainer.Image + } + // container command + var command []string + 
switch action { + case utils.LoadVFIOAction: + command = []string{"/bin/bash", "-c", vfioBindScript} + case utils.UnloadVFIOAction: + command = []string{"/bin/bash", "-c", vfioUnbindScript} + } + + // mount necessary folders + hostPathDirectory := v1.HostPathDirectory + volumes := []v1.Volume{ + { + Name: "sys", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/sys", + Type: &hostPathDirectory, + }, + }, + }, + { + Name: "lib", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/lib/modules", + Type: &hostPathDirectory, + }, + }, + }, + { + Name: "dev", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/dev", + Type: &hostPathDirectory, + }, + }, + }, + } + volumeMounts := []v1.VolumeMount{ + { + Name: "lib", + MountPath: "/lib/modules", + }, + { + Name: "sys", + MountPath: "/sys", + }, + { + Name: "dev", + MountPath: "/dev", + }, + } + + worker := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: devConfig.Namespace, + Labels: map[string]string{ + utils.WorkerActionLabelKey: action, + }, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + Containers: []v1.Container{ + { + Name: workerContainerName, + Image: utilsContainerImage, + Command: command, + SecurityContext: &v1.SecurityContext{ + RunAsUser: ptr.To(int64(0)), + Privileged: ptr.To(true), + }, + VolumeMounts: volumeMounts, + }, + }, + RestartPolicy: v1.RestartPolicyOnFailure, + Volumes: volumes, + }, + } + + // add image pull policy if specified + if devConfig.Spec.CommonConfig.UtilsContainer.ImagePullPolicy != "" { + worker.Spec.Containers[0].ImagePullPolicy = v1.PullPolicy(devConfig.Spec.CommonConfig.UtilsContainer.ImagePullPolicy) + } + // add image pull secret if specified + if devConfig.Spec.CommonConfig.UtilsContainer.ImageRegistrySecret != nil { + worker.Spec.ImagePullSecrets = []v1.LocalObjectReference{ + *devConfig.Spec.CommonConfig.UtilsContainer.ImageRegistrySecret, + } + } + + return worker +} diff --git a/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu index 016647b2..8ae1e8ce 100644 --- a/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu +++ b/internal/kmmmodule/dockerfiles/vGPUHostGIM.ubuntu @@ -11,6 +11,7 @@ RUN apt-get update && \ dkms \ autoconf \ automake \ + wget \ linux-headers-${KERNEL_FULL_VERSION} && \ apt-get clean diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go index 9203e785..cc2c9a94 100644 --- a/internal/kmmmodule/kmmmodule.go +++ b/internal/kmmmodule/kmmmodule.go @@ -44,16 +44,15 @@ import ( "github.com/go-logr/logr" kmmv1beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1" - "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" + kmmLabels "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" "golang.org/x/exp/maps" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -67,8 +66,8 @@ const ( kubeletDevicePluginsVolumeName = "kubelet-device-plugins" kubeletDevicePluginsPath = "/var/lib/kubelet/device-plugins" nodeVarLibFirmwarePath = 
"/var/lib/firmware" - gpuDriverModuleName = "amdgpu" - vGPUHostDriverModuleName = "gim" + ContainerDriverModuleName = "amdgpu" + VFPassthroughDriverModuleName = "gim" ttmModuleName = "amdttm" kclModuleName = "amdkcl" imageFirmwarePath = "firmwareDir/updates" @@ -287,7 +286,7 @@ func (km *kmmModule) SetDevicePluginAsDesired(ds *appsv1.DaemonSet, devConfig *a nodeSelector[key] = val } if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { - nodeSelector[labels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = "" + nodeSelector[kmmLabels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = "" } imagePullSecrets := []v1.LocalObjectReference{} if devConfig.Spec.DevicePlugin.ImageRegistrySecret != nil { @@ -433,13 +432,13 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * } var modLoadingOrder []string - var moduleName = gpuDriverModuleName + var moduleName = ContainerDriverModuleName if !isOpenshift { // specify this order fror k8s in order to make sure amdttm and amdkcl was properly cleaned up after deletion of CR // module will be loaded in this order: amdkcl, amdttm, amdgpu // module will be unloaded in this order: amdgpu, amdttm, amdkcl modLoadingOrder = []string{ - gpuDriverModuleName, + ContainerDriverModuleName, ttmModuleName, kclModuleName, } @@ -448,7 +447,7 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * firmwarePath := imageFirmwarePath switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: - moduleName = vGPUHostDriverModuleName + moduleName = VFPassthroughDriverModuleName modLoadingOrder = []string{} firmwarePath = "" } @@ -615,8 +614,11 @@ func getKM(devConfig *amdv1alpha1.DeviceConfig, node v1.Node, inTreeModuleToRemo } func addNodeInfoSuffixToImageTag(imgStr, osName, driversVersion string, devCfg *amdv1alpha1.DeviceConfig) string { + // if driver is vGPU host, different GPU model's driver image would be different + // need to add a suffix to distinguish them + driverTypeInfo := utils.GetDriverTypeTag(devCfg) // KMM will render and fulfill the value of ${KERNEL_FULL_VERSION} - tag := osName + "-${KERNEL_FULL_VERSION}-" + driversVersion + tag := osName + "-${KERNEL_FULL_VERSION}" + driverTypeInfo + "-" + driversVersion // tag cannot be more than 128 chars if len(tag) > 128 { tag = tag[len(tag)-128:] @@ -687,20 +689,16 @@ func ubuntuCMNameMapper(osImageStr string) string { return fmt.Sprintf("%s-%s", os, trimmedVersion) } -func GetK8SNodes(ls string) (*v1.NodeList, error) { - config, err := rest.InClusterConfig() - if err != nil { - return nil, err +func GetK8SNodes(ctx context.Context, cli client.Client, labelSelector labels.Selector) (*v1.NodeList, error) { + options := &client.ListOptions{ + LabelSelector: labelSelector, } - // creates the clientset - clientset, err := kubernetes.NewForConfig(config) + nodeList := &v1.NodeList{} + err := cli.List(ctx, nodeList, options) if err != nil { return nil, err } - options := metav1.ListOptions{ - LabelSelector: ls, - } - return clientset.CoreV1().Nodes().List(context.TODO(), options) + return nodeList, nil } func MapToLabelSelector(selector map[string]string) string { diff --git a/internal/utils.go b/internal/utils.go index 656929bc..4e7b7a54 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -19,6 +19,7 @@ package utils import ( "context" "fmt" + "regexp" "strings" "github.com/go-logr/logr" @@ -32,6 +33,7 @@ import ( ) const ( + KindDeviceConfig = "DeviceConfig" 
defaultOcDriversVersion = "6.2.2" openShiftNodeLabel = "node.openshift.io/os_id" NodeFeatureLabelAmdGpu = "feature.node.kubernetes.io/amd-gpu" @@ -48,6 +50,18 @@ const ( // kubevirt DriverTypeContainer = "container" DriverTypeVFPassthrough = "vf-passthrough" + DefaultUtilsImage = "docker.io/rocm/gpu-operator-utils:latest" + // workerMgr related labels + LoadVFIOAction = "loadVFIO" + UnloadVFIOAction = "unloadVFIO" + WorkerActionLabelKey = "gpu.operator.amd.com/worker-action" + VFIOMountReadyLabelTemplate = "gpu.operator.amd.com/%v.%v.vfio.ready" + KMMModuleReadyLabelTemplate = "kmm.node.kubernetes.io/%v.%v.ready" + // Operand metadata + MetricsExporterNameSuffix = "-metrics-exporter" + TestRunnerNameSuffix = "-test-runner" + DevicePluginNameSuffix = "-device-plugin" + NodeLabellerNameSuffix = "-node-labeller" ) var ( @@ -212,3 +226,38 @@ func IsPrometheusServiceMonitorEnable(devConfig *amdv1alpha1.DeviceConfig) bool } return false } + +func GetDriverTypeTag(devCfg *amdv1alpha1.DeviceConfig) string { + driverTypeTag := "" + switch devCfg.Spec.Driver.DriverType { + case DriverTypeVFPassthrough: + driverTypeTag = "-" + DriverTypeVFPassthrough + case DriverTypeContainer: + driverTypeTag = "-" + DriverTypeContainer + } + return driverTypeTag +} + +func generateRegexPattern(template string) string { + // Escape dots + pattern := strings.Replace(template, ".", `\.`, -1) + // Replace %v with .+ to match any valid characters + pattern = strings.Replace(pattern, "%v", "([^.]+)", 1) + pattern = strings.Replace(pattern, "%v", "(.+)", 1) + // Add start and end anchors + pattern = "^" + pattern + "$" + return pattern +} + +func HasNodeLabelTemplateMatch(nodeLabels map[string]string, template string) (bool, string, string, string) { + pattern := generateRegexPattern(template) + re := regexp.MustCompile(pattern) + // Check each label key against the pattern + for key := range nodeLabels { + matches := re.FindStringSubmatch(key) + if len(matches) >= 3 { + return true, key, matches[1], matches[2] + } + } + return false, "", "", "" +} diff --git a/internal/utils_container/Dockerfile b/internal/utils_container/Dockerfile index a40f740b..9a65b9e2 100644 --- a/internal/utils_container/Dockerfile +++ b/internal/utils_container/Dockerfile @@ -1,7 +1,7 @@ FROM registry.access.redhat.com/ubi9/ubi-minimal:9.3 # Install nsenter from util-linux package -RUN microdnf install -y util-linux && \ +RUN microdnf install -y util-linux pciutils && \ cp /usr/bin/nsenter /nsenter && \ microdnf clean all diff --git a/internal/utils_test.go b/internal/utils_test.go index 53324168..12290f2c 100644 --- a/internal/utils_test.go +++ b/internal/utils_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package utils import ( + "fmt" "reflect" "testing" @@ -144,3 +145,53 @@ func TestRemoveOldNodeLabels(t *testing.T) { } } } + +func TestHasNodeLabelTemplateMatch(t *testing.T) { + testCases := []struct { + Namespace string + Name string + }{ + { + Namespace: "kube-amd-gpu", + Name: "test-config", + }, + { + Namespace: "openshift-amd-gpu", + Name: "test-config", + }, + { + Namespace: "amd-gpu", + Name: "test-config123", + }, + { + Namespace: "amd-gpu", + Name: "test-config.123", + }, + { + Namespace: "test-amd-gpu", + Name: "testconfig.123", + }, + { + Namespace: "ns", + Name: "test-config.1.2.3", + }, + } + + templates := []string{VFIOMountReadyLabelTemplate, KMMModuleReadyLabelTemplate} + + for _, tc := range testCases { + for _, template := range templates { + nodeLabels := map[string]string{fmt.Sprintf(template, tc.Namespace, tc.Name): ""} + found, key, namespace, name := HasNodeLabelTemplateMatch(nodeLabels, template) + + if !found { + t.Errorf("Expected matched label key, but got mismatch for %+v", nodeLabels) + } + + if namespace != tc.Namespace || name != tc.Name { + t.Errorf("Expected namespace %v and name %v, but got namespace %v and name %v", tc.Namespace, tc.Name, namespace, name) + } + t.Logf("Matched label key: %s, namespace: %s, name: %s", key, namespace, name) + } + } +} From 2d39576109a6c975ab7bd5177f8bca7519c8c3d4 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Thu, 8 May 2025 21:00:10 +0000 Subject: [PATCH 07/21] Misc optimization on handling VFIO device mount --- .../controllers/device_config_reconciler.go | 13 +++++- internal/controllers/watchers/node.go | 14 +++++- internal/controllers/watchers/pod.go | 4 +- .../controllers/workermgr/mock_workermgr.go | 16 +++---- internal/controllers/workermgr/workermgr.go | 46 +++++++++++++++---- internal/utils.go | 5 +- 6 files changed, 74 insertions(+), 24 deletions(-) diff --git a/internal/controllers/device_config_reconciler.go b/internal/controllers/device_config_reconciler.go index 6b4928dc..98be86f7 100644 --- a/internal/controllers/device_config_reconciler.go +++ b/internal/controllers/device_config_reconciler.go @@ -848,17 +848,26 @@ func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx pod, err := dcrh.kmmPostProcessor.GetWorkerPod(ctx, devConfig, &node) if err == nil { logger.Info(fmt.Sprintf("post-process worker pod %+v still exist on node %+v", pod.Name, node.Name)) + if err := dcrh.client.Delete(ctx, pod); err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "failed to delete existing worker pod") + } return false } if !k8serrors.IsNotFound(err) { logger.Error(err, fmt.Sprintf("failed to get post-process worker pod on node %+v", node.Name)) return false } - if _, ok := node.Labels[dcrh.kmmPostProcessor.GetWorkReadyLabel(types.NamespacedName{ + vfioReadyLabel := dcrh.kmmPostProcessor.GetWorkReadyLabel(types.NamespacedName{ Namespace: devConfig.Namespace, Name: devConfig.Name, - })]; ok { + }) + if _, ok := node.Labels[vfioReadyLabel]; ok { logger.Info(fmt.Sprintf("post-process label still exist on node %+v", node.Name)) + nodeCopy := node.DeepCopy() + delete(node.Labels, vfioReadyLabel) + if err := dcrh.client.Patch(ctx, &node, client.MergeFrom(nodeCopy)); err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "failed to remove vfio ready label from node", "node", node.Name) + } return false } } diff --git a/internal/controllers/watchers/node.go b/internal/controllers/watchers/node.go index d6cfc473..5bbe5546 100644 --- a/internal/controllers/watchers/node.go +++ 
b/internal/controllers/watchers/node.go
@@ -153,7 +153,7 @@ func (h *NodeEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q
 !reflect.DeepEqual(oldNode.Labels, newNode.Labels) {
 h.reconcileAllDeviceConfigs(ctx, q)
 }
- h.handlePostProcess(ctx, logger, newNode)
+ h.handlePostProcess(ctx, logger, oldNode, newNode)
 }
 
 func (h *NodeEventHandler) reconcileAllDeviceConfigs(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
@@ -207,7 +207,7 @@ func (h *NodeEventHandler) reconcileRelatedDeviceConfig(ctx context.Context, obj
 }
 }
 
-func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Logger, node *v1.Node) {
+func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Logger, oldNode, node *v1.Node) {
 hasVFIOReadyLabel, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.VFIOMountReadyLabelTemplate)
 hasModuleReadyLabel, moduleLabel, moduleDevConfigNamespace, moduleDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.KMMModuleReadyLabelTemplate)
 if hasModuleReadyLabel && !hasVFIOReadyLabel {
@@ -248,5 +248,15 @@ func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Lo
 if err := h.workerMgr.Cleanup(ctx, devConfig, node); err != nil {
 logger.Error(err, "failed to create cleanup worker pod")
 }
+ } else if hasModuleReadyLabel && hasVFIOReadyLabel &&
+ oldNode.Status.NodeInfo.BootID != node.Status.NodeInfo.BootID {
+ // if the node was rebooted
+ // don't wait for KMM to remove the module ready label and then for workermgr to trigger an unload worker
+ // directly remove the VFIO ready label
+ // so that the event handler will bring up a new worker pod to load the device into VFIO
+ h.workerMgr.RemoveWorkReadyLabel(ctx, logger, types.NamespacedName{
+ Namespace: vfioDevConfigNamespace,
+ Name: vfioDevConfigName,
+ }, node.Name)
 }
 }
diff --git a/internal/controllers/watchers/pod.go b/internal/controllers/watchers/pod.go
index 24c9efdc..d4685bc3 100644
--- a/internal/controllers/watchers/pod.go
+++ b/internal/controllers/watchers/pod.go
@@ -155,9 +155,9 @@ func (h *PodEventHandler) handleWorkerMgrPodEvt(ctx context.Context, logger logr
 // modify the node label based on action
 switch action {
 case utils.LoadVFIOAction:
- h.workerMgr.AddWorkReadyLabel(ctx, logger, nsn, pod)
+ h.workerMgr.AddWorkReadyLabel(ctx, logger, nsn, pod.Spec.NodeName)
 case utils.UnloadVFIOAction:
- h.workerMgr.RemoveWorkReadyLabel(ctx, logger, nsn, pod)
+ h.workerMgr.RemoveWorkReadyLabel(ctx, logger, nsn, pod.Spec.NodeName)
 }
 // remove the completed pod
 logger.Info(fmt.Sprintf("remove worker pod %v after its completion", pod.Name))
diff --git a/internal/controllers/workermgr/mock_workermgr.go b/internal/controllers/workermgr/mock_workermgr.go
index 16b2e838..1df171ae 100644
--- a/internal/controllers/workermgr/mock_workermgr.go
+++ b/internal/controllers/workermgr/mock_workermgr.go
@@ -59,15 +59,15 @@ func (m *MockWorkerMgrAPI) EXPECT() *MockWorkerMgrAPIMockRecorder {
 }
 
 // AddWorkReadyLabel mocks base method. 
-func (m *MockWorkerMgrAPI) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { +func (m *MockWorkerMgrAPI) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddWorkReadyLabel", ctx, logger, nsn, pod) + m.ctrl.Call(m, "AddWorkReadyLabel", ctx, logger, nsn, nodeName) } // AddWorkReadyLabel indicates an expected call of AddWorkReadyLabel. -func (mr *MockWorkerMgrAPIMockRecorder) AddWorkReadyLabel(ctx, logger, nsn, pod any) *gomock.Call { +func (mr *MockWorkerMgrAPIMockRecorder) AddWorkReadyLabel(ctx, logger, nsn, nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).AddWorkReadyLabel), ctx, logger, nsn, pod) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).AddWorkReadyLabel), ctx, logger, nsn, nodeName) } // Cleanup mocks base method. @@ -114,15 +114,15 @@ func (mr *MockWorkerMgrAPIMockRecorder) GetWorkerPod(ctx, devConfig, node any) * } // RemoveWorkReadyLabel mocks base method. -func (m *MockWorkerMgrAPI) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { +func (m *MockWorkerMgrAPI) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveWorkReadyLabel", ctx, logger, nsn, pod) + m.ctrl.Call(m, "RemoveWorkReadyLabel", ctx, logger, nsn, nodeName) } // RemoveWorkReadyLabel indicates an expected call of RemoveWorkReadyLabel. -func (mr *MockWorkerMgrAPIMockRecorder) RemoveWorkReadyLabel(ctx, logger, nsn, pod any) *gomock.Call { +func (mr *MockWorkerMgrAPIMockRecorder) RemoveWorkReadyLabel(ctx, logger, nsn, nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).RemoveWorkReadyLabel), ctx, logger, nsn, pod) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWorkReadyLabel", reflect.TypeOf((*MockWorkerMgrAPI)(nil).RemoveWorkReadyLabel), ctx, logger, nsn, nodeName) } // Work mocks base method. 
diff --git a/internal/controllers/workermgr/workermgr.go b/internal/controllers/workermgr/workermgr.go index 0caa98fc..90ac55e3 100644 --- a/internal/controllers/workermgr/workermgr.go +++ b/internal/controllers/workermgr/workermgr.go @@ -38,6 +38,7 @@ import ( const ( workerContainerName = "worker" + initContainerName = "pci-device-detector" ) var ( @@ -56,11 +57,11 @@ type WorkerMgrAPI interface { // GetWorkerPod fetches the worker pod info from cluster GetWorkerPod(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) (*v1.Pod, error) // Add a node label to mark that the work is completed - AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) + AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) // GetWorkReadyLabel get the label key to mark that the work is completed GetWorkReadyLabel(nsn types.NamespacedName) string // Remove the node label that indicates the work is completed - RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) + RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) } type workerMgr struct { @@ -105,11 +106,11 @@ func (w *workerMgr) Cleanup(ctx context.Context, devConfig *amdv1alpha1.DeviceCo return err } -func (w *workerMgr) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { +func (w *workerMgr) AddWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) { node := v1.Node{} - err := w.client.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, &node) + err := w.client.Get(ctx, types.NamespacedName{Name: nodeName}, &node) if err != nil { - logger.Error(err, fmt.Sprintf("failed to get node resource %+v", pod.Spec.NodeName)) + logger.Error(err, fmt.Sprintf("failed to get node resource %+v", nodeName)) return } patch := map[string]interface{}{ @@ -126,11 +127,11 @@ func (w *workerMgr) GetWorkReadyLabel(nsn types.NamespacedName) string { return fmt.Sprintf(utils.VFIOMountReadyLabelTemplate, nsn.Namespace, nsn.Name) } -func (w *workerMgr) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, pod *v1.Pod) { +func (w *workerMgr) RemoveWorkReadyLabel(ctx context.Context, logger logr.Logger, nsn types.NamespacedName, nodeName string) { node := v1.Node{} - err := w.client.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, &node) + err := w.client.Get(ctx, types.NamespacedName{Name: nodeName}, &node) if err != nil { - logger.Error(err, fmt.Sprintf("failed to get node resource %+v", pod.Spec.NodeName)) + logger.Error(err, fmt.Sprintf("failed to get node resource %+v", nodeName)) return } patch := map[string]interface{}{ @@ -238,6 +239,29 @@ func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, act }, } + // init container + initContainers := []v1.Container{} + switch action { + case utils.LoadVFIOAction: + // for loading device to VFIO driver + // need to use init container to make sure the device exists + initContainers = []v1.Container{ + { + Name: initContainerName, + Image: utilsContainerImage, + Command: []string{"sh", "-c", "while ! 
lspci -nn | grep -q -e 7410 -e 74b5 -e 74b9; do echo \"PCI device not found\"; sleep 2; done"}, + SecurityContext: &v1.SecurityContext{ + RunAsUser: ptr.To(int64(0)), + Privileged: ptr.To(true), + }, + VolumeMounts: volumeMounts, + }, + } + // for unloading device from VFIO + // VF devices are already removed due to the removal of GIM driver + // no need to use an init container to detect them + } + worker := &v1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -251,7 +275,8 @@ func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, act }, }, Spec: v1.PodSpec{ - NodeName: nodeName, + NodeName: nodeName, + InitContainers: initContainers, Containers: []v1.Container{ { Name: workerContainerName, @@ -272,6 +297,9 @@ func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, act // add image pull policy if specified if devConfig.Spec.CommonConfig.UtilsContainer.ImagePullPolicy != "" { worker.Spec.Containers[0].ImagePullPolicy = v1.PullPolicy(devConfig.Spec.CommonConfig.UtilsContainer.ImagePullPolicy) + if len(worker.Spec.InitContainers) > 0 { + worker.Spec.InitContainers[0].ImagePullPolicy = v1.PullPolicy(devConfig.Spec.CommonConfig.UtilsContainer.ImagePullPolicy) + } } // add image pull secret if specified if devConfig.Spec.CommonConfig.UtilsContainer.ImageRegistrySecret != nil { diff --git a/internal/utils.go b/internal/utils.go index 4e7b7a54..ac816952 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -233,7 +233,10 @@ func GetDriverTypeTag(devCfg *amdv1alpha1.DeviceConfig) string { case DriverTypeVFPassthrough: driverTypeTag = "-" + DriverTypeVFPassthrough case DriverTypeContainer: - driverTypeTag = "-" + DriverTypeContainer + // when the driver type is container + // don't add any driver type inside the driver image tag + // in order to make sure driver image tag is backward compatible + // so that the driver image tag built before KubeVirt integration could still apply } return driverTypeTag } From 9d6fa15ee521763392c0615680c037bd55cd2224 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Thu, 8 May 2025 23:52:58 +0000 Subject: [PATCH 08/21] Add graceperiod for worker pod --- internal/controllers/device_config_reconciler.go | 2 +- internal/controllers/workermgr/workermgr.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/controllers/device_config_reconciler.go b/internal/controllers/device_config_reconciler.go index 98be86f7..fa607b01 100644 --- a/internal/controllers/device_config_reconciler.go +++ b/internal/controllers/device_config_reconciler.go @@ -848,7 +848,7 @@ func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx pod, err := dcrh.kmmPostProcessor.GetWorkerPod(ctx, devConfig, &node) if err == nil { logger.Info(fmt.Sprintf("post-process worker pod %+v still exist on node %+v", pod.Name, node.Name)) - if err := dcrh.client.Delete(ctx, pod); err != nil && !k8serrors.IsNotFound(err) { + if err := dcrh.client.Delete(ctx, pod, &client.DeleteOptions{GracePeriodSeconds: &workermgr.WorkerPodGracePeriod}); err != nil && !k8serrors.IsNotFound(err) { logger.Error(err, "failed to delete existing worker pod") } return false diff --git a/internal/controllers/workermgr/workermgr.go b/internal/controllers/workermgr/workermgr.go index 90ac55e3..2fa919c1 100644 --- a/internal/controllers/workermgr/workermgr.go +++ b/internal/controllers/workermgr/workermgr.go @@ -46,6 +46,8 @@ var ( vfioBindScript string //go:embed scripts/vfio_unbind.sh vfioUnbindScript string + + WorkerPodGracePeriod int64 = 2 ) 
//go:generate mockgen -source=workermgr.go -package=workermgr -destination=mock_workermgr.go WorkerMgrAPI From 6d61d0630ceef6e7e434ce499a745b56a3eb99f7 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Fri, 9 May 2025 23:25:13 +0000 Subject: [PATCH 09/21] Add basic GIM driver deployment and vfio-pci mount e2e test --- tests/e2e/Makefile | 4 ++ tests/e2e/init.go | 30 +++++++---- tests/e2e/kubevirt_test.go | 105 +++++++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 10 deletions(-) create mode 100644 tests/e2e/kubevirt_test.go diff --git a/tests/e2e/Makefile b/tests/e2e/Makefile index 538eb152..e249d3a1 100644 --- a/tests/e2e/Makefile +++ b/tests/e2e/Makefile @@ -14,6 +14,8 @@ E2E_NODE_LABELLER_IMAGE ?= rocm/k8s-device-plugin:labeller-latest E2E_DEVICE_PLUGIN_IMAGE_2 ?= rocm/k8s-device-plugin:1.31.0.6 E2E_NODE_LABELLER_IMAGE_2 ?= rocm/k8s-device-plugin:labeller-1.31.0.6 E2E_TEST_RUNNER_IMAGE ?= rocm/test-runner:v1.3.1 +E2E_KUBEVIRT_DEVICE_PLUGIN_IMAGE ?= rocm/k8s-device-plugin:latest +E2E_KUBEVIRT_NODE_LABELLER_IMAGE ?= rocm/k8s-device-plugin:labeller-latest export E2E_INIT_CONTAINER_IMAGE export E2E_KUBE_RBAC_PROXY_CURL_IMAGE @@ -26,6 +28,8 @@ export E2E_NODE_LABELLER_IMAGE export E2E_DEVICE_PLUGIN_IMAGE_2 export E2E_NODE_LABELLER_IMAGE_2 export E2E_TEST_RUNNER_IMAGE +export E2E_KUBEVIRT_DEVICE_PLUGIN_IMAGE +export E2E_KUBEVIRT_NODE_LABELLER_IMAGE export E2E_DCM_IMAGE export E2E_NODEAPP_IMG diff --git a/tests/e2e/init.go b/tests/e2e/init.go index 973cedb6..4c061a4f 100644 --- a/tests/e2e/init.go +++ b/tests/e2e/init.go @@ -22,16 +22,18 @@ import ( ) var ( - initContainerImage string - kubeRbacProxyCurlImage string - exporterImage string - exporterImage2 string - devicePluginImage string - nodeLabellerImage string - devicePluginImage2 string - nodeLabellerImage2 string - testRunnerImage string - driverImageRepo string + initContainerImage string + kubeRbacProxyCurlImage string + exporterImage string + exporterImage2 string + devicePluginImage string + nodeLabellerImage string + devicePluginImage2 string + nodeLabellerImage2 string + testRunnerImage string + driverImageRepo string + kubeVirtHostDevicePluginImage string + kubeVirtHostNodeLabellerImage string ) func init() { @@ -77,4 +79,12 @@ func init() { if !ok { log.Fatalf("E2E_DRIVER_IMAGE_REPO is not defined. Please prepare a iamge registry repo to store your driver image and put the image repo URL into E2E_DRIVER_IMAGE_REPO. E.g. docker.io//amdgpu-driver-image") } + kubeVirtHostDevicePluginImage, ok = os.LookupEnv("E2E_KUBEVIRT_DEVICE_PLUGIN_IMAGE") + if !ok { + log.Fatalf("E2E_KUBEVIRT_DEVICE_PLUGIN_IMAGE is not defined.") + } + kubeVirtHostNodeLabellerImage, ok = os.LookupEnv("E2E_KUBEVIRT_NODE_LABELLER_IMAGE") + if !ok { + log.Fatalf("E2E_KUBEVIRT_NODE_LABELLER_IMAGE is not defined.") + } } diff --git a/tests/e2e/kubevirt_test.go b/tests/e2e/kubevirt_test.go new file mode 100644 index 00000000..4a039482 --- /dev/null +++ b/tests/e2e/kubevirt_test.go @@ -0,0 +1,105 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the \"License\"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an \"AS IS\" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	. "gopkg.in/check.v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/ROCm/gpu-operator/api/v1alpha1"
+	utils "github.com/ROCm/gpu-operator/internal"
+	"github.com/ROCm/gpu-operator/internal/controllers/workermgr"
+)
+
+func (s *E2ESuite) verifyVFIOReadyLabel(devCfg *v1alpha1.DeviceConfig, expectLabel bool, c *C) {
+	assert.Eventually(c, func() bool {
+		nodes, err := s.clientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+			LabelSelector: func() string {
+				s := []string{}
+				for k, v := range devCfg.Spec.Selector {
+					s = append(s, fmt.Sprintf("%v=%v", k, v))
+				}
+				return strings.Join(s, ",")
+			}(),
+		})
+		if err != nil {
+			logger.Errorf("failed to get nodes %v", err)
+			return false
+		}
+
+		workerMgr := workermgr.NewWorkerMgr(nil, nil)
+		vfioReadyLabel := workerMgr.GetWorkReadyLabel(types.NamespacedName{
+			Namespace: devCfg.Namespace,
+			Name:      devCfg.Name,
+		})
+		for _, node := range nodes.Items {
+			if expectLabel {
+				if _, ok := node.Labels[vfioReadyLabel]; !ok {
+					logger.Errorf("cannot find vfio ready label on node %v", node.Name)
+					return false
+				}
+			} else {
+				if _, ok := node.Labels[vfioReadyLabel]; ok {
+					logger.Errorf("vfio ready label still exists on node %v", node.Name)
+					return false
+				}
+			}
+		}
+		return true
+	}, 5*time.Minute, 3*time.Second)
+}
+
+func (s *E2ESuite) TestVFPassthroughDeployment(c *C) {
+	// only run this test case when all the worker nodes have an AMD GPU model supported by the GIM driver for VF passthrough
+	if s.simEnable {
+		c.Skip("Skipping for non amd gpu testbed")
+	}
+	_, err := s.dClient.DeviceConfigs(s.ns).Get(s.cfgName, metav1.GetOptions{})
+	assert.Errorf(c, err, fmt.Sprintf("config %v exists", s.cfgName))
+
+	logger.Infof("create %v", s.cfgName)
+	devCfg := s.getDeviceConfig(c)
+
+	enableDriver := true
+	enableNodeLabeller := true
+	devCfg.Spec.Driver.Enable = &enableDriver
+	devCfg.Spec.Driver.DriverType = utils.DriverTypeVFPassthrough
+	devCfg.Spec.Driver.Version = "8.0.0.K" // this is the version of the first open-source GIM driver release
+	devCfg.Spec.DevicePlugin.EnableNodeLabeller = &enableNodeLabeller
+	devCfg.Spec.DevicePlugin.DevicePluginImage = kubeVirtHostDevicePluginImage
+	devCfg.Spec.DevicePlugin.NodeLabellerImage = kubeVirtHostNodeLabellerImage
+
+	s.createDeviceConfig(devCfg, c)
+	s.checkNFDWorkerStatus(s.ns, c, "")
+	s.checkNodeLabellerStatus(s.ns, c, devCfg)
+	s.checkMetricsExporterStatus(devCfg, s.ns, v1.ServiceTypeClusterIP, c)
+	s.verifyDeviceConfigStatus(devCfg, c)
+	s.verifyNodeGPULabel(devCfg, c)
+	s.verifyVFIOReadyLabel(devCfg, true, c)
+
+	// delete
+	s.deleteDeviceConfig(devCfg, c)
+	s.verifyVFIOReadyLabel(devCfg, false, c)
+}

From ede4af7f32e8e600030c7caa2bb07d6102f37af3 Mon Sep 17 00:00:00 2001
From: Yan Sun
Date: Tue, 27 May 2025 16:22:26 -0700
Subject: [PATCH 10/21] [Feature] Add support for PF-Passthrough use case
 (#701)

* Proto change for adding pf-passthrough driver type

* Implementation to support pf-passthrough

* Add e2e test for pf-passthrough use case

* Address comments
---
 api/v1alpha1/deviceconfig_types.go            |   7 +-
 ...md-gpu-operator.clusterserviceversion.yaml |   7 +-
 bundle/manifests/amd.com_deviceconfigs.yaml   |   8 +-
 config/crd/bases/amd.com_deviceconfigs.yaml   |   8 +-
 ...md-gpu-operator.clusterserviceversion.yaml |   7 +-
helm-charts-k8s/crds/deviceconfig-crd.yaml | 8 +- .../crds/deviceconfig-crd.yaml | 8 +- internal/configmanager/configmanager.go | 15 ++- .../controllers/device_config_reconciler.go | 86 +++++++++------ .../device_config_reconciler_test.go | 2 + internal/controllers/watchers/node.go | 52 ++++----- .../workermgr/scripts/vfio_bind.sh | 18 +++- .../workermgr/scripts/vfio_unbind.sh | 14 +-- internal/controllers/workermgr/workermgr.go | 79 +++++++++++--- internal/kmmmodule/kmmmodule.go | 4 +- internal/metricsexporter/metricsexporter.go | 4 +- internal/nodelabeller/nodelabeller.go | 4 + internal/testrunner/testrunner.go | 4 +- internal/utils.go | 102 ++++++++++++++++-- internal/utils_container/Dockerfile | 4 +- internal/utils_test.go | 84 ++++++++++++++- internal/validator/specValidators.go | 15 ++- tests/e2e/kubevirt_test.go | 32 ++++++ 23 files changed, 454 insertions(+), 118 deletions(-) diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index d0a1858d..48bb773c 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -95,9 +95,12 @@ type DriverSpec struct { // +kubebuilder:default=true Enable *bool `json:"enable,omitempty"` - // specify the type of driver (container/vf-passthrough) to install on the worker node. default value is gpu. + // specify the type of driver (container/vf-passthrough/pf-passthrough) to install on the worker node. default value is container. + // container: normal amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. + // vf-passthrough: MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + // pf-passthrough: directly mount PF device to vfio-pci //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="DriverType",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:driverType"} - // +kubebuilder:validation:Enum=container;vf-passthrough + // +kubebuilder:validation:Enum=container;vf-passthrough;pf-passthrough // +kubebuilder:default=container DriverType string `json:"driverType,omitempty"` diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index ddbca3d7..500360d2 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -268,8 +268,11 @@ spec: path: driver.blacklist x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:blacklistDrivers - - description: specify the type of driver (container/vf-passthrough) to install - on the worker node. default value is gpu. + - description: 'specify the type of driver (container/vf-passthrough/pf-passthrough) + to install on the worker node. default value is container. container: normal + amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. vf-passthrough: + MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci' displayName: DriverType path: driver.driverType x-descriptors: diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index bb9ca0b0..da6b5083 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -370,11 +370,15 @@ spec: type: boolean driverType: default: container - description: specify the type of driver (container/vf-passthrough) - to install on the worker node. default value is gpu. 
+ description: |- + specify the type of driver (container/vf-passthrough/pf-passthrough) to install on the worker node. default value is container. + container: normal amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. + vf-passthrough: MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci enum: - container - vf-passthrough + - pf-passthrough type: string enable: default: true diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 8e1b5da4..7a3bb967 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -366,11 +366,15 @@ spec: type: boolean driverType: default: container - description: specify the type of driver (container/vf-passthrough) - to install on the worker node. default value is gpu. + description: |- + specify the type of driver (container/vf-passthrough/pf-passthrough) to install on the worker node. default value is container. + container: normal amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. + vf-passthrough: MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci enum: - container - vf-passthrough + - pf-passthrough type: string enable: default: true diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index ef135117..f6e08672 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -239,8 +239,11 @@ spec: path: driver.blacklist x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:blacklistDrivers - - description: specify the type of driver (container/vf-passthrough) to install - on the worker node. default value is gpu. + - description: 'specify the type of driver (container/vf-passthrough/pf-passthrough) + to install on the worker node. default value is container. container: normal + amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. vf-passthrough: + MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci' displayName: DriverType path: driver.driverType x-descriptors: diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 6d53b1ba..79a6d043 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -374,11 +374,15 @@ spec: type: boolean driverType: default: container - description: specify the type of driver (container/vf-passthrough) - to install on the worker node. default value is gpu. + description: |- + specify the type of driver (container/vf-passthrough/pf-passthrough) to install on the worker node. default value is container. + container: normal amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. 
+ vf-passthrough: MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci enum: - container - vf-passthrough + - pf-passthrough type: string enable: default: true diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 6d53b1ba..79a6d043 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -374,11 +374,15 @@ spec: type: boolean driverType: default: container - description: specify the type of driver (container/vf-passthrough) - to install on the worker node. default value is gpu. + description: |- + specify the type of driver (container/vf-passthrough/pf-passthrough) to install on the worker node. default value is container. + container: normal amdgpu-dkms driver for Bare Metal GPU nodes or guest VM. + vf-passthrough: MxGPU GIM driver on the host machine to generate VF, then mount VF to vfio-pci + pf-passthrough: directly mount PF device to vfio-pci enum: - container - vf-passthrough + - pf-passthrough type: string enable: default: true diff --git a/internal/configmanager/configmanager.go b/internal/configmanager/configmanager.go index 07c62a88..17c76812 100644 --- a/internal/configmanager/configmanager.go +++ b/internal/configmanager/configmanager.go @@ -36,7 +36,6 @@ import ( "fmt" "os" - amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" "github.com/rh-ecosystem-edge/kernel-module-management/pkg/labels" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -45,6 +44,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -166,7 +168,7 @@ func (nl *configManager) SetConfigManagerAsDesired(ds *appsv1.DaemonSet, devConf // only use module ready label as node selector when KMM driver is enabled useKMMDriver := false - if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { + if utils.ShouldUseKMM(devConfig) { nodeSelector[labels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = "" useKMMDriver = true } @@ -226,6 +228,13 @@ func (nl *configManager) SetConfigManagerAsDesired(ds *appsv1.DaemonSet, devConf serviceaccount := defaultSAName gracePeriod := int64(1) + initContainerCommand := "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! -d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done" + switch devConfig.Spec.Driver.DriverType { + case utils.DriverTypeVFPassthrough: + initContainerCommand = "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + case utils.DriverTypePFPassthrough: + initContainerCommand = "true" + } ds.Spec = appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: matchLabels}, Template: v1.PodTemplateSpec{ @@ -238,7 +247,7 @@ func (nl *configManager) SetConfigManagerAsDesired(ds *appsv1.DaemonSet, devConf { Name: "driver-init", Image: "busybox:1.36", - Command: []string{"sh", "-c", "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/class/kfd ] || [ ! 
-d /host-sys/module/amdgpu/drivers/ ]; do echo \"amdgpu driver is not loaded \"; sleep 2 ;done"},
+                            Command:         []string{"sh", "-c", initContainerCommand},
                             SecurityContext: &v1.SecurityContext{Privileged: ptr.To(true)},
                             VolumeMounts: []v1.VolumeMount{
                                 {
diff --git a/internal/controllers/device_config_reconciler.go b/internal/controllers/device_config_reconciler.go
index fa607b01..7ff07799 100644
--- a/internal/controllers/device_config_reconciler.go
+++ b/internal/controllers/device_config_reconciler.go
@@ -122,7 +122,7 @@ func (r *DeviceConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		// just reconcile the spec change or deletion
 		For(&amdv1alpha1.DeviceConfig{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})).
 		Owns(&v1.Service{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})).
-		Owns(&kmmv1beta1.Module{}, builder.WithPredicates(watchers.SpecChangedOrDeletionPredicate{})).
+		Owns(&kmmv1beta1.Module{}).
 		Watches(
 			// watch for owned daemonset, only update status
 			&appsv1.DaemonSet{}, r.daemonsetEventHandler,
@@ -462,7 +462,7 @@ func (dcrh *deviceConfigReconcilerHelper) hasSecretReference(secretName string,
 func (dcrh *deviceConfigReconcilerHelper) buildDeviceConfigStatus(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error {
 	// fetch DeviceConfig-owned custom resource
 	// then retrieve its status and put it to DeviceConfig's status fields
-	if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable {
+	if utils.ShouldUseKMM(devConfig) {
 		kmmModuleObj, err := dcrh.getDeviceConfigOwnedKMMModule(ctx, devConfig)
 		if err != nil {
 			return fmt.Errorf("failed to fetch owned kmm module for DeviceConfig %+v: %+v",
@@ -795,6 +795,9 @@ func (dcrh *deviceConfigReconcilerHelper) finalizeDeviceConfig(ctx context.Conte
 	}
 
 	// finalize KMM CR of managing out-of-tree kernel module
+	if err := utils.UpdateDriverTypeNodeLabel(ctx, dcrh.client, devConfig, nodes, true); err != nil {
+		return fmt.Errorf("failed to remove driver type node label: %+v", err)
+	}
 	mod := kmmv1beta1.Module{}
 	namespacedName = types.NamespacedName{
 		Namespace: devConfig.Namespace,
@@ -803,15 +806,25 @@ func (dcrh *deviceConfigReconcilerHelper) finalizeDeviceConfig(ctx context.Conte
 	if err := dcrh.client.Get(ctx, namespacedName, &mod); err != nil {
 		if k8serrors.IsNotFound(err) {
 			// if KMM module CR is not found
-			if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable {
+			if utils.ShouldUseKMM(devConfig) {
+				// when KMM was triggered
 				switch devConfig.Spec.Driver.DriverType {
 				case utils.DriverTypeVFPassthrough:
-					if !dcrh.checkPostProcessFinalizeCondition(ctx, devConfig, nodes) {
+					// for vf-passthrough, revert the vfio related work
+					if !dcrh.checkPostProcessFinalizeCondition(ctx, devConfig, nodes, true) {
 						return errors.New("waiting for post-process finalize condition")
 					}
 				default:
 					logger.Info("module already deleted, removing finalizer", "module", namespacedName)
 				}
+			} else if devConfig.Spec.Driver.Enable != nil &&
+				*devConfig.Spec.Driver.Enable &&
+				devConfig.Spec.Driver.DriverType == utils.DriverTypePFPassthrough {
+				// for pf-passthrough, unbind devices from vfio
+				// so that they can be used by other drivers
+				if !dcrh.checkPostProcessFinalizeCondition(ctx, devConfig, nodes, false) {
+					return errors.New("waiting for post-process finalize condition")
+				}
 			} else {
 				// driver disabled mode won't have KMM CR created
 				// but it still requries the removal of node labels
@@ -842,14 +855,35 @@ func (dcrh *deviceConfigReconcilerHelper)
finalizeDeviceConfig(ctx context.Conte
 	return nil
 }
 
-func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) bool {
+func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx context.Context,
+	devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList, forceCleanup bool) bool {
+	// forceCleanup:
+	// when finalizing vf-passthrough, the VF devices disappear once the GIM driver is unloaded, so there is no need to unbind them
 	logger := log.FromContext(ctx)
 	for _, node := range nodes.Items {
+		vfioReadyLabel := dcrh.kmmPostProcessor.GetWorkReadyLabel(types.NamespacedName{
+			Namespace: devConfig.Namespace,
+			Name:      devConfig.Name,
+		})
+		if _, ok := node.Labels[vfioReadyLabel]; ok {
+			logger.Info(fmt.Sprintf("post-process label still exist on node %+v", node.Name))
+			if forceCleanup {
+				nodeCopy := node.DeepCopy()
+				delete(node.Labels, vfioReadyLabel)
+				if err := dcrh.client.Patch(ctx, &node, client.MergeFrom(nodeCopy)); err != nil && !k8serrors.IsNotFound(err) {
+					logger.Error(err, "failed to remove vfio ready label from node", "node", node.Name)
+				}
+			}
+			return false
+		}
+
 		pod, err := dcrh.kmmPostProcessor.GetWorkerPod(ctx, devConfig, &node)
 		if err == nil {
 			logger.Info(fmt.Sprintf("post-process worker pod %+v still exist on node %+v", pod.Name, node.Name))
-			if err := dcrh.client.Delete(ctx, pod, &client.DeleteOptions{GracePeriodSeconds: &workermgr.WorkerPodGracePeriod}); err != nil && !k8serrors.IsNotFound(err) {
-				logger.Error(err, "failed to delete existing worker pod")
+			if forceCleanup {
+				if err := dcrh.client.Delete(ctx, pod, &client.DeleteOptions{GracePeriodSeconds: &workermgr.WorkerPodGracePeriod}); err != nil && !k8serrors.IsNotFound(err) {
+					logger.Error(err, "failed to delete existing worker pod")
+				}
 			}
 			return false
 		}
@@ -857,19 +891,6 @@ func (dcrh *deviceConfigReconcilerHelper) checkPostProcessFinalizeCondition(ctx
 			logger.Error(err, fmt.Sprintf("failed to get post-process worker pod on node %+v", node.Name))
 			return false
 		}
-		vfioReadyLabel := dcrh.kmmPostProcessor.GetWorkReadyLabel(types.NamespacedName{
-			Namespace: devConfig.Namespace,
-			Name:      devConfig.Name,
-		})
-		if _, ok := node.Labels[vfioReadyLabel]; ok {
-			logger.Info(fmt.Sprintf("post-process label still exist on node %+v", node.Name))
-			nodeCopy := node.DeepCopy()
-			delete(node.Labels, vfioReadyLabel)
-			if err := dcrh.client.Patch(ctx, &node, client.MergeFrom(nodeCopy)); err != nil && !k8serrors.IsNotFound(err) {
-				logger.Error(err, "failed to remove vfio ready label from node", "node", node.Name)
-			}
-			return false
-		}
 	}
 	return true
 }
@@ -912,7 +933,7 @@ func (drch *deviceConfigReconcilerHelper) findDeviceConfigsForNMC(ctx context.Co
 func (dcrh *deviceConfigReconcilerHelper) handleBuildConfigMap(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error {
 	logger := log.FromContext(ctx)
 
-	if devConfig.Spec.Driver.Enable == nil || !*devConfig.Spec.Driver.Enable {
+	if !utils.ShouldUseKMM(devConfig) {
 		logger.Info("skip handling build config map as KMM driver mode is disabled")
 		return nil
 	}
@@ -961,16 +982,21 @@ func (dcrh *deviceConfigReconcilerHelper) handleBuildConfigMap(ctx context.Conte
 }
 
 func (dcrh *deviceConfigReconcilerHelper) handleKMMModule(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error {
-	// the newly created KMM Module will always has the same namespace and name as its parent DeviceConfig
-	kmmMod := &kmmv1beta1.Module{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: devConfig.Namespace,
-			Name:      devConfig.Name,
-		},
-	}
 	logger := log.FromContext(ctx)
 
-	if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable {
+	// add driver type node label if it is necessary
+	if err := utils.UpdateDriverTypeNodeLabel(ctx, dcrh.client, devConfig, nodes, false); err != nil {
+		return err
+	}
+
+	if utils.ShouldUseKMM(devConfig) {
+		// the newly created KMM Module will always have the same namespace and name as its parent DeviceConfig
+		kmmMod := &kmmv1beta1.Module{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: devConfig.Namespace,
+				Name:      devConfig.Name,
+			},
+		}
 		opRes, err := controllerutil.CreateOrPatch(ctx, dcrh.client, kmmMod, func() error {
 			return dcrh.kmmHandler.SetKMMModuleAsDesired(ctx, kmmMod, devConfig, nodes)
 		})
@@ -1007,7 +1033,7 @@ func (dcrh *deviceConfigReconcilerHelper) handleDevicePlugin(ctx context.Context
 
 func (dcrh *deviceConfigReconcilerHelper) handleKMMVersionLabel(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error {
 	// label corresponding node with given kmod version
 	// so that KMM could manage the upgrade by watching the node's version label change
-	if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable {
+	if utils.ShouldUseKMM(devConfig) {
 		err := dcrh.kmmHandler.SetNodeVersionLabelAsDesired(ctx, devConfig, nodes)
 		if err != nil {
 			return fmt.Errorf("failed to update node version label for DeviceConfig %s/%s: %v", devConfig.Namespace, devConfig.Name, err)
diff --git a/internal/controllers/device_config_reconciler_test.go b/internal/controllers/device_config_reconciler_test.go
index 437cb6a9..3f0960cc 100644
--- a/internal/controllers/device_config_reconciler_test.go
+++ b/internal/controllers/device_config_reconciler_test.go
@@ -511,6 +511,7 @@ var _ = Describe("handleKMMModule", func() {
 			},
 		}
 		gomock.InOrder(
+			kubeClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil),
 			kubeClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(k8serrors.NewNotFound(schema.GroupResource{}, "whatever")),
 			kmmHelper.EXPECT().SetKMMModuleAsDesired(ctx, newMod, devConfig, testNodeList).Return(nil),
 
@@ -529,6 +530,7 @@ var _ = Describe("handleKMMModule", func() {
 			},
 		}
 		gomock.InOrder(
+			kubeClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil),
 			kubeClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Do(
 				func(_ interface{}, _ interface{}, mod *kmmv1beta1.Module, _ ...client.GetOption) {
 					mod.Name = devConfig.Name
diff --git a/internal/controllers/watchers/node.go b/internal/controllers/watchers/node.go
index 5bbe5546..36aef26b 100644
--- a/internal/controllers/watchers/node.go
+++ b/internal/controllers/watchers/node.go
@@ -164,15 +164,12 @@ func (h *NodeEventHandler) reconcileAllDeviceConfigs(ctx context.Context, q work
 		logger.Error(err, "failed to list deviceconfigs")
 	}
 	for _, dcfg := range devConfigList.Items {
-		if dcfg.Spec.Driver.Enable != nil &&
-			*dcfg.Spec.Driver.Enable {
-			q.Add(reconcile.Request{
-				NamespacedName: types.NamespacedName{
-					Namespace: dcfg.Namespace,
-					Name:      dcfg.Name,
-				},
-			})
-		}
+		q.Add(reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: dcfg.Namespace,
+				Name:      dcfg.Name,
+			},
+		})
 	}
 }
 
@@ -208,14 +205,18 @@ func (h *NodeEventHandler) reconcileRelatedDeviceConfig(ctx context.Context, obj
 }
 
 func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Logger, oldNode, node *v1.Node) {
+	// detect the vfio bind status
 	hasVFIOReadyLabel, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.VFIOMountReadyLabelTemplate)
-	hasModuleReadyLabel, moduleLabel, moduleDevConfigNamespace, moduleDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.KMMModuleReadyLabelTemplate)
-	if hasModuleReadyLabel && !hasVFIOReadyLabel {
-		// trigger VFIO worker pod
+	// detect desired driver type
+	hasDriverTypeLabel, driverTypeLabel, driverDevConfigNamespace, driverDevConfigName := utils.HasNodeLabelTemplateMatch(node.Labels, utils.DriverTypeNodeLabelTemplate)
+
+	if hasDriverTypeLabel && !hasVFIOReadyLabel {
+		// if driver type is specified but vfio bind is not ready
+		// start the vfio bind work for vf-passthrough and pf-passthrough driver
 		devConfig := &amdv1alpha1.DeviceConfig{}
 		err := h.client.Get(ctx, types.NamespacedName{
-			Namespace: moduleDevConfigNamespace,
-			Name:      moduleDevConfigName,
+			Namespace: driverDevConfigNamespace,
+			Name:      driverDevConfigName,
 		}, devConfig)
 		if err != nil {
 			if !k8serrors.IsNotFound(err) {
@@ -223,16 +224,19 @@ func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Lo
 			}
 			return
 		}
-		if devConfig.Spec.Driver.DriverType == utils.DriverTypeVFPassthrough {
-			logger.Info(fmt.Sprintf("node %v with configured VFPassthrough driver only has KMM module label %v %v %v, launching VFIO worker pod",
-				node.Name, moduleLabel, moduleDevConfigNamespace, moduleDevConfigName))
+		// only trigger post installation process for specific driver types
+		switch devConfig.Spec.Driver.DriverType {
+		case utils.DriverTypeVFPassthrough,
+			utils.DriverTypePFPassthrough:
+			logger.Info(fmt.Sprintf("node %v with configured passthrough driver type label %v doesn't have VFIO binding ready, launching VFIO worker pod",
+				node.Name, driverTypeLabel))
 			if err := h.workerMgr.Work(ctx, devConfig, node); err != nil {
 				logger.Error(err, "failed to create worker pod")
 			}
 		}
-	} else if !hasModuleReadyLabel && hasVFIOReadyLabel {
-		logger.Info(fmt.Sprintf("node %v with configured VFPassthrough driver only has VFIO label %v %v %v, launching VFIO cleanup worker pod",
-			node.Name, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName))
+	} else if !hasDriverTypeLabel && hasVFIOReadyLabel {
+		logger.Info(fmt.Sprintf("node %v with configured driver %v only has VFIO label %v %v %v, launching VFIO cleanup worker pod",
+			node.Name, driverTypeLabel, vfioLabel, vfioDevConfigNamespace, vfioDevConfigName))
 		// trigger VFIO cleanup worker pod
 		devConfig := &amdv1alpha1.DeviceConfig{}
 		err := h.client.Get(ctx, types.NamespacedName{
@@ -248,12 +252,10 @@ func (h *NodeEventHandler) handlePostProcess(ctx context.Context, logger logr.Lo
 		if err := h.workerMgr.Cleanup(ctx, devConfig, node); err != nil {
 			logger.Error(err, "failed to create cleanup worker pod")
 		}
-	} else if hasModuleReadyLabel && hasVFIOReadyLabel &&
-		oldNode.Status.NodeInfo.BootID != node.Status.NodeInfo.BootID {
-		// if the node was rebooted
-		// don't wait for KMM to remove the module ready label then wait for workermgr to trigger a unload worker
+	} else if oldNode.Status.NodeInfo.BootID != node.Status.NodeInfo.BootID {
+		// if the node was rebooted, most of the time devices need to be rebound to vfio-pci
 		// directly remove the VFIO ready label
-		// so that the event handler will bring up a new worker pod to load device into VFIO
+		// so that the event handler will bring up a new vfio worker pod to load devices into VFIO
 		h.workerMgr.RemoveWorkReadyLabel(ctx, logger, types.NamespacedName{
 			Namespace: vfioDevConfigNamespace,
 			Name:      vfioDevConfigName,
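The bind script rewritten below drops the `new_id` mechanism in favor of per-device `driver_override`, so only the selected device is steered to vfio-pci instead of every device sharing the same vendor/device ID. A minimal sketch of that flow, assuming a hypothetical device at `0000:0c:00.0` (not the exact script):

```bash
# Illustrative per-device vfio-pci bind via driver_override
DEV=0000:0c:00.0
modprobe vfio_pci
# release the device from whatever driver currently owns it
if [ -e /sys/bus/pci/devices/$DEV/driver/unbind ]; then
    echo $DEV > /sys/bus/pci/devices/$DEV/driver/unbind
fi
# steer only this device to vfio-pci, then clear the override
echo vfio-pci > /sys/bus/pci/devices/$DEV/driver_override
echo $DEV > /sys/bus/pci/drivers/vfio-pci/bind
echo "" > /sys/bus/pci/devices/$DEV/driver_override
```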
diff --git a/internal/controllers/workermgr/scripts/vfio_bind.sh b/internal/controllers/workermgr/scripts/vfio_bind.sh
index d1dfa4ac..7d0fe220 100644
--- a/internal/controllers/workermgr/scripts/vfio_bind.sh
+++ b/internal/controllers/workermgr/scripts/vfio_bind.sh
@@ -1,10 +1,9 @@
 #!/bin/bash
 
-PRODUCT_CODES=("7410" "74b5" "74b9") # 74b5 - MI300X, 7410 - MI210, 74b9 - MI325X
+PRODUCT_CODES=($$PCI_DEVICE_ID_LIST)
 
 for PRODUCT_CODE in "${PRODUCT_CODES[@]}"; do
     COUNTER=0
-    DEVICES_PATHS=""
 
     # Load VFIO PCI driver on GPU VF devices, if not done already
     LSPCI_OUTPUT=$(lspci -nn -d 1002:${PRODUCT_CODE})
@@ -18,14 +17,24 @@
         PCI_ADDRESS=$(echo "$LINE" | awk '{print $1}')
         VFIO_DRIVER=$(lspci -k -s "$PCI_ADDRESS" | grep -i vfio-pci | awk '{print $5}')
         VFIO_DEVICE="0000:$PCI_ADDRESS"
+        # If current assigned driver is not vfio-pci
+        # Start to bind the device to vfio-pci
         if [ "$VFIO_DRIVER" != "vfio-pci" ]; then
             if [ $COUNTER -eq 0 ]; then
-                # Load the VFIO PCI driver for all GPUs
+                # Only try to confirm vfio_pci was loaded once
                 modprobe vfio_iommu_type1 allow_unsafe_interrupts
                 modprobe vfio_pci disable_idle_d3=1
                 bash -c "echo 1 > /sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts"
-                bash -c "echo 1002 ${PRODUCT_CODE} > /sys/bus/pci/drivers/vfio-pci/new_id"
             fi
+            # Unbind from other driver
+            if [ -e "/sys/bus/pci/devices/$VFIO_DEVICE/driver/unbind" ]; then
+                echo $VFIO_DEVICE > /sys/bus/pci/devices/$VFIO_DEVICE/driver/unbind
+            fi
+            # Bind specific device
+            # and avoid using /new_id which auto binds all devices with the same device ID
+            echo "vfio-pci" > /sys/bus/pci/devices/$VFIO_DEVICE/driver_override
+            echo $VFIO_DEVICE > /sys/bus/pci/drivers/vfio-pci/bind
+            echo "" > /sys/bus/pci/devices/$VFIO_DEVICE/driver_override
         fi
         # Check if IOMMU entry found for each GPU (VFIO device)
         IOMMU_GROUP=$(readlink -f /sys/bus/pci/devices/${VFIO_DEVICE}/iommu_group | awk -F '/' '{print $NF}')
@@ -35,7 +44,6 @@
             echo "Error: IOMMU entry not found for GPU VF Device: $VFIO_DEVICE, IOMMU Group: $IOMMU_GROUP"
             exit 1
         fi
-        DEVICES_PATHS+="path=/sys/bus/pci/devices/$VFIO_DEVICE "
         ((COUNTER++))
        echo "Group_ID=${IOMMU_GROUP} BUS_ID=${VFIO_DEVICE}"
     done <<< "$LSPCI_OUTPUT"
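To spot-check the result of the bind script on a node, the bound driver can be read back from lspci or sysfs; the device address here is an illustrative assumption:

```bash
# "Kernel driver in use: vfio-pci" confirms a successful bind
lspci -k -s 0c:00.0
# or resolve the owning driver directly from sysfs
readlink /sys/bus/pci/devices/0000:0c:00.0/driver
```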
diff --git a/internal/controllers/workermgr/scripts/vfio_unbind.sh b/internal/controllers/workermgr/scripts/vfio_unbind.sh
index ae27ca24..4360ec79 100644
--- a/internal/controllers/workermgr/scripts/vfio_unbind.sh
+++ b/internal/controllers/workermgr/scripts/vfio_unbind.sh
@@ -1,11 +1,8 @@
 #!/bin/bash
 
-PRODUCT_CODES=("7410" "74b5" "74b9") # 74b5 - MI300X, 7410 - MI210, 74b9 - MI325X
+PRODUCT_CODES=($$PCI_DEVICE_ID_LIST)
 
 for PRODUCT_CODE in "${PRODUCT_CODES[@]}"; do
-    COUNTER=0
-    DEVICES_PATHS=""
-
     # Load VFIO PCI driver on GPU VF devices, if not done already
     LSPCI_OUTPUT=$(lspci -nn -d 1002:${PRODUCT_CODE})
@@ -19,14 +16,9 @@
         VFIO_DRIVER=$(lspci -k -s "$PCI_ADDRESS" | grep -i vfio-pci | awk '{print $5}')
         VFIO_DEVICE="0000:$PCI_ADDRESS"
         if [ "$VFIO_DRIVER" == "vfio-pci" ]; then
-            if [ $COUNTER -eq 0 ]; then
-                # Unload the VFIO PCI driver for all GPUs
-                bash -c "echo 1002 ${PRODUCT_CODE} > /sys/bus/pci/drivers/vfio-pci/remove_id"
-                bash -c "echo ${VFIO_DEVICE} > /sys/bus/pci/drivers/vfio-pci/unbind"
-            fi
+            # Unload the VFIO PCI device
+            bash -c "echo ${VFIO_DEVICE} > /sys/bus/pci/drivers/vfio-pci/unbind"
         fi
-        DEVICES_PATHS+="path=/sys/bus/pci/devices/$VFIO_DEVICE "
-        ((COUNTER++))
         IOMMU_GROUP=$(readlink -f /sys/bus/pci/devices/${VFIO_DEVICE}/iommu_group | awk -F '/' '{print $NF}')
         echo "Group_ID=${IOMMU_GROUP} BUS_ID=${VFIO_DEVICE}"
     done <<< "$LSPCI_OUTPUT"
diff --git a/internal/controllers/workermgr/workermgr.go b/internal/controllers/workermgr/workermgr.go
index 2fa919c1..4a1c81f5 100644
--- a/internal/controllers/workermgr/workermgr.go
+++ b/internal/controllers/workermgr/workermgr.go
@@ -21,6 +21,7 @@ import (
 	_ "embed"
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	"github.com/go-logr/logr"
 	v1 "k8s.io/api/core/v1"
@@ -32,6 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
+	"github.com/ROCm/gpu-operator/api/v1alpha1"
 	amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1"
 	utils "github.com/ROCm/gpu-operator/internal"
 )
@@ -39,6 +41,7 @@ import (
 const (
 	workerContainerName = "worker"
 	initContainerName   = "pci-device-detector"
+	pciDeviceIDTemplate = "$$PCI_DEVICE_ID_LIST"
 )
 
 var (
@@ -83,6 +86,14 @@ func NewWorkerMgr(client client.Client, scheme *runtime.Scheme) WorkerMgrAPI {
 // Work executes the work on given node
 func (w *workerMgr) Work(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error {
 	logger := log.FromContext(ctx)
+	switch devConfig.Spec.Driver.DriverType {
+	case utils.DriverTypeVFPassthrough,
+		utils.DriverTypePFPassthrough:
+		// only perform VFIO-related work for vf-passthrough or pf-passthrough
+	default:
+		logger.Info(fmt.Sprintf("no work is required for driver type %v", devConfig.Spec.Driver.DriverType))
+		return nil
+	}
 	loadWorker := w.getPodDef(devConfig, node.Name, utils.LoadVFIOAction)
 	opRes, err := controllerutil.CreateOrPatch(ctx, w.client, loadWorker, func() error {
 		return controllerutil.SetControllerReference(devConfig, loadWorker, w.scheme)
@@ -97,6 +108,14 @@ func (w *workerMgr) Work(ctx context.Context, devConfig *amdv1alpha1.DeviceConfi
 // Cleanup cleanup the work on given node
 func (w *workerMgr) Cleanup(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, node *v1.Node) error {
 	logger := log.FromContext(ctx)
+	switch devConfig.Spec.Driver.DriverType {
+	case utils.DriverTypeVFPassthrough,
+		utils.DriverTypePFPassthrough:
+		// only perform VFIO-related work for vf-passthrough or pf-passthrough
+	default:
+		logger.Info(fmt.Sprintf("no work is required for driver type %v", devConfig.Spec.Driver.DriverType))
+		return nil
+	}
 	unloadWorker := w.getPodDef(devConfig, node.Name, utils.UnloadVFIOAction)
 	opRes, err := controllerutil.CreateOrPatch(ctx, w.client, unloadWorker, func() error {
 		return controllerutil.SetControllerReference(devConfig, unloadWorker, w.scheme)
@@ -177,6 +196,20 @@ func (w *workerMgr) getPodName(devConfig *amdv1alpha1.DeviceConfig, nodeName str
 	return fmt.Sprintf("worker-%v-%v", devConfig.Name, nodeName)
 }
 
+func (w *workerMgr) getVFIOCommand(cmd string, devConfig *v1alpha1.DeviceConfig) string {
+	switch devConfig.Spec.Driver.DriverType {
+	case utils.DriverTypeVFPassthrough:
+		return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(utils.DefaultVFDeviceIDs, " "))
+	case utils.DriverTypePFPassthrough:
+		return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(utils.DefaultPFDeviceIDs, " "))
+	default:
+		// unlikely to happen
+		// the Work() and Cleanup() functions have already verified that the driver type is vf-passthrough or pf-passthrough
+		// if it somehow happens, do nothing for the unknown driver type
+		return "true"
+	}
+}
+
 // getPodSpec generate the pod definition for worker
 func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName,
action string) *v1.Pod {
 	// pod name
@@ -190,9 +223,9 @@ func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, act
 	var command []string
 	switch action {
 	case utils.LoadVFIOAction:
-		command = []string{"/bin/bash", "-c", vfioBindScript}
+		command = []string{"/bin/bash", "-c", w.getVFIOCommand(vfioBindScript, devConfig)}
 	case utils.UnloadVFIOAction:
-		command = []string{"/bin/bash", "-c", vfioUnbindScript}
+		command = []string{"/bin/bash", "-c", w.getVFIOCommand(vfioUnbindScript, devConfig)}
 	}
 
 	// mount necessary folders
@@ -245,23 +278,37 @@ func (w *workerMgr) getPodDef(devConfig *amdv1alpha1.DeviceConfig, nodeName, act
 	initContainers := []v1.Container{}
 	switch action {
 	case utils.LoadVFIOAction:
-		// for loading device to VFIO driver
-		// need to use init container to make sure the device exists
-		initContainers = []v1.Container{
-			{
-				Name:    initContainerName,
-				Image:   utilsContainerImage,
-				Command: []string{"sh", "-c", "while ! lspci -nn | grep -q -e 7410 -e 74b5 -e 74b9; do echo \"PCI device not found\"; sleep 2; done"},
-				SecurityContext: &v1.SecurityContext{
-					RunAsUser:  ptr.To(int64(0)),
-					Privileged: ptr.To(true),
+		switch devConfig.Spec.Driver.DriverType {
+		case utils.DriverTypeVFPassthrough:
+			// for loading VF to VFIO driver
+			// need to use init container to make sure the VF exists
+			// then start binding VF
+			// otherwise there could be a false completion where the pod completes without binding anything
+			getDetectVFCommand := func() string {
+				grepArgs := []string{}
+				for _, deviceID := range utils.DefaultVFDeviceIDs {
+					grepArgs = append(grepArgs, "-e "+deviceID)
+				}
+				return fmt.Sprintf("while ! lspci -nn | grep -q %v; do echo \"PCI device not found\"; sleep 2; done", strings.Join(grepArgs, " "))
+			}
+			initContainers = []v1.Container{
+				{
+					Name:    initContainerName,
+					Image:   utilsContainerImage,
+					Command: []string{"sh", "-c", getDetectVFCommand()},
+					SecurityContext: &v1.SecurityContext{
+						RunAsUser:  ptr.To(int64(0)),
+						Privileged: ptr.To(true),
+					},
+					VolumeMounts: volumeMounts,
 				},
-				VolumeMounts: volumeMounts,
-			},
+			}
 		}
+
 	case utils.UnloadVFIOAction:
 		// for unloading device from VFIO
-		// VF devices are already removed due to the removal of GIM driver
-		// no need to use an init container to detect them
+		// 1. VF devices are already removed due to the removal of GIM driver
+		//    no need to use an init container to detect them
+		// 2. PF devices already exist, so there is no need to detect them
 	}
 
 	worker := &v1.Pod{
diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go
index cc2c9a94..3a82813e 100644
--- a/internal/kmmmodule/kmmmodule.go
+++ b/internal/kmmmodule/kmmmodule.go
@@ -285,7 +285,7 @@ func (km *kmmModule) SetDevicePluginAsDesired(ds *appsv1.DaemonSet, devConfig *a
 	for key, val := range devConfig.Spec.Selector {
 		nodeSelector[key] = val
 	}
-	if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable {
+	if utils.ShouldUseKMM(devConfig) {
 		nodeSelector[kmmLabels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = ""
 	}
 	imagePullSecrets := []v1.LocalObjectReference{}
@@ -303,6 +303,8 @@ func (km *kmmModule) SetDevicePluginAsDesired(ds *appsv1.DaemonSet, devConfig *a
 	switch devConfig.Spec.Driver.DriverType {
 	case utils.DriverTypeVFPassthrough:
 		initContainerCommand = "while [ !
-d /sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + case utils.DriverTypePFPassthrough: + initContainerCommand = "true" } ds.Spec = appsv1.DaemonSetSpec{ diff --git a/internal/metricsexporter/metricsexporter.go b/internal/metricsexporter/metricsexporter.go index 68892526..74ca5332 100644 --- a/internal/metricsexporter/metricsexporter.go +++ b/internal/metricsexporter/metricsexporter.go @@ -200,7 +200,7 @@ func (nl *metricsExporter) SetMetricsExporterAsDesired(ds *appsv1.DaemonSet, dev } // only use module ready label as node selector when KMM driver is enabled - if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { + if utils.ShouldUseKMM(devConfig) { nodeSelector[labels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = "" } @@ -380,6 +380,8 @@ func (nl *metricsExporter) SetMetricsExporterAsDesired(ds *appsv1.DaemonSet, dev switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: initContainerCommand = "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + case utils.DriverTypePFPassthrough: + initContainerCommand = "true" } ds.Spec = appsv1.DaemonSetSpec{ diff --git a/internal/nodelabeller/nodelabeller.go b/internal/nodelabeller/nodelabeller.go index 0af1e155..3d1d77da 100644 --- a/internal/nodelabeller/nodelabeller.go +++ b/internal/nodelabeller/nodelabeller.go @@ -266,6 +266,8 @@ func getNodeLabellerInitContainerCommand(devConfig *amdv1alpha1.DeviceConfig, bl switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: initContainerCommand = []string{"sh", "-c", fmt.Sprintf("echo \"# added by gpu operator \nblacklist amdgpu\" > /host-etc/modprobe.d/%v; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done", blackListFileName)} + case utils.DriverTypePFPassthrough: + initContainerCommand = []string{"sh", "-c", "true"} } return initContainerCommand } else { @@ -275,6 +277,8 @@ func getNodeLabellerInitContainerCommand(devConfig *amdv1alpha1.DeviceConfig, bl switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: initContainerCommand = []string{"sh", "-c", fmt.Sprintf("rm -f /host-etc/modprobe.d/%v; while [ ! -d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done", blackListFileName)} + case utils.DriverTypePFPassthrough: + initContainerCommand = []string{"sh", "-c", "true"} } return initContainerCommand } diff --git a/internal/testrunner/testrunner.go b/internal/testrunner/testrunner.go index 7de962de..9a0c8363 100644 --- a/internal/testrunner/testrunner.go +++ b/internal/testrunner/testrunner.go @@ -205,7 +205,7 @@ func (nl *testRunner) SetTestRunnerAsDesired(ds *appsv1.DaemonSet, devConfig *am } // only use module ready label as node selector when KMM driver is enabled - if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { + if utils.ShouldUseKMM(devConfig) { nodeSelector[labels.GetKernelModuleReadyNodeLabel(devConfig.Namespace, devConfig.Name)] = "" } @@ -277,6 +277,8 @@ func (nl *testRunner) SetTestRunnerAsDesired(ds *appsv1.DaemonSet, devConfig *am switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeVFPassthrough: initContainerCommand = "if [ \"$SIM_ENABLE\" = \"true\" ]; then exit 0; fi; while [ ! 
-d /host-sys/module/gim/drivers/ ]; do echo \"gim driver is not loaded \"; sleep 2 ;done" + case utils.DriverTypePFPassthrough: + initContainerCommand = "true" } ds.Spec = appsv1.DaemonSetSpec{ diff --git a/internal/utils.go b/internal/utils.go index ac816952..43922be3 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -18,6 +18,7 @@ package utils import ( "context" + "errors" "fmt" "regexp" "strings" @@ -27,17 +28,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/ROCm/gpu-operator/api/v1alpha1" amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" "github.com/ROCm/gpu-operator/internal/cmd" ) const ( - KindDeviceConfig = "DeviceConfig" - defaultOcDriversVersion = "6.2.2" - openShiftNodeLabel = "node.openshift.io/os_id" - NodeFeatureLabelAmdGpu = "feature.node.kubernetes.io/amd-gpu" - NodeFeatureLabelAmdVGpu = "feature.node.kubernetes.io/amd-vgpu" + KindDeviceConfig = "DeviceConfig" + defaultOcDriversVersion = "6.2.2" + openShiftNodeLabel = "node.openshift.io/os_id" + NodeFeatureLabelAmdGpu = "feature.node.kubernetes.io/amd-gpu" + NodeFeatureLabelAmdVGpu = "feature.node.kubernetes.io/amd-vgpu" + // device plugin ResourceNamingStrategyFlag = "resource_naming_strategy" SingleStrategy = "single" MixedStrategy = "mixed" @@ -50,12 +54,14 @@ const ( // kubevirt DriverTypeContainer = "container" DriverTypeVFPassthrough = "vf-passthrough" + DriverTypePFPassthrough = "pf-passthrough" DefaultUtilsImage = "docker.io/rocm/gpu-operator-utils:latest" // workerMgr related labels LoadVFIOAction = "loadVFIO" UnloadVFIOAction = "unloadVFIO" WorkerActionLabelKey = "gpu.operator.amd.com/worker-action" VFIOMountReadyLabelTemplate = "gpu.operator.amd.com/%v.%v.vfio.ready" + DriverTypeNodeLabelTemplate = "gpu.operator.amd.com/%v.%v.driver" KMMModuleReadyLabelTemplate = "kmm.node.kubernetes.io/%v.%v.ready" // Operand metadata MetricsExporterNameSuffix = "-metrics-exporter" @@ -65,6 +71,7 @@ const ( ) var ( + // node labeller nodeLabellerKinds = []string{ "firmware", "family", "driver-version", "driver-src-version", "device-id", "product-name", @@ -72,6 +79,27 @@ var ( } allAMDComLabels = []string{} allBetaAMDComLabels = []string{} + // kubevirt + DefaultVFDeviceIDs = []string{ + "7410", // MI210 VF + "74b5", // MI300X VF + "74b9", // MI325X VF + "7461", // Radeon Pro V710 MxGPU + "73ae", // Radeon Pro V620 MxGPU + } + DefaultPFDeviceIDs = []string{ + "74a5", // MI325X + "74a2", // MI308X + "74b6", // MI308X + "74a8", // MI308X HF + "74a0", // MI300A + "74a1", // MI300X + "74a9", // MI300X HF + "74bd", // MI300X HF + "740f", // MI210 + "7408", // MI250X + "740c", // MI250/MI250X + } ) func init() { @@ -230,8 +258,9 @@ func IsPrometheusServiceMonitorEnable(devConfig *amdv1alpha1.DeviceConfig) bool func GetDriverTypeTag(devCfg *amdv1alpha1.DeviceConfig) string { driverTypeTag := "" switch devCfg.Spec.Driver.DriverType { - case DriverTypeVFPassthrough: - driverTypeTag = "-" + DriverTypeVFPassthrough + case DriverTypeVFPassthrough, + DriverTypePFPassthrough: + driverTypeTag = "-" + devCfg.Spec.Driver.DriverType case DriverTypeContainer: // when the driver type is container // don't add any driver type inside the driver image tag @@ -264,3 +293,62 @@ func HasNodeLabelTemplateMatch(nodeLabels map[string]string, template string) (b } return false, "", "", "" } + +func GetDriverTypeNodeLabel(devConfig *v1alpha1.DeviceConfig) string { + return fmt.Sprintf(DriverTypeNodeLabelTemplate, 
devConfig.Namespace, devConfig.Name) +} + +func UpdateDriverTypeNodeLabel(ctx context.Context, cli client.Client, devConfig *v1alpha1.DeviceConfig, nodes *v1.NodeList, cleanup bool) error { + if devConfig == nil { + return fmt.Errorf("received nil DeviceConfig") + } + if nodes == nil { + return fmt.Errorf("received nil node list") + } + if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { + var err error + for _, node := range nodes.Items { + var nodeCopy *v1.Node + if cleanup { + if _, ok := node.Labels[GetDriverTypeNodeLabel(devConfig)]; !ok { + // no need to clean up driver type node label if it is non-existing + continue + } + nodeCopy = node.DeepCopy() + delete(node.Labels, GetDriverTypeNodeLabel(devConfig)) + } else { + if val, ok := node.Labels[GetDriverTypeNodeLabel(devConfig)]; ok && val == devConfig.Spec.Driver.DriverType { + // no need to patch the driver type node label if it is existing + continue + } + nodeCopy = node.DeepCopy() + if node.Labels == nil { + node.Labels = map[string]string{} + } + node.Labels[GetDriverTypeNodeLabel(devConfig)] = devConfig.Spec.Driver.DriverType + } + if patchErr := cli.Patch(ctx, &node, client.MergeFrom(nodeCopy)); patchErr != nil { + err = errors.Join(err, patchErr) + } + } + return err + } + return nil +} + +// ShouldUseKMM return true if KMM needs to be triggered otherwise return false +func ShouldUseKMM(devConfig *v1alpha1.DeviceConfig) bool { + if devConfig == nil { + return false + } + if devConfig.Spec.Driver.Enable != nil && *devConfig.Spec.Driver.Enable { + switch devConfig.Spec.Driver.DriverType { + case DriverTypePFPassthrough: + // for pf-passthrough there is no need to install driver via KMM + return false + } + // for container or vf-passthrough driver KMM is needed to install driver + return true + } + return false +} diff --git a/internal/utils_container/Dockerfile b/internal/utils_container/Dockerfile index 9a65b9e2..61e40296 100644 --- a/internal/utils_container/Dockerfile +++ b/internal/utils_container/Dockerfile @@ -1,7 +1,7 @@ -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.3 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5 # Install nsenter from util-linux package -RUN microdnf install -y util-linux pciutils && \ +RUN microdnf install -y util-linux pciutils kmod && \ cp /usr/bin/nsenter /nsenter && \ microdnf clean all diff --git a/internal/utils_test.go b/internal/utils_test.go index 12290f2c..6529257b 100644 --- a/internal/utils_test.go +++ b/internal/utils_test.go @@ -21,6 +21,8 @@ import ( "reflect" "testing" + "github.com/ROCm/gpu-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -177,7 +179,7 @@ func TestHasNodeLabelTemplateMatch(t *testing.T) { }, } - templates := []string{VFIOMountReadyLabelTemplate, KMMModuleReadyLabelTemplate} + templates := []string{VFIOMountReadyLabelTemplate, KMMModuleReadyLabelTemplate, DriverTypeNodeLabelTemplate} for _, tc := range testCases { for _, template := range templates { @@ -195,3 +197,83 @@ func TestHasNodeLabelTemplateMatch(t *testing.T) { } } } + +func TestShouldUseKMM(t *testing.T) { + boolTrue := true + boolFalse := false + testCases := []struct { + Description string + DevConfig *v1alpha1.DeviceConfig + Expect bool + }{ + { + Description: "nil DeviceConfig", + DevConfig: nil, + Expect: false, + }, + { + Description: "nil spec.driver.enable", + DevConfig: &v1alpha1.DeviceConfig{ + Spec: v1alpha1.DeviceConfigSpec{ + Driver: v1alpha1.DriverSpec{ + Enable: nil, + 
DriverType: DriverTypeContainer,
+				},
+			},
+		},
+		Expect: false,
+	},
+	{
+		Description: "disable driver management",
+		DevConfig: &v1alpha1.DeviceConfig{
+			Spec: v1alpha1.DeviceConfigSpec{
+				Driver: v1alpha1.DriverSpec{
+					Enable:     &boolFalse,
+					DriverType: DriverTypeContainer,
+				},
+			},
+		},
+		Expect: false,
+	},
+	{
+		Description: "enable driver management with container driver type",
+		DevConfig: &v1alpha1.DeviceConfig{
+			Spec: v1alpha1.DeviceConfigSpec{
+				Driver: v1alpha1.DriverSpec{
+					Enable:     &boolTrue,
+					DriverType: DriverTypeContainer,
+				},
+			},
+		},
+		Expect: true,
+	},
+	{
+		Description: "enable driver management with vf-passthrough driver type",
+		DevConfig: &v1alpha1.DeviceConfig{
+			Spec: v1alpha1.DeviceConfigSpec{
+				Driver: v1alpha1.DriverSpec{
+					Enable:     &boolTrue,
+					DriverType: DriverTypeVFPassthrough,
+				},
+			},
+		},
+		Expect: true,
+	},
+	{
+		Description: "enable driver management with pf-passthrough driver type",
+		DevConfig: &v1alpha1.DeviceConfig{
+			Spec: v1alpha1.DeviceConfigSpec{
+				Driver: v1alpha1.DriverSpec{
+					Enable:     &boolTrue,
+					DriverType: DriverTypePFPassthrough,
+				},
+			},
+		},
+		Expect: false, // pf-passthrough doesn't need to trigger KMM
+	},
+	}
+
+	for _, tc := range testCases {
+		assert.Equal(t, ShouldUseKMM(tc.DevConfig), tc.Expect, fmt.Sprintf("test case %+v expect ShouldUseKMM() return %+v but got %+v", tc.Description, tc.Expect, ShouldUseKMM(tc.DevConfig)))
+	}
+}
diff --git a/internal/validator/specValidators.go b/internal/validator/specValidators.go
index 9f0bbb78..fa7bf259 100644
--- a/internal/validator/specValidators.go
+++ b/internal/validator/specValidators.go
@@ -27,9 +27,22 @@ import (
 
 // DriverSpec validation
 func ValidateDriverSpec(ctx context.Context, client client.Client, devConfig *amdv1alpha1.DeviceConfig) error {
+	if devConfig.Spec.Driver.DriverType == "" {
+		devConfig.Spec.Driver.DriverType = utils.DriverTypeContainer
+	}
 	dSpec := devConfig.Spec.Driver
-	if dSpec.Enable == nil || !*dSpec.Enable {
+	switch dSpec.DriverType {
+	case utils.DriverTypeContainer,
+		utils.DriverTypeVFPassthrough,
+		utils.DriverTypePFPassthrough:
+		// valid
+	default:
+		return fmt.Errorf("invalid driver type %v", dSpec.DriverType)
+	}
+
+	// if KMM is not triggered, no need to verify the rest of the config
+	if !utils.ShouldUseKMM(devConfig) {
 		return nil
 	}
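With the validator accepting the new type, a minimal DeviceConfig exercising pf-passthrough could look like the sketch below; the metadata values and node selector are illustrative assumptions:

```bash
# Hedged sketch of a pf-passthrough DeviceConfig (names are illustrative)
cat <<'EOF' | kubectl apply -f -
apiVersion: amd.com/v1alpha1
kind: DeviceConfig
metadata:
  name: pf-passthrough-example
  namespace: kube-amd-gpu
spec:
  driver:
    enable: true
    # no KMM Module is created for this type; the worker pod binds the
    # PF devices straight to vfio-pci
    driverType: pf-passthrough
  selector:
    feature.node.kubernetes.io/amd-gpu: "true"
EOF
```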
c, "") + s.checkNodeLabellerStatus(s.ns, c, devCfg) + s.checkMetricsExporterStatus(devCfg, s.ns, v1.ServiceTypeClusterIP, c) + s.verifyDeviceConfigStatus(devCfg, c) + s.verifyNodeGPULabel(devCfg, c) + s.verifyVFIOReadyLabel(devCfg, true, c) + + // delete + s.deleteDeviceConfig(devCfg, c) + s.verifyVFIOReadyLabel(devCfg, false, c) +} From f9ab531b0230d3989c3dd7d32ed8e44dba5bf73b Mon Sep 17 00:00:00 2001 From: Yan Sun Date: Wed, 28 May 2025 12:59:54 -0700 Subject: [PATCH 11/21] [Feature] Allow users to configure modprobe arguments and parameters (#706) --- api/v1alpha1/deviceconfig_types.go | 21 +++++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 31 +++++++++++++++++++ ...md-gpu-operator.clusterserviceversion.yaml | 24 ++++++++++++++ bundle/manifests/amd.com_deviceconfigs.yaml | 25 +++++++++++++++ config/crd/bases/amd.com_deviceconfigs.yaml | 25 +++++++++++++++ ...md-gpu-operator.clusterserviceversion.yaml | 24 ++++++++++++++ helm-charts-k8s/crds/deviceconfig-crd.yaml | 25 +++++++++++++++ .../crds/deviceconfig-crd.yaml | 25 +++++++++++++++ internal/kmmmodule/kmmmodule.go | 18 ++++++++++- 9 files changed, 217 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index 48bb773c..8725a4d4 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -104,6 +104,11 @@ type DriverSpec struct { // +kubebuilder:default=container DriverType string `json:"driverType,omitempty"` + // advanced arguments, parameters and more configs to manage tne driver + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="KernelModuleConfig",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:kernelModuleConfig"} + // +optional + KernelModuleConfig KernelModuleConfigSpec `json:"kernelModuleConfig,omitempty"` + // blacklist amdgpu drivers on the host. Node reboot is required to apply the baclklist on the worker nodes. // Not working for OpenShift cluster. OpenShift users please use the Machine Config Operator (MCO) resource to configure amdgpu blacklist. // Example MCO resource is available at https://instinct.docs.amd.com/projects/gpu-operator/en/latest/installation/openshift-olm.html#create-blacklist-for-installing-out-of-tree-kernel-module @@ -165,6 +170,22 @@ type DriverSpec struct { Tolerations []v1.Toleration `json:"tolerations,omitempty"` } +// KernelModuleConfigSpec contains the advanced configs to manage the driver kernel module +type KernelModuleConfigSpec struct { + // LoadArg are the arguments when modprobe is executed to load the kernel module. The command will be `modprobe ${Args} module_name`. + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="LoadArg",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:loadArg"} + // +optional + LoadArgs []string `json:"loadArgs,omitempty"` + // UnloadArg are the arguments when modprobe is executed to unload the kernel module. The command will be `modprobe -r ${Args} module_name`. + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="UnloadArg",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:unloadArg"} + // +optional + UnloadArgs []string `json:"unloadArgs,omitempty"` + // Parameters is being used for modprobe commands. The command will be `modprobe ${Args} module_name ${Parameters}`. 
+ //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Parameters",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:parameters"} + // +optional + Parameters []string `json:"parameters,omitempty"` +} + // UpgradeState captures the state of the upgrade process on a node // +enum type UpgradeState string diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e55c07f4..e1cab307 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -342,6 +342,7 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(bool) **out = **in } + in.KernelModuleConfig.DeepCopyInto(&out.KernelModuleConfig) if in.Blacklist != nil { in, out := &in.Blacklist, &out.Blacklist *out = new(bool) @@ -456,6 +457,36 @@ func (in *ImageSignSpec) DeepCopy() *ImageSignSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelModuleConfigSpec) DeepCopyInto(out *KernelModuleConfigSpec) { + *out = *in + if in.LoadArgs != nil { + in, out := &in.LoadArgs, &out.LoadArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UnloadArgs != nil { + in, out := &in.UnloadArgs, &out.UnloadArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelModuleConfigSpec. +func (in *KernelModuleConfigSpec) DeepCopy() *KernelModuleConfigSpec { + if in == nil { + return nil + } + out := new(KernelModuleConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeRbacConfig) DeepCopyInto(out *KubeRbacConfig) { *out = *in diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 500360d2..f5f82154 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -359,6 +359,30 @@ spec: path: driver.tolerations x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:tolerations + - description: advanced arguments, parameters and more configs to manage tne + driver + displayName: KernelModuleConfig + path: driver.kernelModuleConfig + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:kernelModuleConfig + - description: LoadArg are the arguments when modprobe is executed to load the + kernel module. The command will be `modprobe ${Args} module_name`. + displayName: LoadArg + path: driver.kernelModuleConfig.loadArgs + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:loadArg + - description: Parameters is being used for modprobe commands. The command will + be `modprobe ${Args} module_name ${Parameters}`. + displayName: Parameters + path: driver.kernelModuleConfig.parameters + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:parameters + - description: UnloadArg are the arguments when modprobe is executed to unload + the kernel module. The command will be `modprobe -r ${Args} module_name`. 
+ displayName: UnloadArg + path: driver.kernelModuleConfig.unloadArgs + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:unloadArg - description: policy to upgrade the drivers displayName: UpgradePolicy path: driver.upgradePolicy diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index da6b5083..c88c90b4 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -523,6 +523,31 @@ spec: type: string type: object type: array + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe + ${Args} module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is + executed to unload the kernel module. The command will be + `modprobe -r ${Args} module_name`. + items: + type: string + type: array + type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 7a3bb967..92a5b7a3 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -519,6 +519,31 @@ spec: type: string type: object type: array + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe + ${Args} module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is + executed to unload the kernel module. The command will be + `modprobe -r ${Args} module_name`. + items: + type: string + type: array + type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index f6e08672..504a798f 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -330,6 +330,30 @@ spec: path: driver.tolerations x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:tolerations + - description: advanced arguments, parameters and more configs to manage tne + driver + displayName: KernelModuleConfig + path: driver.kernelModuleConfig + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:kernelModuleConfig + - description: LoadArg are the arguments when modprobe is executed to load the + kernel module. The command will be `modprobe ${Args} module_name`. + displayName: LoadArg + path: driver.kernelModuleConfig.loadArgs + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:loadArg + - description: Parameters is being used for modprobe commands. The command will + be `modprobe ${Args} module_name ${Parameters}`. 
+ displayName: Parameters + path: driver.kernelModuleConfig.parameters + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:parameters + - description: UnloadArg are the arguments when modprobe is executed to unload + the kernel module. The command will be `modprobe -r ${Args} module_name`. + displayName: UnloadArg + path: driver.kernelModuleConfig.unloadArgs + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:unloadArg - description: policy to upgrade the drivers displayName: UpgradePolicy path: driver.upgradePolicy diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 79a6d043..804ff577 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -526,6 +526,31 @@ spec: type: string type: object type: array + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe ${Args} + module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is executed + to unload the kernel module. The command will be `modprobe + -r ${Args} module_name`. + items: + type: string + type: array + type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 79a6d043..804ff577 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -526,6 +526,31 @@ spec: type: string type: object type: array + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe ${Args} + module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is executed + to unload the kernel module. The command will be `modprobe + -r ${Args} module_name`. 
+ items: + type: string + type: array + type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go index 3a82813e..f318acab 100644 --- a/internal/kmmmodule/kmmmodule.go +++ b/internal/kmmmodule/kmmmodule.go @@ -458,7 +458,7 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * Modprobe: kmmv1beta1.ModprobeSpec{ ModuleName: moduleName, FirmwarePath: firmwarePath, - Args: &kmmv1beta1.ModprobeArgs{}, + Args: getModprobeArgs(devConfig), Parameters: getModprobeParametersFromNodeInfo(nodes, devConfig), ModulesLoadingOrder: modLoadingOrder, }, @@ -715,6 +715,17 @@ func GetVersionLabelKV(devConfig *amdv1alpha1.DeviceConfig) (string, string) { return fmt.Sprintf(kmmNodeVersionLabelTemplate, devConfig.Namespace, devConfig.Name), devConfig.Spec.Driver.Version } +func getModprobeArgs(devCfg *amdv1alpha1.DeviceConfig) *kmmv1beta1.ModprobeArgs { + args := &kmmv1beta1.ModprobeArgs{} + if len(devCfg.Spec.Driver.KernelModuleConfig.LoadArgs) > 0 { + args.Load = devCfg.Spec.Driver.KernelModuleConfig.LoadArgs + } + if len(devCfg.Spec.Driver.KernelModuleConfig.UnloadArgs) > 0 { + args.Unload = devCfg.Spec.Driver.KernelModuleConfig.UnloadArgs + } + return args +} + func setKMMDevicePlugin(mod *kmmv1beta1.Module, devConfig *amdv1alpha1.DeviceConfig) { devicePluginImage := devConfig.Spec.DevicePlugin.DevicePluginImage if devicePluginImage == "" { @@ -760,6 +771,11 @@ func getNodeSelector(devConfig *amdv1alpha1.DeviceConfig) map[string]string { } func getModprobeParametersFromNodeInfo(nodes *v1.NodeList, devConfig *amdv1alpha1.DeviceConfig) []string { + // if users specified any modprobe parameters, use user provided parameters + if len(devConfig.Spec.Driver.KernelModuleConfig.Parameters) > 0 { + return devConfig.Spec.Driver.KernelModuleConfig.Parameters + } + switch devConfig.Spec.Driver.DriverType { case utils.DriverTypeContainer: // if selected nodes have VF device and the driver type is container, we need to pass specific argument to modprobe command From c125df46e9f138b56d2dc0e1ef43f0f8c8950775 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Thu, 29 May 2025 23:14:44 +0000 Subject: [PATCH 12/21] [e2e] Add new CRD fields into helm-e2e --- tests/helm-e2e/helm_e2e_test.go | 51 +++++++++++++++++---------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/tests/helm-e2e/helm_e2e_test.go b/tests/helm-e2e/helm_e2e_test.go index 7d73c368..aeddbc63 100644 --- a/tests/helm-e2e/helm_e2e_test.go +++ b/tests/helm-e2e/helm_e2e_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "github.com/ROCm/gpu-operator/api/v1alpha1" + utils "github.com/ROCm/gpu-operator/internal" ) const ( @@ -90,12 +91,12 @@ func (s *E2ESuite) upgradeHelmChart(c *C, expectErr bool, extraArgs []string) { } } -func (s *E2ESuite) verifyDefaultDeviceConfig(c *C, expect bool, +func (s *E2ESuite) verifyDefaultDeviceConfig(c *C, testName string, expect bool, expectSpec *v1alpha1.DeviceConfigSpec, verifyFunc func(expect, actual *v1alpha1.DeviceConfigSpec) bool) { devCfgList, err := s.dClient.DeviceConfigs(s.ns).List(v1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - assert.NoError(c, err, "error listing DeviceConfig") + assert.NoError(c, err, fmt.Sprintf("test %v error listing DeviceConfig", testName)) } if !expect && err != nil { // default CR was removed and even CRD was removed @@ -108,24 +109,24 @@ func (s *E2ESuite) verifyDefaultDeviceConfig(c *C, expect bool, if 
expect && err == nil && devCfgList != nil { // make sure only one default CR exists assert.True(c, len(devCfgList.Items) == 1, - "expect only one default DeviceConfig but got %+v %+v", - len(devCfgList.Items), devCfgList.Items) + "test %v expect only one default DeviceConfig but got %+v %+v", + testName, len(devCfgList.Items), devCfgList.Items) // verify metadata assert.True(c, devCfgList.Items[0].Name == defaultDeviceConfigName, - "expect default DeviceConfig name to be %v but got %v", - defaultDeviceConfigName, devCfgList.Items[0].Name) + "test %v expect default DeviceConfig name to be %v but got %v", + testName, defaultDeviceConfigName, devCfgList.Items[0].Name) assert.True(c, devCfgList.Items[0].Namespace == s.ns, - "expect default DeviceConfig namespace to be %v but got %v", - s.ns, devCfgList.Items[0].Namespace) + "test %v expect default DeviceConfig namespace to be %v but got %v", + testName, s.ns, devCfgList.Items[0].Namespace) // verify spec if expectSpec != nil && verifyFunc != nil { assert.True(c, verifyFunc(expectSpec, &devCfgList.Items[0].Spec), - fmt.Sprintf("expect %+v got %+v", expectSpec, &devCfgList.Items[0].Spec)) + fmt.Sprintf("test %v expect %+v got %+v", testName, expectSpec, &devCfgList.Items[0].Spec)) } return } - c.Fatalf("unexpected default CR, expect %+v list error %+v devCfgList %+v", - expect, err, devCfgList) + c.Fatalf("test %v unexpected default CR, expect %+v list error %+v devCfgList %+v", + testName, expect, err, devCfgList) } func (s *E2ESuite) verifySelector(expect, actual *v1alpha1.DeviceConfigSpec) bool { @@ -164,6 +165,7 @@ func (s *E2ESuite) verifyDevicePlugin(expect, actual *v1alpha1.DeviceConfigSpec) } func (s *E2ESuite) writeYAMLToFile(yamlContent string) error { + os.Remove(tmpValuesYamlPath) file, err := os.Create(tmpValuesYamlPath) if err != nil { return err @@ -180,32 +182,32 @@ func (s *E2ESuite) TestHelmInstallDefaultCR(c *C) { // uninstall + verify default CR was removed s.installHelmChart(c, false, nil) // verify default CR was created - s.verifyDefaultDeviceConfig(c, true, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmInstallDefaultCR - initial install", true, nil, nil) s.uninstallHelmChart(c, false, nil) // verify default CR was removed - s.verifyDefaultDeviceConfig(c, false, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmInstallDefaultCR - uninstall", false, nil, nil) } func (s *E2ESuite) TestHelmUpgradeDefaultCR(c *C) { s.installHelmChart(c, false, []string{"--set", "crds.defaultCR.install=false"}) // verify default CR was not created when disabled by --set - s.verifyDefaultDeviceConfig(c, false, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - initial install", false, nil, nil) s.upgradeHelmChart(c, false, nil) // verify that by default helm upgrade won't deploy default CR - s.verifyDefaultDeviceConfig(c, false, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - initial upgrade", false, nil, nil) s.upgradeHelmChart(c, false, []string{"--set", "crds.defaultCR.upgrade=true"}) // helm upgrade with --set to turn on crds.defaultCR.upgrade will deploy default CR - s.verifyDefaultDeviceConfig(c, true, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - upgrade to deploy default CR", true, nil, nil) s.uninstallHelmChart(c, false, nil) - s.verifyDefaultDeviceConfig(c, false, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - 1st uninstall", false, nil, nil) s.installHelmChart(c, false, nil) - s.verifyDefaultDeviceConfig(c, true, nil, nil) + 
s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - 2nd install", true, nil, nil) s.upgradeHelmChart(c, false, nil) // verify that default ugprade won't affect the existing default CR - s.verifyDefaultDeviceConfig(c, true, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - 2nd upgrade", true, nil, nil) s.uninstallHelmChart(c, false, nil) - s.verifyDefaultDeviceConfig(c, false, nil, nil) + s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - initial uninstall", false, nil, nil) } func (s *E2ESuite) TestHelmRenderDefaultCR(c *C) { @@ -314,9 +316,10 @@ deviceConfig: expectDefaultCR: true, expectSpec: &v1alpha1.DeviceConfigSpec{ Driver: v1alpha1.DriverSpec{ - Enable: &boolTrue, - Blacklist: &boolTrue, - Image: "test.io/username/repo", + Enable: &boolTrue, + DriverType: utils.DriverTypeContainer, + Blacklist: &boolTrue, + Image: "test.io/username/repo", ImageRegistrySecret: &corev1.LocalObjectReference{ Name: "pull-secret", }, @@ -878,6 +881,6 @@ deviceConfig: if tc.expectHelmCommandErr { continue } - s.verifyDefaultDeviceConfig(c, tc.expectDefaultCR, tc.expectSpec, tc.verifyFunc) + s.verifyDefaultDeviceConfig(c, tc.description, tc.expectDefaultCR, tc.expectSpec, tc.verifyFunc) } } From 99f750f1bf771fbdfa62b114bcf41f7d62667889 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Wed, 21 May 2025 08:49:19 +0000 Subject: [PATCH 13/21] [Feature] Configurable vfio binding config --- api/v1alpha1/deviceconfig_types.go | 11 ++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 21 +++++++++++++++++++ ...md-gpu-operator.clusterserviceversion.yaml | 6 ++++++ bundle/manifests/amd.com_deviceconfigs.yaml | 13 ++++++++++++ config/crd/bases/amd.com_deviceconfigs.yaml | 13 ++++++++++++ ...md-gpu-operator.clusterserviceversion.yaml | 6 ++++++ helm-charts-k8s/crds/deviceconfig-crd.yaml | 13 ++++++++++++ .../crds/deviceconfig-crd.yaml | 13 ++++++++++++ internal/controllers/workermgr/workermgr.go | 6 ++++++ 9 files changed, 102 insertions(+) diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index 8725a4d4..40a689be 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -88,6 +88,11 @@ type RegistryTLS struct { InsecureSkipTLSVerify *bool `json:"insecureSkipTLSVerify,omitempty"` } +type VFIOConfigSpec struct { + // list of PCI device IDs to load into vfio-pci driver. default is the list of AMD GPU PF/VF PCI device IDs based on driver type vf-passthrough/pf-passthrough. + DeviceIDs []string `json:"deviceIDs,omitempty"` +} + type DriverSpec struct { // enable driver install. default value is true. 
// disable is for skipping driver install/uninstall for dryrun or using in-tree amdgpu kernel module @@ -104,6 +109,12 @@ type DriverSpec struct { // +kubebuilder:default=container DriverType string `json:"driverType,omitempty"` + // vfio config + // specify the specific configs for binding PCI devices to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="VFIOConfig",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:vfioConfig"} + // +optional + VFIOConfig VFIOConfigSpec `json:"vfioConfig,omitempty"` + // advanced arguments, parameters and more configs to manage tne driver //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="KernelModuleConfig",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:kernelModuleConfig"} // +optional diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e1cab307..8538730d 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -342,6 +342,7 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(bool) **out = **in } + in.VFIOConfig.DeepCopyInto(&out.VFIOConfig) in.KernelModuleConfig.DeepCopyInto(&out.KernelModuleConfig) if in.Blacklist != nil { in, out := &in.Blacklist, &out.Blacklist @@ -849,3 +850,23 @@ func (in *UtilsContainerSpec) DeepCopy() *UtilsContainerSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VFIOConfigSpec) DeepCopyInto(out *VFIOConfigSpec) { + *out = *in + if in.DeviceIDs != nil { + in, out := &in.DeviceIDs, &out.DeviceIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFIOConfigSpec. 
+func (in *VFIOConfigSpec) DeepCopy() *VFIOConfigSpec { + if in == nil { + return nil + } + out := new(VFIOConfigSpec) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index f5f82154..2594e27b 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -435,6 +435,12 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version + - description: vfio config specify the specific configs for binding PCI devices + to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + displayName: VFIOConfig + path: driver.vfioConfig + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:vfioConfig - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index c88c90b4..03898d32 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -628,6 +628,19 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfioConfig: + description: |- + vfio config + specify the specific configs for binding PCI devices to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + properties: + deviceIDs: + description: list of PCI device IDs to load into vfio-pci + driver. default is the list of AMD GPU PF/VF PCI device + IDs based on driver type vf-passthrough/pf-passthrough. + items: + type: string + type: array + type: object type: object metricsExporter: description: metrics exporter diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 92a5b7a3..3afb8bcf 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -624,6 +624,19 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfioConfig: + description: |- + vfio config + specify the specific configs for binding PCI devices to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + properties: + deviceIDs: + description: list of PCI device IDs to load into vfio-pci + driver. default is the list of AMD GPU PF/VF PCI device + IDs based on driver type vf-passthrough/pf-passthrough. 
+ items: + type: string + type: array + type: object type: object metricsExporter: description: metrics exporter diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index 504a798f..fc1ae27d 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -406,6 +406,12 @@ spec: path: driver.version x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:version + - description: vfio config specify the specific configs for binding PCI devices + to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + displayName: VFIOConfig + path: driver.vfioConfig + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:vfioConfig - description: metrics exporter displayName: MetricsExporter path: metricsExporter diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 804ff577..869a44bc 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -631,6 +631,19 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfioConfig: + description: |- + vfio config + specify the specific configs for binding PCI devices to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + properties: + deviceIDs: + description: list of PCI device IDs to load into vfio-pci driver. + default is the list of AMD GPU PF/VF PCI device IDs based + on driver type vf-passthrough/pf-passthrough. + items: + type: string + type: array + type: object type: object metricsExporter: description: metrics exporter diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 804ff577..869a44bc 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -631,6 +631,19 @@ spec: version of the drivers source code, can be used as part of image of dockerfile source image default value for different OS is: ubuntu: 6.1.3, coreOS: 6.2.2 type: string + vfioConfig: + description: |- + vfio config + specify the specific configs for binding PCI devices to vfio-pci kernel module, applies for driver type vf-passthrough and pf-passthrough + properties: + deviceIDs: + description: list of PCI device IDs to load into vfio-pci driver. + default is the list of AMD GPU PF/VF PCI device IDs based + on driver type vf-passthrough/pf-passthrough. 
+            items:
+              type: string
+            type: array
+        type: object
     type: object
   metricsExporter:
     description: metrics exporter
diff --git a/internal/controllers/workermgr/workermgr.go b/internal/controllers/workermgr/workermgr.go
index 4a1c81f5..f0d45b80 100644
--- a/internal/controllers/workermgr/workermgr.go
+++ b/internal/controllers/workermgr/workermgr.go
@@ -199,8 +199,14 @@ func (w *workerMgr) getPodName(devConfig *amdv1alpha1.DeviceConfig, nodeName str
 func (w *workerMgr) getVFIOCommand(cmd string, devConfig *v1alpha1.DeviceConfig) string {
 	switch devConfig.Spec.Driver.DriverType {
 	case utils.DriverTypeVFPassthrough:
+		if len(devConfig.Spec.Driver.VFIOConfig.DeviceIDs) > 0 {
+			return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(devConfig.Spec.Driver.VFIOConfig.DeviceIDs, " "))
+		}
 		return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(utils.DefaultVFDeviceIDs, " "))
 	case utils.DriverTypePFPassthrough:
+		if len(devConfig.Spec.Driver.VFIOConfig.DeviceIDs) > 0 {
+			return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(devConfig.Spec.Driver.VFIOConfig.DeviceIDs, " "))
+		}
 		return strings.ReplaceAll(cmd, pciDeviceIDTemplate, strings.Join(utils.DefaultPFDeviceIDs, " "))
 	default:
 		// unlikely happen

From 4a4a67488df62ff63d92bee0fd1de03aecc65eb8 Mon Sep 17 00:00:00 2001
From: yansun1996
Date: Mon, 2 Jun 2025 20:04:16 +0000
Subject: [PATCH 14/21] [DOC] Add docs for KubeVirt integration

---
 docs/kubevirt/kubevirt.md | 265 ++++++++++++++++++++++++++++++++++++++
 docs/sphinx/_toc.yml      |   3 +
 docs/sphinx/_toc.yml.in   |   3 +
 3 files changed, 271 insertions(+)
 create mode 100644 docs/kubevirt/kubevirt.md

diff --git a/docs/kubevirt/kubevirt.md b/docs/kubevirt/kubevirt.md
new file mode 100644
index 00000000..765b9eeb
--- /dev/null
+++ b/docs/kubevirt/kubevirt.md
@@ -0,0 +1,265 @@
+# KubeVirt Integration
+
+## Overview
+
+The AMD GPU Operator now supports integration with [**KubeVirt**](https://kubevirt.io/), enabling virtual machines (VMs) running in Kubernetes to access AMD Instinct GPUs. This feature extends GPU acceleration capabilities beyond containers to virtualized workloads, making it ideal for hybrid environments that require both containerized and VM-based compute.
+
+## Key Benefits
+
+- **GPU Passthrough for VMs**: Assign AMD GPUs directly to KubeVirt-managed VMs.
+- **Unified GPU Management**: Use the same operator to manage GPU resources for both containers and VMs.
+- **Enhanced Workload Flexibility**: Run specialized workloads in VMs while leveraging GPU acceleration.
+
+## Prerequisites
+
+- Kubernetes v1.29.0+ with KubeVirt installed.
+- VF-Passthrough requires [AMD MxGPU GIM Driver](https://github.com/amd/MxGPU-Virtualization) supported GPUs.
+- VF-Passthrough requires that the host be configured properly to support SR-IOV (Single Root I/O Virtualization) related features by following the [GIM driver documentation](https://instinct.docs.amd.com/projects/virt-drv/en/latest/index.html).
+- Both VF-Passthrough and PF-Passthrough requires the host operating system has `vfio` related kernel module ready to use.
+
+## Configure KubeVirt
+
+Installing KubeVirt also installs a KubeVirt custom resource. Several configs are required on that resource in order to enable the AMD GPU Physical Function (PF) and Virtual Function (VF) to be used by KubeVirt:
+
+1. Enable the `HostDevices` feature gate.
+2. Add the PF or VF PCI device information to the host devices permitted list.
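+
+The `pciVendorSelector` in the permitted list is a PCI `vendor:device` ID pair. If you are unsure which IDs your GPUs expose, one way to look them up on the host is with `lspci` (a quick sketch; the exact output depends on your GPU model):
+
+```bash
+# List AMD devices (PCI vendor ID 1002) together with their [vendor:device] IDs
+lspci -nn | grep '\[1002:'
+```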
+
+For example, in order to add MI300X VF:
+
+```yaml
+$ kubectl get kubevirt -n kubevirt kubevirt -oyaml
+apiVersion: kubevirt.io/v1
+kind: KubeVirt
+metadata:
+  name: kubevirt
+  namespace: kubevirt
+spec:
+  configuration:
+    developerConfiguration:
+      featureGates:
+      - HostDevices
+    permittedHostDevices:
+      pciHostDevices:
+      - externalResourceProvider: true
+        pciVendorSelector: 1002:74b5
+        resourceName: amd.com/gpu
+```
+
+## Configure GPU Operator
+
+To enable KubeVirt support, choose VF-Passthrough or PF-Passthrough and configure the `DeviceConfig` custom resource appropriately for your scenario:
+
+### VF-Passthrough
+
+In order to bring up a guest VM with VF-based GPU passthrough, the [AMD MxGPU GIM Driver](https://github.com/amd/MxGPU-Virtualization) needs to be installed on the GPU hosts.
+
+#### Use inbox/pre-installed GIM driver
+
+If your GPU hosts already have the GIM driver pre-installed and you want to use it directly, you don't need the AMD GPU Operator to install it for you:
+
+1. Disable the out-of-tree driver management in `DeviceConfig`:
+```yaml
+spec:
+  driver:
+    enable: false
+```
+
+2. Make sure the AMD GPU VF on your host is already bound to the `vfio-pci` kernel module.
+```bash
+$ lspci -nnk | grep 1002 -A 3
+85:00.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Kernel driver in use: gim # PF device is being used by GIM driver to generate VF
+	Kernel modules: amdgpu
+85:02.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X VF] [1002:74b5]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device [1002:74a1]
+	Kernel driver in use: vfio-pci # VF device is bound to vfio-pci kernel module for passthrough
+	Kernel modules: amdgpu
+```
+
+3. Verify that the VF has been advertised as a resource by the device plugin:
+```yaml
+$ kubectl get node -oyaml | grep -i allocatable -A 5
+  allocatable:
+    amd.com/gpu: "1"
+```
+
+#### Use out-of-tree GIM driver installed by GPU Operator
+
+If you don't have the GIM driver installed on the GPU hosts, the AMD GPU Operator can install the out-of-tree GIM kernel module on your hosts and automatically bind the VF devices to the `vfio-pci` kernel module to make them ready for passthrough:
+
+1. Enable the out-of-tree driver management in `DeviceConfig`:
+```yaml
+spec:
+  driver:
+    # enable out-of-tree driver management
+    enable: true
+
+    # specify GIM driver version (https://github.com/amd/MxGPU-Virtualization/releases)
+    version: "8.1.0.K"
+
+    # specify the driver type as vf-passthrough
+    driverType: vf-passthrough
+
+    # specify the VF device IDs you want to bind to vfio-pci
+    # by default all the latest AMD Instinct GPU VF deviceIDs will be utilized to detect VF and bind to vfio-pci
+    #vfioConfig:
+    #  deviceIDs:
+    #    - 74b5 # MI300X VF
+    #    - 7410 # MI210 VF
+
+    # Specify your driver image repository here
+    # DO NOT include the image tag as AMD GPU Operator will automatically manage the image tag for you
+    # e.g. docker.io/username/amdgpu-driver
+    image: docker.io/username/gim-driver-image
+
+    # Specify the credential for your private registry if it requires credential to get pull/push access
+    # you can create the docker-registry type secret by running command like:
+    # kubectl create secret docker-registry mySecret -n KMM-NameSpace --docker-server=https://index.docker.io/v1/ --docker-username=xxx --docker-password=xxx
+    # Make sure you created the secret within the namespace that KMM operator is running
+    imageRegistrySecret:
+      name: my-pull-secret
+```
+
+2. Verify that the worker node is labeled with the proper driver type and vfio ready labels:
+```yaml
+$ kubectl get node -oyaml | grep operator.amd
+    gpu.operator.amd.com/kube-amd-gpu.test-deviceconfig.driver: vf-passthrough
+    gpu.operator.amd.com/kube-amd-gpu.test-deviceconfig.vfio.ready: ""
+```
+
+3. Verify that the AMD GPU VF on your host is bound to the `vfio-pci` kernel module.
+```bash
+$ lspci -nnk | grep 1002 -A 3
+85:00.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Kernel driver in use: gim # PF device is being used by GIM driver to generate VF
+	Kernel modules: amdgpu
+85:02.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X VF] [1002:74b5]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device [1002:74a1]
+	Kernel driver in use: vfio-pci # VF device is bound to vfio-pci kernel module for passthrough
+	Kernel modules: amdgpu
+```
+
+4. Verify that the VF has been advertised as a resource by the device plugin:
+```yaml
+$ kubectl get node -oyaml | grep -i allocatable -A 5
+  allocatable:
+    amd.com/gpu: "1"
+```
+
+### PF-Passthrough
+
+In order to bring up a guest VM with PF-based GPU passthrough, you don't have to install the [AMD MxGPU GIM Driver](https://github.com/amd/MxGPU-Virtualization) on the GPU hosts. However, binding the PF device to the `vfio-pci` kernel module is still required.
+
+#### Use your own method to manage the PF-Passthrough
+
+If you are using your own method to manage the PF device and it is already bound to `vfio-pci`, please:
+
+1. Disable the driver management of AMD GPU Operator:
+```yaml
+spec:
+  driver:
+    enable: false
+```
+
+2. Verify that the AMD GPU PF on your host is already bound to the `vfio-pci` kernel module.
+```bash
+$ lspci -nnk | grep 1002 -A 3
+85:00.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Kernel driver in use: vfio-pci # PF device is bound to vfio-pci
+	Kernel modules: amdgpu
+```
+
+3. Verify that the PF has been advertised as a resource by the device plugin:
+```yaml
+$ kubectl get node -oyaml | grep -i allocatable -A 5
+  allocatable:
+    amd.com/gpu: "1"
+```
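+
+If you still need to bind the PF to `vfio-pci` yourself, one common approach (a sketch, assuming the MI300X PF device ID `74a1` under AMD's vendor ID `1002`; adjust for your hardware) is the `vfio-pci` `new_id` sysfs interface:
+
+```bash
+# Load vfio-pci, then register the AMD PF vendor:device pair with it
+sudo modprobe vfio-pci
+echo "1002 74a1" | sudo tee /sys/bus/pci/drivers/vfio-pci/new_id
+```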
+
+#### Use AMD GPU Operator to manage PF-Passthrough vfio binding
+
+The AMD GPU Operator can help you bind the AMD GPU PF device to the `vfio-pci` kernel module on all the selected GPU hosts:
+
+1. Configure the `DeviceConfig` custom resource to use PF-Passthrough:
+```yaml
+spec:
+  driver:
+    # enable out-of-tree driver management
+    enable: true
+
+    # specify the driver type as pf-passthrough
+    driverType: pf-passthrough
+
+    # specify the PF device IDs you want to bind to vfio-pci
+    # by default all the latest AMD Instinct GPU PF deviceIDs will be utilized to detect PF and bind to vfio-pci
+    #vfioConfig:
+    #  deviceIDs:
+    #    - 74a1 # MI300X PF
+    #    - 740f # MI210 PF
+```
+
+2. Verify that the worker node is labeled with the proper driver type and vfio ready labels:
+```yaml
+$ kubectl get node -oyaml | grep operator.amd
+    gpu.operator.amd.com/kube-amd-gpu.test-deviceconfig.driver: pf-passthrough
+    gpu.operator.amd.com/kube-amd-gpu.test-deviceconfig.vfio.ready: ""
+```
+
+3. Verify that the AMD GPU PF on your host is bound to the `vfio-pci` kernel module.
+```bash
+$ lspci -nnk | grep 1002 -A 3
+85:00.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Aqua Vanjaram [Instinct MI300X] [1002:74a1]
+	Kernel driver in use: vfio-pci # PF device is bound to vfio-pci
+	Kernel modules: amdgpu
+```
+
+4. Verify that the PF has been advertised as a resource by the device plugin:
+```yaml
+$ kubectl get node -oyaml | grep -i allocatable -A 5
+  allocatable:
+    amd.com/gpu: "1"
+```
+
+## Create Guest VM
+
+After verifying that the PF or VF devices have been advertised by the device plugin successfully, you can start deploying guest VMs by creating KubeVirt custom resources. By specifying the host devices in the `VirtualMachine` or `VirtualMachineInstance` definition, the guest VM will be scheduled on a GPU host where the requested GPU resources are available. Here is an example:
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+...
+spec:
+  template:
+    spec:
+      domain:
+        devices:
+          hostDevices:
+          - deviceName: amd.com/gpu
+            name: gpu1
+...
+```
+
+Once the KubeVirt custom resource is created, you can check its status by running these commands to make sure the VMs are scheduled and ready:
+
+```bash
+kubectl get vm
+kubectl get vmi
+```
+
+Even after the `VirtualMachineInstance` becomes scheduled and ready, the guest VM may not be fully launched yet; you may need to wait some extra time for the guest VM to become fully ready and accessible. You can check the VM's boot progress by fetching the logs from the container guest-console-log.
+
+```bash
+kubectl logs virt-launcher-ubuntu2204-lbc7f -c guest-console-log
+```
+
+## Verify Guest VM
+
+Once the VM is up and ready to use, log in to the guest VM with the credentials you specified, then verify the list of available PCI devices to make sure the GPU was passed through into the guest VM. In this example the MI300X VF has been successfully passed into the guest VM.
+
+```bash
+$ lspci -nnk | grep -i 1002 -A 1
+09:00.0 Processing accelerators [1200]: Advanced Micro Devices, Inc. [AMD/ATI] Device [1002:74b5]
+	Subsystem: Advanced Micro Devices, Inc.
[AMD/ATI] Device [1002:74a1] +``` diff --git a/docs/sphinx/_toc.yml b/docs/sphinx/_toc.yml index 92d2920d..8a9ca595 100644 --- a/docs/sphinx/_toc.yml +++ b/docs/sphinx/_toc.yml @@ -59,6 +59,9 @@ subtrees: - file: dcm/device-config-manager-configmap - file: dcm/applying-partition-profiles - file: dcm/systemd_integration + - caption: KubeVirt + entries: + - file: kubevirt/kubevirt - caption: Specialized Networks entries: - file: specialized_networks/airgapped-install diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 92d2920d..8a9ca595 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -59,6 +59,9 @@ subtrees: - file: dcm/device-config-manager-configmap - file: dcm/applying-partition-profiles - file: dcm/systemd_integration + - caption: KubeVirt + entries: + - file: kubevirt/kubevirt - caption: Specialized Networks entries: - file: specialized_networks/airgapped-install From 71b69ce54592bafd26c5e4e774d2051a18e41540 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Tue, 3 Jun 2025 23:17:55 +0000 Subject: [PATCH 15/21] [Feature] Add KubeVirt related CRD fields in Helm default CR --- .../template-patch/default-deviceconfig.yaml | 30 ++++++++++++++ .../templates/default-deviceconfig.yaml | 30 ++++++++++++++ tests/helm-e2e/helm_e2e_test.go | 40 +++++++++++++++++-- 3 files changed, 97 insertions(+), 3 deletions(-) diff --git a/hack/k8s-patch/template-patch/default-deviceconfig.yaml b/hack/k8s-patch/template-patch/default-deviceconfig.yaml index d50069fa..a5cbaf7b 100644 --- a/hack/k8s-patch/template-patch/default-deviceconfig.yaml +++ b/hack/k8s-patch/template-patch/default-deviceconfig.yaml @@ -25,6 +25,36 @@ spec: blacklist: {{ .blacklist }} {{- end }} + {{- with .driverType }} + driverType: {{ . }} + {{- end }} + + {{- with .vfioConfig }} + vfioConfig: + {{- with .deviceIDs }} + deviceIDs: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + + {{- with .kernelModuleConfig }} + kernelModuleConfig: + {{- with .loadArgs }} + loadArgs: + {{- toYaml . | nindent 8 }} + {{- end }} + + {{- with .unloadArgs }} + unloadArgs: + {{- toYaml . | nindent 8 }} + {{- end }} + + {{- with .parameters }} + parameters: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .image }} image: {{ . }} {{- end }} diff --git a/helm-charts-k8s/templates/default-deviceconfig.yaml b/helm-charts-k8s/templates/default-deviceconfig.yaml index d50069fa..a5cbaf7b 100644 --- a/helm-charts-k8s/templates/default-deviceconfig.yaml +++ b/helm-charts-k8s/templates/default-deviceconfig.yaml @@ -25,6 +25,36 @@ spec: blacklist: {{ .blacklist }} {{- end }} + {{- with .driverType }} + driverType: {{ . }} + {{- end }} + + {{- with .vfioConfig }} + vfioConfig: + {{- with .deviceIDs }} + deviceIDs: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + + {{- with .kernelModuleConfig }} + kernelModuleConfig: + {{- with .loadArgs }} + loadArgs: + {{- toYaml . | nindent 8 }} + {{- end }} + + {{- with .unloadArgs }} + unloadArgs: + {{- toYaml . | nindent 8 }} + {{- end }} + + {{- with .parameters }} + parameters: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .image }} image: {{ . 
}} {{- end }} diff --git a/tests/helm-e2e/helm_e2e_test.go b/tests/helm-e2e/helm_e2e_test.go index aeddbc63..b6129bea 100644 --- a/tests/helm-e2e/helm_e2e_test.go +++ b/tests/helm-e2e/helm_e2e_test.go @@ -204,7 +204,7 @@ func (s *E2ESuite) TestHelmUpgradeDefaultCR(c *C) { s.installHelmChart(c, false, nil) s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - 2nd install", true, nil, nil) s.upgradeHelmChart(c, false, nil) - // verify that default ugprade won't affect the existing default CR + // verify that default upgrade won't affect the existing default CR s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - 2nd upgrade", true, nil, nil) s.uninstallHelmChart(c, false, nil) s.verifyDefaultDeviceConfig(c, "TestHelmUpgradeDefaultCR - initial uninstall", false, nil, nil) @@ -262,6 +262,22 @@ deviceConfig: driver: enable: true blacklist: true + driverType: container + vfioConfig: + deviceIDs: + - 74a1 + - 740f + kernelModuleConfig: + loadArgs: + - arg1=val1 + - arg2=val2 + unloadArgs: + - unloadArg1=unloadVal1 + - unloadArg2=unloadVal2 + parameters: + - parameter1=val1 + - parameter2=val2 + - parameter3=val3 image: "test.io/username/repo" imageRegistrySecret: name: pull-secret @@ -318,8 +334,26 @@ deviceConfig: Driver: v1alpha1.DriverSpec{ Enable: &boolTrue, DriverType: utils.DriverTypeContainer, - Blacklist: &boolTrue, - Image: "test.io/username/repo", + VFIOConfig: v1alpha1.VFIOConfigSpec{ + DeviceIDs: []string{"74a1", "740f"}, + }, + KernelModuleConfig: v1alpha1.KernelModuleConfigSpec{ + LoadArgs: []string{ + "arg1=val1", + "arg2=val2", + }, + UnloadArgs: []string{ + "unloadArg1=unloadVal1", + "unloadArg2=unloadVal2", + }, + Parameters: []string{ + "parameter1=val1", + "parameter2=val2", + "parameter3=val3", + }, + }, + Blacklist: &boolTrue, + Image: "test.io/username/repo", ImageRegistrySecret: &corev1.LocalObjectReference{ Name: "pull-secret", }, From d31241aa678d415c43fd0db311ce33bed7868d61 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Thu, 5 Jun 2025 01:45:22 +0000 Subject: [PATCH 16/21] [Fix] Fix DeviceConfig operand counter out-of-sync with Daemonset --- internal/controllers/watchers/daemonset.go | 44 +++++----------------- 1 file changed, 9 insertions(+), 35 deletions(-) diff --git a/internal/controllers/watchers/daemonset.go b/internal/controllers/watchers/daemonset.go index fadabad5..5c5e5a0e 100644 --- a/internal/controllers/watchers/daemonset.go +++ b/internal/controllers/watchers/daemonset.go @@ -159,48 +159,28 @@ func (h *DaemonsetEventHandler) patchDeviceConfigNodeStatus(ctx context.Context, err := h.client.Get(ctx, types.NamespacedName{Name: devConfigName, Namespace: ds.Namespace}, devConfig) if err != nil && !k8serrors.IsNotFound(err) { logger.Error(err, "cannot get DeviceConfig for handling daemonset event", - "namesace", ds.Namespace, "name", ds.Name) + "namespace", ds.Namespace, "name", ds.Name) return err } - latestDS := &v1.DaemonSet{} - err = h.client.Get(ctx, types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}, latestDS) - if err != nil && !k8serrors.IsNotFound(err) { - logger.Error(err, "cannot fetch daemonset for handling daemonset event", - "namesace", ds.Namespace, "name", ds.Name) - return err - } - // if err == nil the latest status counter will be pushed to DeviceConfig - // OR if err == NotFound, zero counter values will be pushed to DeviceConfig - devConfigCopy := devConfig.DeepCopy() - update := false switch { - case strings.HasSuffix(latestDS.Name, utils.MetricsExporterNameSuffix): - update = h.handleMetricsExporterStatus(latestDS, 
devConfig)
-	case strings.HasSuffix(latestDS.Name, utils.DevicePluginNameSuffix):
-		update = h.handleDevicePluginStatus(latestDS, devConfig)
+	case strings.HasSuffix(ds.Name, utils.MetricsExporterNameSuffix):
+		h.handleMetricsExporterStatus(ds, devConfig)
+	case strings.HasSuffix(ds.Name, utils.DevicePluginNameSuffix):
+		h.handleDevicePluginStatus(ds, devConfig)
 	}
 
-	if update {
-		err = h.client.Status().Patch(ctx, devConfig, client.MergeFrom(devConfigCopy))
-		if err != nil && !k8serrors.IsNotFound(err) {
-			logger.Error(err, "cannot patch DeviceConfig status")
-		}
-		return err
+	err = h.client.Status().Patch(ctx, devConfig, client.MergeFrom(devConfigCopy))
+	if err != nil && !k8serrors.IsNotFound(err) {
+		logger.Error(err, "cannot patch DeviceConfig status")
 	}
-	return nil
+	return err
 	}); err != nil {
 		logger.Error(err, fmt.Sprintf("failed to patch device config status for daemonset %+v", ds.Name))
 	}
 }
 
 func (h *DaemonsetEventHandler) handleMetricsExporterStatus(ds *v1.DaemonSet, devConfig *v1alpha1.DeviceConfig) bool {
-	if devConfig.Status.MetricsExporter.AvailableNumber == ds.Status.NumberAvailable &&
-		devConfig.Status.MetricsExporter.NodesMatchingSelectorNumber == ds.Status.NumberAvailable &&
-		devConfig.Status.MetricsExporter.DesiredNumber == ds.Status.DesiredNumberScheduled {
-		// if there is nothing to update, skip the patch operation
-		return false
-	}
 	devConfig.Status.MetricsExporter.AvailableNumber = ds.Status.NumberAvailable
 	devConfig.Status.MetricsExporter.NodesMatchingSelectorNumber = ds.Status.NumberAvailable
 	devConfig.Status.MetricsExporter.DesiredNumber = ds.Status.DesiredNumberScheduled
@@ -208,12 +188,6 @@ func (h *DaemonsetEventHandler) handleMetricsExporterStatus(ds *v1.DaemonSet, de
 }
 
 func (h *DaemonsetEventHandler) handleDevicePluginStatus(ds *v1.DaemonSet, devConfig *v1alpha1.DeviceConfig) bool {
-	if devConfig.Status.DevicePlugin.AvailableNumber == ds.Status.NumberAvailable &&
-		devConfig.Status.DevicePlugin.NodesMatchingSelectorNumber == ds.Status.NumberAvailable &&
-		devConfig.Status.DevicePlugin.DesiredNumber == ds.Status.DesiredNumberScheduled {
-		// if there is nothing to update, skip the patch operation
-		return false
-	}
 	devConfig.Status.DevicePlugin.AvailableNumber = ds.Status.NumberAvailable
 	devConfig.Status.DevicePlugin.NodesMatchingSelectorNumber = ds.Status.NumberAvailable
 	devConfig.Status.DevicePlugin.DesiredNumber = ds.Status.DesiredNumberScheduled

From 873784568c04371b23ec1992e2d41d0d7dc91a18 Mon Sep 17 00:00:00 2001
From: yansun1996
Date: Tue, 3 Jun 2025 20:31:15 +0000
Subject: [PATCH 17/21] [DOC] Provide more details on KubeVirt host
 configuration

---
 docs/kubevirt/kubevirt.md | 99 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/docs/kubevirt/kubevirt.md b/docs/kubevirt/kubevirt.md
index 765b9eeb..ef2e0f8a 100644
--- a/docs/kubevirt/kubevirt.md
+++ b/docs/kubevirt/kubevirt.md
@@ -15,7 +15,63 @@ The AMD GPU Operator now supports integration with [**KubeVirt**](https://kubevi
 - Kubernetes v1.29.0+ with KubeVirt installed.
 - VF-Passthrough requires [AMD MxGPU GIM Driver](https://github.com/amd/MxGPU-Virtualization) supported GPUs.
 - VF-Passthrough requires that the host be configured properly to support SR-IOV (Single Root I/O Virtualization) related features by following the [GIM driver documentation](https://instinct.docs.amd.com/projects/virt-drv/en/latest/index.html).
-- Both VF-Passthrough and PF-Passthrough requires the host operating system has `vfio` related kernel module ready to use.
+- Both VF-Passthrough and PF-Passthrough require the host operating system to have `vfio`-related kernel modules ready for use.
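+
+A quick way to confirm the `vfio` modules are available on a host (illustrative commands):
+
+```bash
+# vfio-pci is usually built as a loadable module; it should show up in lsmod
+sudo modprobe vfio-pci
+lsmod | grep vfio
+```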
+
+## Host Configuration
+
+### BIOS Setting
+
+You need to configure the System BIOS to enable virtualization-related features. For example, sample System BIOS settings may look like this (depending on vendor and BIOS version):
+
+* SR-IOV Support: Enable this option in the Advanced → PCI Subsystem Settings page.
+
+* Above 4G Decoding: Enable this option in the Advanced → PCI Subsystem Settings page.
+
+* PCIe ARI Support: Enable this option in the Advanced → PCI Subsystem Settings page.
+
+* IOMMU: Enable this option in the Advanced → NB Configuration page.
+
+* ACS Enabled: Enable this option in the Advanced → NB Configuration page.
+
+### GRUB Config Update
+
+* Edit GRUB Configuration File:
+Use a text editor to modify the /etc/default/grub file (the following example uses the “nano” text editor). Open the terminal and run the following command:
+```bash
+sudo nano /etc/default/grub
+```
+
+* Modify the `GRUB_CMDLINE_LINUX` Line:
+Look for the line that begins with `GRUB_CMDLINE_LINUX` and modify it to include the following parameters:
+```bash
+GRUB_CMDLINE_LINUX="modprobe.blacklist=amdgpu iommu=on amd_iommu=on"
+```
+If there are already parameters in the quotes, append your new parameters separated by spaces.
+```{note}
+If the host machine is running an Intel CPU, replace `amd_iommu` with `intel_iommu`.
+```
+
+* After modifying the configuration file, you need to update the GRUB settings by running the following command:
+```bash
+sudo update-grub
+```
+
+* Reboot Your System:
+For the changes to take effect, reboot your system using the following command:
+```bash
+sudo reboot
+```
+
+* Verifying changes:
+After the system reboots, confirm that the GRUB parameters were applied successfully by running:
+```bash
+cat /proc/cmdline
+```
+When you run the command above, you should see a line that includes:
+```bash
+modprobe.blacklist=amdgpu iommu=on amd_iommu=on
+```
+This indicates that your changes have been applied correctly.
 
 ## Configure KubeVirt
 
@@ -222,6 +278,47 @@ $ kubectl get node -oyaml | grep -i allocatable -A 5
     amd.com/gpu: "1"
 ```
+
+## GPU Operator Components
+
+### Device Plugin
+
+The Device Plugin is responsible for discovering AMD GPU devices and advertising them to Kubernetes for scheduling GPU workloads. It supports:
+
+- **Container workloads**: Standard GPU usage for containerized applications.
+- **VF Passthrough**: Virtual Function passthrough using SR-IOV enabled by the AMD GIM driver. All VFs are advertised under the resource name `amd.com/gpu`. The number of GPUs advertised corresponds to the number of unique IOMMU groups these VFs belong to, ensuring VMs are allocated VFs from distinct IOMMU groups for proper isolation.
+  - *MI210 Specifics*: For MI210-based nodes, VF assignment to a VM is restricted by its XGMI fabric architecture. VFs are grouped into "hives" (typically 4 VFs per hive). A VM can be assigned 1, 2, or 4 VFs from a single hive, or all 8 VFs from both hives.
+- **PF Passthrough**: Physical Function passthrough using the VFIO kernel module for exclusive GPU access. All PFs are advertised under the resource name `amd.com/gpu`.
+
+The Device Plugin assumes homogeneous nodes, meaning a node is configured to operate in a single mode: container, vf-passthrough, or pf-passthrough. All discoverable GPU resources on that node will be of the same type.
+
+The Device Plugin uses automatic mode detection. If no explicit operational mode is specified using the `driver_type` command-line argument, it inspects the system setup (such as the presence of /dev/kfd, virtfn* symlinks, or driver bindings) and selects the appropriate mode (container, vf-passthrough, or pf-passthrough) accordingly. This simplifies deployment and reduces manual configuration requirements.
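+
+A quick way to see the same signals the plugin inspects on a node (illustrative; paths assume a standard sysfs layout):
+
+```bash
+ls /dev/kfd                        # present when the amdgpu driver is loaded (container mode)
+ls /sys/bus/pci/devices/*/virtfn*  # virtfn links exist when SR-IOV VFs are carved out (vf-passthrough)
+lspci -nnk -d 1002: | grep 'Kernel driver in use'  # vfio-pci binding indicates passthrough
+```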
+
+### Node Labeler
+
+The Node Labeler automatically assigns meaningful labels to Kubernetes nodes to reflect GPU device capabilities, operational modes, and other essential details. These labels are crucial for scheduling and managing GPU resources effectively, especially in KubeVirt passthrough scenarios.
+
+A new label, `amd.com/gpu.mode` (and its beta counterpart `beta.amd.com/gpu.mode`), has been introduced to specify the GPU operational mode (container, vf-passthrough, or pf-passthrough) on the node. Existing labels such as `amd.com/gpu.device-id` (and `beta.amd.com/gpu.device-id`) and `amd.com/gpu.driver-version` (used for containerized workloads) have been extended to support VF and PF passthrough modes. Note that `amd.com/gpu.driver-version` is not applicable in `pf-passthrough` mode as the driver is not managed by the operator in this scenario.
+
+Similar to the Device Plugin, the Node Labeler can auto-detect the operational mode based on node configuration, or it can be explicitly set using the `driver-type` command-line argument.
+
+Key labels for PF and VF passthrough modes are listed below. Placeholders like `<device-id>`, `<count>`, and `<gim-driver-version>` represent actual device IDs (e.g., `74a1`, `74b5`), device counts, and GIM driver versions (e.g., `8.1.0.K`) respectively.
+
+**PF Passthrough Mode Labels:**
+- `amd.com/gpu.mode=pf-passthrough`
+- `beta.amd.com/gpu.mode=pf-passthrough`
+- `amd.com/gpu.device-id=<device-id>`
+- `beta.amd.com/gpu.device-id=<device-id>`
+- `beta.amd.com/gpu.device-id.<device-id>=<count>`
+
+**VF Passthrough Mode Labels:**
+- `amd.com/gpu.mode=vf-passthrough`
+- `beta.amd.com/gpu.mode=vf-passthrough`
+- `amd.com/gpu.device-id=<device-id>`
+- `beta.amd.com/gpu.device-id=<device-id>`
+- `beta.amd.com/gpu.device-id.<device-id>=<count>`
+- `amd.com/gpu.driver-version=<gim-driver-version>`
+
 ## Create Guest VM
 
 After verifying that the PF or VF devices have been advertised by the device plugin successfully, you can start deploying guest VMs by creating KubeVirt custom resources. By specifying the host devices in the `VirtualMachine` or `VirtualMachineInstance` definition, the guest VM will be scheduled on a GPU host where the requested GPU resources are available.
Here is an example: From 6173ed369d235e455687c182813e83ed818e80d2 Mon Sep 17 00:00:00 2001 From: Yan Sun Date: Tue, 17 Jun 2025 14:39:19 -0700 Subject: [PATCH 18/21] [Helm] Add MI350X and MI355X PF and VF device ID into NFD rule (#777) --- internal/utils.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/utils.go b/internal/utils.go index 43922be3..db4e81f1 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -86,8 +86,12 @@ var ( "74b9", // MI325X VF "7461", // Radeon Pro V710 MxGPU "73ae", // Radeon Pro V620 MxGPU + "75b0", // MI350X + "75b3", // MI355X } DefaultPFDeviceIDs = []string{ + "75a3", // MI355X + "75a0", // MI350X "74a5", // MI325X "74a2", // MI308X "74b6", // MI308X From a29309ad6574afd2d8346a50d326471509ccb4fa Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Wed, 21 May 2025 10:22:53 +0000 Subject: [PATCH 19/21] [Build] Optimize makefile to auto remove outdated binary tools --- Makefile | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Makefile b/Makefile index 617a33c6..cfbc2d57 100644 --- a/Makefile +++ b/Makefile @@ -481,6 +481,21 @@ rm -f $(1); \ fi endef +# remove-wrong-version-tool will use $1 $2 to check binary version +# any binary with mismatched version compared to $3 will be removed +# 1 - Path to the binary +# 2 - Version argument (e.g., --version) +# 3 - Expected version string (e.g., v0.17.0) +define remove-wrong-version-tool +@if [ -f $(1) ]; then \ +version_output=`$(1) $(2) 2>/dev/null || echo "not found"`; \ +echo "$$version_output" | grep -q $(3) || { \ +echo "Incorrect version ($$version_output), removing $(1)"; \ +rm -f $(1); \ +}; \ +fi +endef + OPERATOR_SDK = $(shell pwd)/bin/operator-sdk OPERATOR_SDK_VERSION=v1.32.0 .PHONY: operator-sdk From 6b7d7580b14efdb252816d256c7bcc7d97f225a8 Mon Sep 17 00:00:00 2001 From: yansun1996 Date: Wed, 13 Aug 2025 23:43:12 +0000 Subject: [PATCH 20/21] [Build] resolve conflicts for opensourcing kubevirt related commits --- ...md-gpu-operator.clusterserviceversion.yaml | 12 ++--- bundle/manifests/amd.com_deviceconfigs.yaml | 50 +++++++++---------- config/crd/bases/amd.com_deviceconfigs.yaml | 50 +++++++++---------- ...md-gpu-operator.clusterserviceversion.yaml | 10 ++-- helm-charts-k8s/Chart.lock | 2 +- helm-charts-k8s/crds/deviceconfig-crd.yaml | 50 +++++++++---------- helm-charts-openshift/Chart.lock | 2 +- .../crds/deviceconfig-crd.yaml | 50 +++++++++---------- 8 files changed, 113 insertions(+), 113 deletions(-) diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 2594e27b..42d02d53 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -32,7 +32,7 @@ metadata: capabilities: Seamless Upgrades categories: AI/Machine Learning,Monitoring containerImage: docker.io/rocm/gpu-operator:v1.2.0 - createdAt: "2025-08-09T01:44:36Z" + createdAt: "2025-08-13T23:39:41Z" description: |- Operator responsible for deploying AMD GPU kernel drivers, device plugin, device test runner and device metrics exporter For more information, visit [documentation](https://instinct.docs.amd.com/projects/gpu-operator/en/latest/) @@ -354,11 +354,6 @@ spec: path: driver.imageSign.keySecret x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:imageSignKeySecret - - description: tolerations for kmm module object - displayName: Tolerations - path: driver.tolerations - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:tolerations - 
description: advanced arguments, parameters and more configs to manage tne driver displayName: KernelModuleConfig @@ -383,6 +378,11 @@ spec: path: driver.kernelModuleConfig.unloadArgs x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:unloadArg + - description: tolerations for kmm module object + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:tolerations - description: policy to upgrade the drivers displayName: UpgradePolicy path: driver.upgradePolicy diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index 03898d32..84f36d2d 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -484,6 +484,31 @@ spec: type: object x-kubernetes-map-type: atomic type: object + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe + ${Args} module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is + executed to unload the kernel module. The command will be + `modprobe -r ${Args} module_name`. + items: + type: string + type: array + type: object tolerations: description: tolerations for kmm module object items: @@ -523,31 +548,6 @@ spec: type: string type: object type: array - kernelModuleConfig: - description: advanced arguments, parameters and more configs to - manage tne driver - properties: - loadArgs: - description: LoadArg are the arguments when modprobe is executed - to load the kernel module. The command will be `modprobe - ${Args} module_name`. - items: - type: string - type: array - parameters: - description: Parameters is being used for modprobe commands. - The command will be `modprobe ${Args} module_name ${Parameters}`. - items: - type: string - type: array - unloadArgs: - description: UnloadArg are the arguments when modprobe is - executed to unload the kernel module. The command will be - `modprobe -r ${Args} module_name`. - items: - type: string - type: array - type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 3afb8bcf..342b1eed 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -480,6 +480,31 @@ spec: type: object x-kubernetes-map-type: atomic type: object + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe + ${Args} module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is + executed to unload the kernel module. The command will be + `modprobe -r ${Args} module_name`. 
+ items: + type: string + type: array + type: object tolerations: description: tolerations for kmm module object items: @@ -519,31 +544,6 @@ spec: type: string type: object type: array - kernelModuleConfig: - description: advanced arguments, parameters and more configs to - manage tne driver - properties: - loadArgs: - description: LoadArg are the arguments when modprobe is executed - to load the kernel module. The command will be `modprobe - ${Args} module_name`. - items: - type: string - type: array - parameters: - description: Parameters is being used for modprobe commands. - The command will be `modprobe ${Args} module_name ${Parameters}`. - items: - type: string - type: array - unloadArgs: - description: UnloadArg are the arguments when modprobe is - executed to unload the kernel module. The command will be - `modprobe -r ${Args} module_name`. - items: - type: string - type: array - type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index fc1ae27d..8477fbd0 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -325,11 +325,6 @@ spec: path: driver.imageSign.keySecret x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:imageSignKeySecret - - description: tolerations for kmm module object - displayName: Tolerations - path: driver.tolerations - x-descriptors: - - urn:alm:descriptor:com.amd.deviceconfigs:tolerations - description: advanced arguments, parameters and more configs to manage tne driver displayName: KernelModuleConfig @@ -354,6 +349,11 @@ spec: path: driver.kernelModuleConfig.unloadArgs x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:unloadArg + - description: tolerations for kmm module object + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:tolerations - description: policy to upgrade the drivers displayName: UpgradePolicy path: driver.upgradePolicy diff --git a/helm-charts-k8s/Chart.lock b/helm-charts-k8s/Chart.lock index 6d62e1c8..e409fa85 100644 --- a/helm-charts-k8s/Chart.lock +++ b/helm-charts-k8s/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: file://./charts/kmm version: v1.0.0 digest: sha256:f9a315dd2ce3d515ebf28c8e9a6a82158b493ca2686439ec381487761261b597 -generated: "2025-08-09T01:44:10.510383817Z" +generated: "2025-08-13T23:39:27.892020259Z" diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 869a44bc..597f6ecc 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -487,6 +487,31 @@ spec: type: object x-kubernetes-map-type: atomic type: object + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe ${Args} + module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is executed + to unload the kernel module. 
The command will be `modprobe + -r ${Args} module_name`. + items: + type: string + type: array + type: object tolerations: description: tolerations for kmm module object items: @@ -526,31 +551,6 @@ spec: type: string type: object type: array - kernelModuleConfig: - description: advanced arguments, parameters and more configs to - manage tne driver - properties: - loadArgs: - description: LoadArg are the arguments when modprobe is executed - to load the kernel module. The command will be `modprobe ${Args} - module_name`. - items: - type: string - type: array - parameters: - description: Parameters is being used for modprobe commands. - The command will be `modprobe ${Args} module_name ${Parameters}`. - items: - type: string - type: array - unloadArgs: - description: UnloadArg are the arguments when modprobe is executed - to unload the kernel module. The command will be `modprobe - -r ${Args} module_name`. - items: - type: string - type: array - type: object upgradePolicy: description: policy to upgrade the drivers properties: diff --git a/helm-charts-openshift/Chart.lock b/helm-charts-openshift/Chart.lock index 8294f2bb..5fd9280d 100644 --- a/helm-charts-openshift/Chart.lock +++ b/helm-charts-openshift/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: file://./charts/kmm version: v1.0.0 digest: sha256:25200c34a5cc846a1275e5bf3fc637b19e909dc68de938189c5278d77d03f5ac -generated: "2025-08-09T01:44:30.971839872Z" +generated: "2025-08-13T23:39:39.216809884Z" diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 869a44bc..597f6ecc 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -487,6 +487,31 @@ spec: type: object x-kubernetes-map-type: atomic type: object + kernelModuleConfig: + description: advanced arguments, parameters and more configs to + manage tne driver + properties: + loadArgs: + description: LoadArg are the arguments when modprobe is executed + to load the kernel module. The command will be `modprobe ${Args} + module_name`. + items: + type: string + type: array + parameters: + description: Parameters is being used for modprobe commands. + The command will be `modprobe ${Args} module_name ${Parameters}`. + items: + type: string + type: array + unloadArgs: + description: UnloadArg are the arguments when modprobe is executed + to unload the kernel module. The command will be `modprobe + -r ${Args} module_name`. + items: + type: string + type: array + type: object tolerations: description: tolerations for kmm module object items: @@ -526,31 +551,6 @@ spec: type: string type: object type: array - kernelModuleConfig: - description: advanced arguments, parameters and more configs to - manage tne driver - properties: - loadArgs: - description: LoadArg are the arguments when modprobe is executed - to load the kernel module. The command will be `modprobe ${Args} - module_name`. - items: - type: string - type: array - parameters: - description: Parameters is being used for modprobe commands. - The command will be `modprobe ${Args} module_name ${Parameters}`. - items: - type: string - type: array - unloadArgs: - description: UnloadArg are the arguments when modprobe is executed - to unload the kernel module. The command will be `modprobe - -r ${Args} module_name`. 
- items: - type: string - type: array - type: object upgradePolicy: description: policy to upgrade the drivers properties: From f4fce6044d429325ddda0127f7ba3168d8d45bd8 Mon Sep 17 00:00:00 2001 From: vm Date: Thu, 17 Jul 2025 08:59:21 +0000 Subject: [PATCH 21/21] GPU Operator Integration with Remediation Workflows using Argo Workflows --- Makefile | 9 +- api/v1alpha1/deviceconfig_types.go | 22 + api/v1alpha1/zz_generated.deepcopy.go | 26 + ...md-gpu-operator.clusterserviceversion.yaml | 24 +- bundle/manifests/amd.com_deviceconfigs.yaml | 30 + cmd/main.go | 2 + config/crd/bases/amd.com_deviceconfigs.yaml | 30 + ...md-gpu-operator.clusterserviceversion.yaml | 22 + docs/autoremediation/auto-remediation.md | 187 + docs/sphinx/_toc.yml | 3 + docs/sphinx/_toc.yml.in | 3 + go.mod | 30 +- go.sum | 83 +- .../template-patch/deployment.yaml | 6 + .../metadata-patch/Chart.yaml | 5 + .../metadata-patch/values.yaml | 2 + .../template-patch/deployment.yaml | 3023 + hack/k8s-patch/metadata-patch/Chart.yaml | 6 +- hack/k8s-patch/metadata-patch/values.yaml | 8 + hack/k8s-patch/template-patch/argo-rbac.yaml | 46 + hack/k8s-patch/template-patch/deployment.yaml | 3 + helm-charts-k8s/Chart.lock | 7 +- helm-charts-k8s/Chart.yaml | 6 +- helm-charts-k8s/README.md | 5 +- .../charts/kmm/templates/deployment.yaml | 6 + helm-charts-k8s/charts/remediation/Chart.yaml | 5 + .../remediation/templates/deployment.yaml | 3023 + .../charts/remediation/values.yaml | 2 + helm-charts-k8s/crds/deviceconfig-crd.yaml | 29 + helm-charts-k8s/templates/argo-rbac.yaml | 46 + helm-charts-k8s/templates/deployment.yaml | 3 + helm-charts-k8s/values.yaml | 8 + helm-charts-openshift/Chart.lock | 2 +- .../crds/deviceconfig-crd.yaml | 29 + .../controllers/device_config_reconciler.go | 106 +- .../device_config_reconciler_test.go | 12 +- .../mock_device_config_reconciler.go | 29 + .../controllers/mock_remediation_handler.go | 315 + internal/controllers/remediation_handler.go | 972 + internal/kmmmodule/kmmmodule.go | 5 + internal/kmmmodule/kmmmodule_test.go | 10 + internal/metricsexporter/metricsexporter.go | 9 + tests/e2e/cluster_test.go | 281 +- tests/e2e/doc.go | 2 + tests/e2e/e2e_test.go | 7 + .../argoproj/argo-workflows/v3/LICENSE | 202 + .../argo-workflows/v3/errors/errors.go | 168 + .../v3/pkg/apis/workflow/common.go | 53 + .../v3/pkg/apis/workflow/register.go | 41 + .../v3/pkg/apis/workflow/v1alpha1/amount.go | 33 + .../pkg/apis/workflow/v1alpha1/anystring.go | 52 + .../v1alpha1/artifact_gc_task_types.go | 63 + .../v1alpha1/artifact_repository_types.go | 181 + .../cluster_workflow_template_types.go | 63 + .../v3/pkg/apis/workflow/v1alpha1/common.go | 74 + .../v1alpha1/container_set_template_types.go | 142 + .../workflow/v1alpha1/cron_workflow_types.go | 224 + .../pkg/apis/workflow/v1alpha1/data_types.go | 40 + .../v3/pkg/apis/workflow/v1alpha1/doc.go | 5 + .../workflow/v1alpha1/estimated_duration.go | 14 + .../pkg/apis/workflow/v1alpha1/event_types.go | 48 + .../apis/workflow/v1alpha1/generated.pb.go | 48009 ++++++++++++++++ .../apis/workflow/v1alpha1/generated.proto | 2292 + .../workflow/v1alpha1/generated.swagger.json | 15 + .../pkg/apis/workflow/v1alpha1/http_types.go | 62 + .../v3/pkg/apis/workflow/v1alpha1/info.go | 25 + .../v3/pkg/apis/workflow/v1alpha1/item.go | 119 + .../v3/pkg/apis/workflow/v1alpha1/label.go | 11 + .../v3/pkg/apis/workflow/v1alpha1/marshall.go | 86 + .../apis/workflow/v1alpha1/object_types.go | 24 + .../workflow/v1alpha1/openapi_generated.go | 8669 +++ .../apis/workflow/v1alpha1/plugin_types.go | 29 + 
.../v3/pkg/apis/workflow/v1alpha1/progress.go | 54 + .../v3/pkg/apis/workflow/v1alpha1/register.go | 56 + .../workflow/v1alpha1/task_result_types.go | 23 + .../apis/workflow/v1alpha1/task_set_types.go | 42 + .../v3/pkg/apis/workflow/v1alpha1/utils.go | 20 + .../workflow/v1alpha1/validation_utils.go | 118 + .../apis/workflow/v1alpha1/version_types.go | 30 + .../apis/workflow/v1alpha1/workflow_phase.go | 22 + .../v1alpha1/workflow_template_types.go | 62 + .../apis/workflow/v1alpha1/workflow_types.go | 3947 ++ .../v1alpha1/zz_generated.deepcopy.go | 4385 ++ .../client/clientset/versioned/clientset.go | 81 + .../v3/pkg/client/clientset/versioned/doc.go | 4 + .../client/clientset/versioned/scheme/doc.go | 4 + .../clientset/versioned/scheme/register.go | 40 + .../v1alpha1/clusterworkflowtemplate.go | 152 + .../typed/workflow/v1alpha1/cronworkflow.go | 162 + .../versioned/typed/workflow/v1alpha1/doc.go | 4 + .../workflow/v1alpha1/generated_expansion.go | 19 + .../typed/workflow/v1alpha1/workflow.go | 162 + .../workflow/v1alpha1/workflow_client.go | 108 + .../v1alpha1/workflowartifactgctask.go | 179 + .../workflow/v1alpha1/workfloweventbinding.go | 162 + .../workflow/v1alpha1/workflowtaskresult.go | 162 + .../workflow/v1alpha1/workflowtaskset.go | 179 + .../workflow/v1alpha1/workflowtemplate.go | 162 + .../argo-workflows/v3/util/context/context.go | 35 + .../v3/util/deprecation/deprecation.go | 56 + .../argo-workflows/v3/util/json/fix.go | 11 + .../argo-workflows/v3/util/json/json.go | 36 + .../argo-workflows/v3/util/json/jsonify.go | 12 + .../github.com/evanphx/json-patch/README.md | 4 +- vendor/github.com/evanphx/json-patch/patch.go | 46 +- .../golang/protobuf/descriptor/descriptor.go | 180 + .../golang/protobuf/jsonpb/decode.go | 531 + .../golang/protobuf/jsonpb/encode.go | 560 + .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../protoc-gen-go/descriptor/descriptor.pb.go | 324 + .../golang/protobuf/ptypes/any/any.pb.go | 62 + .../protobuf/ptypes/duration/duration.pb.go | 63 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + .../protobuf/ptypes/wrappers/wrappers.pb.go | 71 + .../gorilla/websocket/.editorconfig | 20 + .../github.com/gorilla/websocket/.gitignore | 26 +- .../gorilla/websocket/.golangci.yml | 3 + vendor/github.com/gorilla/websocket/AUTHORS | 9 - vendor/github.com/gorilla/websocket/LICENSE | 39 +- vendor/github.com/gorilla/websocket/Makefile | 34 + vendor/github.com/gorilla/websocket/README.md | 19 +- vendor/github.com/gorilla/websocket/client.go | 44 +- .../gorilla/websocket/compression.go | 9 +- vendor/github.com/gorilla/websocket/conn.go | 83 +- vendor/github.com/gorilla/websocket/mask.go | 4 + vendor/github.com/gorilla/websocket/proxy.go | 17 +- vendor/github.com/gorilla/websocket/server.go | 42 +- .../gorilla/websocket/tls_handshake.go | 3 - .../gorilla/websocket/tls_handshake_116.go | 21 - vendor/github.com/gorilla/websocket/util.go | 19 +- .../gorilla/websocket/x_net_proxy.go | 473 - .../grpc-ecosystem/grpc-gateway/LICENSE.txt | 27 + .../grpc-gateway/internal/BUILD.bazel | 23 + .../grpc-gateway/internal/errors.pb.go | 189 + .../grpc-gateway/internal/errors.proto | 26 + .../grpc-gateway/runtime/BUILD.bazel | 85 + .../grpc-gateway/runtime/context.go | 291 + .../grpc-gateway/runtime/convert.go | 318 + .../grpc-gateway/runtime/doc.go | 5 + .../grpc-gateway/runtime/errors.go | 186 + .../grpc-gateway/runtime/fieldmask.go | 89 + .../grpc-gateway/runtime/handler.go | 212 + .../runtime/marshal_httpbodyproto.go | 43 + .../grpc-gateway/runtime/marshal_json.go | 45 + 
.../grpc-gateway/runtime/marshal_jsonpb.go | 262 + .../grpc-gateway/runtime/marshal_proto.go | 62 + .../grpc-gateway/runtime/marshaler.go | 55 + .../runtime/marshaler_registry.go | 99 + .../grpc-gateway/runtime/mux.go | 300 + .../grpc-gateway/runtime/pattern.go | 262 + .../grpc-gateway/runtime/proto2_convert.go | 80 + .../grpc-gateway/runtime/proto_errors.go | 106 + .../grpc-gateway/runtime/query.go | 406 + .../grpc-gateway/utilities/BUILD.bazel | 21 + .../grpc-gateway/utilities/doc.go | 2 + .../grpc-gateway/utilities/pattern.go | 22 + .../grpc-gateway/utilities/readerfactory.go | 20 + .../grpc-gateway/utilities/trie.go | 177 + .../github.com/imdario/mergo/CONTRIBUTING.md | 112 + vendor/github.com/imdario/mergo/README.md | 5 +- vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/map.go | 6 +- vendor/github.com/imdario/mergo/merge.go | 59 +- vendor/github.com/imdario/mergo/mergo.go | 11 +- .../testify/assert/assertion_compare.go | 35 +- .../testify/assert/assertion_format.go | 34 +- .../testify/assert/assertion_forward.go | 68 +- .../testify/assert/assertion_order.go | 10 +- .../stretchr/testify/assert/assertions.go | 157 +- .../testify/assert/yaml/yaml_custom.go | 25 + .../testify/assert/yaml/yaml_default.go | 37 + .../stretchr/testify/assert/yaml/yaml_fail.go | 18 + vendor/golang.org/x/oauth2/oauth2.go | 8 +- vendor/golang.org/x/oauth2/pkce.go | 4 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +- vendor/golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - vendor/golang.org/x/sys/unix/auxv.go | 36 + .../golang.org/x/sys/unix/auxv_unsupported.go | 13 + .../golang.org/x/sys/unix/syscall_solaris.go | 87 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 4 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsyscall_solaris_amd64.go | 114 + .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 +- vendor/golang.org/x/text/language/parse.go | 2 +- vendor/google.golang.org/genproto/LICENSE | 202 + .../genproto/googleapis/api/LICENSE | 202 + .../googleapis/api/httpbody/httpbody.pb.go | 235 + .../genproto/googleapis/rpc/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 203 + .../protobuf/field_mask/field_mask.go | 23 + 
vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/NOTICE.txt | 13 + .../grpc/codes/code_string.go | 111 + vendor/google.golang.org/grpc/codes/codes.go | 250 + .../grpc/connectivity/connectivity.go | 94 + .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 132 + .../google.golang.org/grpc/grpclog/logger.go | 87 + .../grpc/grpclog/loggerv2.go | 258 + .../grpc/internal/experimental.go | 28 + .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 93 + .../grpc/internal/internal.go | 239 + .../grpc/internal/status/status.go | 205 + .../grpc/internal/tcp_keepalive_others.go | 29 + .../grpc/internal/tcp_keepalive_unix.go | 54 + .../grpc/internal/tcp_keepalive_windows.go | 54 + .../grpc/metadata/metadata.go | 300 + .../grpc/serviceconfig/serviceconfig.go | 44 + .../google.golang.org/grpc/status/status.go | 162 + .../protobuf/encoding/protojson/decode.go | 685 + .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 380 + .../encoding/protojson/well_known_types.go | 880 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../protobuf/protoadapt/convert.go | 31 + .../types/known/durationpb/duration.pb.go | 357 + .../types/known/fieldmaskpb/field_mask.pb.go | 571 + .../types/known/wrapperspb/wrappers.pb.go | 623 + .../k8s.io/cli-runtime/pkg/resource/mapper.go | 2 +- vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go | 7 +- vendor/k8s.io/kubectl/pkg/drain/drain.go | 4 +- vendor/k8s.io/kubectl/pkg/drain/filters.go | 2 +- vendor/k8s.io/kubectl/pkg/scheme/install.go | 3 +- .../kubectl/de_DE/LC_MESSAGES/k8s.po | 4 +- .../kubectl/default/LC_MESSAGES/k8s.po | 4 +- .../kubectl/en_US/LC_MESSAGES/k8s.po | 4 +- .../kubectl/it_IT/LC_MESSAGES/k8s.po | 4 +- .../kubectl/ja_JP/LC_MESSAGES/k8s.po | 4 +- .../kubectl/pt_BR/LC_MESSAGES/k8s.po | 4 +- .../kubectl/zh_CN/LC_MESSAGES/k8s.po | 4 +- vendor/k8s.io/kubectl/pkg/util/openapi/doc.go | 2 +- vendor/modules.txt | 88 +- 268 files changed, 94546 insertions(+), 940 deletions(-) create mode 100644 docs/autoremediation/auto-remediation.md create mode 100644 hack/k8s-patch/k8s-remediation-patch/metadata-patch/Chart.yaml create mode 100644 hack/k8s-patch/k8s-remediation-patch/metadata-patch/values.yaml create mode 100644 hack/k8s-patch/k8s-remediation-patch/template-patch/deployment.yaml create mode 100644 hack/k8s-patch/template-patch/argo-rbac.yaml create mode 100644 helm-charts-k8s/charts/remediation/Chart.yaml create mode 100644 helm-charts-k8s/charts/remediation/templates/deployment.yaml create mode 100644 helm-charts-k8s/charts/remediation/values.yaml create mode 100644 helm-charts-k8s/templates/argo-rbac.yaml create mode 100644 internal/controllers/mock_remediation_handler.go create mode 100644 internal/controllers/remediation_handler.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/LICENSE create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go create mode 100644 
vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go create mode 100644 
vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/context/context.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/deprecation/deprecation.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go create mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go create mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 vendor/github.com/gorilla/websocket/.editorconfig create mode 100644 vendor/github.com/gorilla/websocket/.golangci.yml delete mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/Makefile delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 
vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/unix/auxv.go create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/api/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 
100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/internal/experimental.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go create mode 100644 vendor/google.golang.org/protobuf/protoadapt/convert.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go diff --git a/Makefile b/Makefile index cfbc2d57..6f6900f0 100644 --- a/Makefile +++ b/Makefile @@ -70,6 +70,10 @@ ifdef SKIP_INSTALL_DEFAULT_CR SKIP_INSTALL_DEFAULT_CR_CMD=--set crds.defaultCR.install=false endif +ifdef SKIP_REMEDIATION_CONTROLLER + SKIP_REMEDIATION_CONTROLLER_CMD=--set remediation.enabled=false +endif + ################################# # OpenShift OLM Bundle varaiables # BUNDLE_IMG defines the image:tag used for the bundle. 
@@ -325,6 +329,9 @@ helm-k8s: helmify manifests kustomize clean-helm-k8s gen-kmm-charts-k8s ## Build # Patching k8s helm chart kmm subchart cp $(shell pwd)/hack/k8s-patch/k8s-kmm-patch/metadata-patch/*.yaml $(shell pwd)/helm-charts-k8s/charts/kmm/ cp $(shell pwd)/hack/k8s-patch/k8s-kmm-patch/template-patch/*.yaml $(shell pwd)/helm-charts-k8s/charts/kmm/templates/ + mkdir -p $(shell pwd)/helm-charts-k8s/charts/remediation/templates + cp $(shell pwd)/hack/k8s-patch/k8s-remediation-patch/metadata-patch/*.yaml $(shell pwd)/helm-charts-k8s/charts/remediation/ + cp $(shell pwd)/hack/k8s-patch/k8s-remediation-patch/template-patch/*.yaml $(shell pwd)/helm-charts-k8s/charts/remediation/templates/ cd $(shell pwd)/helm-charts-k8s; helm dependency update; helm lint; cd ..; mkdir $(shell pwd)/helm-charts-k8s/crds echo "moving crd yaml files to crds folder" @@ -590,7 +597,7 @@ helm-uninstall-openshift: helm uninstall amd-gpu-operator -n kube-amd-gpu helm-install-k8s: - helm install -f helm-charts-k8s/values.yaml amd-gpu-operator ${GPU_OPERATOR_CHART} -n kube-amd-gpu --create-namespace ${SKIP_NFD_CMD} ${SKIP_KMM_CMD} ${HELM_OC_CMD} ${SIM_ENABLE_CMD} ${SKIP_INSTALL_DEFAULT_CR_CMD} + helm install -f helm-charts-k8s/values.yaml amd-gpu-operator ${GPU_OPERATOR_CHART} -n kube-amd-gpu --create-namespace ${SKIP_NFD_CMD} ${SKIP_KMM_CMD} ${SKIP_REMEDIATION_CONTROLLER_CMD} ${HELM_OC_CMD} ${SIM_ENABLE_CMD} ${SKIP_INSTALL_DEFAULT_CR_CMD} helm-uninstall-k8s: echo "Deleting all device configs before uninstalling operator..." diff --git a/api/v1alpha1/deviceconfig_types.go b/api/v1alpha1/deviceconfig_types.go index 40a689be..a388a86d 100644 --- a/api/v1alpha1/deviceconfig_types.go +++ b/api/v1alpha1/deviceconfig_types.go @@ -75,6 +75,28 @@ type DeviceConfigSpec struct { //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Selector",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:selector"} // +optional Selector map[string]string `json:"selector,omitempty"` + + // remediation workflow + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="RemediationWorkflow",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:remediationWorkflow"} + // +optional + RemediationWorkflow RemediationWorkflowSpec `json:"remediationWorkflow,omitempty"` +} + +// RemediationWorkflowSpec defines workflows to run based on node conditions +type RemediationWorkflowSpec struct { + // enable remediation workflows. disabled by default + // enable if operator should automatically handle remediation of node incase of gpu issues + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Enable",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:enable"} + Enable *bool `json:"enable,omitempty"` + + // Name of the ConfigMap that holds condition-to-workflow mappings. + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="ConditionalWorkflows",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:conditionalWorkflows"} + ConditionalWorkflows *v1.LocalObjectReference `json:"conditionalWorkflows,omitempty"` + + // Time to live for argo workflow object and its pods for a failed workflow in hours. 
By default, it is set to 24 hours + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TtlForFailedWorkflows",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:ttlForFailedWorkflows"} + // +kubebuilder:default:=24 + TtlForFailedWorkflows int `json:"ttlForFailedWorkflows,omitempty"` } type RegistryTLS struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 8538730d..7426ae46 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -213,6 +213,7 @@ func (in *DeviceConfigSpec) DeepCopyInto(out *DeviceConfigSpec) { (*out)[key] = val } } + in.RemediationWorkflow.DeepCopyInto(&out.RemediationWorkflow) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfigSpec. @@ -700,6 +701,31 @@ func (in *RegistryTLS) DeepCopy() *RegistryTLS { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemediationWorkflowSpec) DeepCopyInto(out *RemediationWorkflowSpec) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.ConditionalWorkflows != nil { + in, out := &in.ConditionalWorkflows, &out.ConditionalWorkflows + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationWorkflowSpec. +func (in *RemediationWorkflowSpec) DeepCopy() *RemediationWorkflowSpec { + if in == nil { + return nil + } + out := new(RemediationWorkflowSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceMonitorConfig) DeepCopyInto(out *ServiceMonitorConfig) { *out = *in diff --git a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml index 42d02d53..3a3c164b 100644 --- a/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/amd-gpu-operator.clusterserviceversion.yaml @@ -32,7 +32,7 @@ metadata: capabilities: Seamless Upgrades categories: AI/Machine Learning,Monitoring containerImage: docker.io/rocm/gpu-operator:v1.2.0 - createdAt: "2025-08-13T23:39:41Z" + createdAt: "2025-08-14T12:20:47Z" description: |- Operator responsible for deploying AMD GPU kernel drivers, device plugin, device test runner and device metrics exporter For more information, visit [documentation](https://instinct.docs.amd.com/projects/gpu-operator/en/latest/) @@ -645,6 +645,28 @@ spec: path: metricsExporter.upgradePolicy.upgradeStrategy x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:upgradeStrategy + - description: remediation workflow + displayName: RemediationWorkflow + path: remediationWorkflow + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:remediationWorkflow + - description: Name of the ConfigMap that holds condition-to-workflow mappings. + displayName: ConditionalWorkflows + path: remediationWorkflow.conditionalWorkflows + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:conditionalWorkflows + - description: enable remediation workflows. 
disabled by default enable if operator + should automatically handle remediation of node incase of gpu issues + displayName: Enable + path: remediationWorkflow.enable + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:enable + - description: Time to live for argo workflow object and its pods for a failed + workflow in hours. By default, it is set to 24 hours + displayName: TtlForFailedWorkflows + path: remediationWorkflow.ttlForFailedWorkflows + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:ttlForFailedWorkflows - description: Selector describes on which nodes the GPU Operator should enable the GPU device. displayName: Selector diff --git a/bundle/manifests/amd.com_deviceconfigs.yaml b/bundle/manifests/amd.com_deviceconfigs.yaml index 84f36d2d..c1e905ed 100644 --- a/bundle/manifests/amd.com_deviceconfigs.yaml +++ b/bundle/manifests/amd.com_deviceconfigs.yaml @@ -1269,6 +1269,36 @@ spec: type: string type: object type: object + remediationWorkflow: + description: remediation workflow + properties: + conditionalWorkflows: + description: Name of the ConfigMap that holds condition-to-workflow + mappings. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + enable: + description: |- + enable remediation workflows. disabled by default + enable if operator should automatically handle remediation of node incase of gpu issues + type: boolean + ttlForFailedWorkflows: + default: 24 + description: Time to live for argo workflow object and its pods + for a failed workflow in hours. By default, it is set to 24 + hours + type: integer + type: object selector: additionalProperties: type: string diff --git a/cmd/main.go b/cmd/main.go index abc38477..8b565a14 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -35,6 +35,7 @@ package main import ( "flag" + workflowv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" kmmv1beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -76,6 +77,7 @@ func init() { utilruntime.Must(kmmv1beta1.AddToScheme(scheme)) utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(monitoringv1.AddToScheme(scheme)) + utilruntime.Must(workflowv1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/config/crd/bases/amd.com_deviceconfigs.yaml b/config/crd/bases/amd.com_deviceconfigs.yaml index 342b1eed..91c1723b 100644 --- a/config/crd/bases/amd.com_deviceconfigs.yaml +++ b/config/crd/bases/amd.com_deviceconfigs.yaml @@ -1265,6 +1265,36 @@ spec: type: string type: object type: object + remediationWorkflow: + description: remediation workflow + properties: + conditionalWorkflows: + description: Name of the ConfigMap that holds condition-to-workflow + mappings. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + enable: + description: |- + enable remediation workflows. disabled by default + enable if operator should automatically handle remediation of node incase of gpu issues + type: boolean + ttlForFailedWorkflows: + default: 24 + description: Time to live for argo workflow object and its pods + for a failed workflow in hours. By default, it is set to 24 + hours + type: integer + type: object selector: additionalProperties: type: string diff --git a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml index 8477fbd0..6c348776 100644 --- a/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/amd-gpu-operator.clusterserviceversion.yaml @@ -616,6 +616,28 @@ spec: path: metricsExporter.upgradePolicy.upgradeStrategy x-descriptors: - urn:alm:descriptor:com.amd.deviceconfigs:upgradeStrategy + - description: remediation workflow + displayName: RemediationWorkflow + path: remediationWorkflow + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:remediationWorkflow + - description: Name of the ConfigMap that holds condition-to-workflow mappings. + displayName: ConditionalWorkflows + path: remediationWorkflow.conditionalWorkflows + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:conditionalWorkflows + - description: enable remediation workflows. disabled by default enable if operator + should automatically handle remediation of node incase of gpu issues + displayName: Enable + path: remediationWorkflow.enable + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:enable + - description: Time to live for argo workflow object and its pods for a failed + workflow in hours. By default, it is set to 24 hours + displayName: TtlForFailedWorkflows + path: remediationWorkflow.ttlForFailedWorkflows + x-descriptors: + - urn:alm:descriptor:com.amd.deviceconfigs:ttlForFailedWorkflows - description: Selector describes on which nodes the GPU Operator should enable the GPU device. displayName: Selector
diff --git a/docs/autoremediation/auto-remediation.md b/docs/autoremediation/auto-remediation.md
new file mode 100644
index 00000000..3b1a2489
--- /dev/null
+++ b/docs/autoremediation/auto-remediation.md
@@ -0,0 +1,187 @@
+# Auto Remediation of GPU nodes using Argo Workflows
+
+The GPU Operator supports remediation of GPU worker nodes that have moved into an unhealthy state due to GPU problems by triggering a workflow (a set of steps) that attempts to remediate the issue. To achieve this, the GPU Operator uses Argo Workflows, a popular, lightweight, and scalable open-source workflow engine for Kubernetes, together with its workflow templates. As part of its Helm installation, the GPU Operator installs the following:
+
+1) Argo workflow controller as a k8s deployment
+2) Argo CRDs for defining workflow templates and workflows
+
+GPU Operator installs Argo v3.6.5.
+
+The source yaml to install it is available here: https://github.com/argoproj/argo-workflows/releases/download/v3.6.5/install.yaml
+
+It has been modified to fit the requirements of this feature. For example, the workflow server is not necessary, so it does not get deployed as part of the GPU Operator-packaged Argo installation.
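+To confirm the packaged Argo components after installation, a minimal sanity check can help. This sketch assumes the default `kube-amd-gpu` namespace used by the operator's install commands elsewhere in this project; the exact deployment name may differ in your cluster:
+
+```bash
+# Argo CRDs (Workflow, WorkflowTemplate, ...) live in the argoproj.io API group
+kubectl get crd | grep argoproj.io
+
+# the workflow controller runs as an ordinary k8s deployment
+kubectl get deployments -n kube-amd-gpu | grep -i workflow
+```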
+## About Workflows and Workflow Templates
+
+The workflow controller is responsible for running a workflow and managing its lifecycle.
+
+By default, Argo Workflows uses the Kubernetes API server (etcd) as its database. Once a workflow is triggered, the controller maintains the workflow's running state and persists it in that database, so the state survives even if the workflow controller restarts mid-run.
+
+A typical workflow references a workflow template. A workflow template can either define a specific piece of work, or it can orchestrate a workflow. Each task within a workflow runs inside a container.
+
+Creating a `workflow-template` on the cluster stores the template with its steps in the k8s apiserver (etcd) but does not trigger any action.
+Creating a `workflow` which invokes a `workflow-template` stores the workflow in the k8s apiserver (etcd) and also triggers the actual steps in the template.
+The GPU Operator creates the `workflow` which invokes the `workflow-template` to trigger remediation, as sketched below.
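+As a purely illustrative sketch of that relationship (the `default-template` name matches the default template discussed later, but the echo step here is hypothetical, and the operator normally creates the `Workflow` object itself):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: WorkflowTemplate   # stored in etcd; creating it triggers nothing
+metadata:
+  name: default-template
+spec:
+  entrypoint: main
+  templates:
+    - name: main
+      container:
+        image: busybox
+        command: ["echo", "remediation step runs here"]   # placeholder step
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow           # creating this actually runs the template's steps
+metadata:
+  generateName: gpu-remediation-
+spec:
+  workflowTemplateRef:
+    name: default-template
+```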
+## Configuration to be handled by the User
+
+- Toggle `RemediationWorkflow.Enable` to true.
+
+- The NPD daemonset is relied upon to verify that the issue is fixed during the workflow run. Hence, the user needs to add this toleration to the NPD daemonset so that it can continue to be scheduled during the workflow run:
+
+  `amd-gpu-unhealthy:NoSchedule op=Exists`
+
+  The GPU Operator handles adding this toleration for in-house components like KMM and the metrics exporter, which should stay running during the workflow run.
+
+- If a workflow runs and fails, the node will remain in a tainted state. If the user wants to go ahead and make the node schedulable again for workloads, the node should be untainted with:
+
+  `kubectl taint node <node-name> amd-gpu-unhealthy:NoSchedule-`
+
+## How Workflows are triggered
+
+Node Problem Detector (NPD) can set node conditions by periodically listening to the GPU health reported by the device metrics exporter.
+The GPU Operator keeps monitoring the node conditions periodically and creates the appropriate workflow when a node condition's status moves to `True`. For example, the node condition below means the node is in a bad state:
+
+```yaml
+  - lastHeartbeatTime: "2025-08-04T08:56:04Z"
+    lastTransitionTime: "2025-08-04T08:56:04Z"
+    reason: "Temperature Threshold Exceeded"
+    status: "True"
+    type: AMDGPUUnhealthy
+```
+
+When the status of the node condition is `False`, that node condition is currently fine and the node is in a good state.
+These are the new fields introduced under the RemediationWorkflow field in the DeviceConfig CR:
+
+```go
+type RemediationWorkflowSpec struct {
+	// enable remediation workflows. disabled by default
+	// enable if operator should automatically handle remediation of node in case of gpu issues
+	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Enable",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:enable"}
+	Enable *bool `json:"enable,omitempty"`
+
+	// Name of the ConfigMap that holds condition-to-workflow mappings.
+	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="ConditionalWorkflows",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:conditionalWorkflows"}
+	ConditionalWorkflows *v1.LocalObjectReference `json:"conditionalWorkflows,omitempty"`
+
+	// Time to live for argo workflow object and its pods for a failed workflow in hours. By default, it is set to 24 hours
+	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TtlForFailedWorkflows",xDescriptors={"urn:alm:descriptor:com.amd.deviceconfigs:ttlForFailedWorkflows"}
+	// +kubebuilder:default:=24
+	TtlForFailedWorkflows int `json:"ttlForFailedWorkflows,omitempty"`
+}
+```
+
+The mappings are present in the configmap referenced by the ConditionalWorkflows field.
+The GPU Operator creates the `default-conditional-workflow-mappings` configmap on the cluster with some default mappings. The user can modify them if required and can add more mappings as well. To use this default configmap, leave the `RemediationWorkflow.ConditionalWorkflows` field empty in the CR. Users who do not want the default can create their own configmap and reference its name under `RemediationWorkflow.ConditionalWorkflows`.
+
+Note: `default-conditional-workflow-mappings` will be created on the cluster by the GPU Operator.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+data:
+  workflow: |-
+    - nodeCondition: "AMDGPUUnhealthy"
+      workflowTemplate: "default-template"
+      validationTestsProfile:
+        framework: "AGFHC"
+        recipe: "all_lvl4"
+        iterations: 1
+        stopOnFailure: true
+        timeoutSeconds: 4800
+```
+
+The `nodeCondition` field refers to the node condition that the user wants the Operator to watch for and trigger a remediation workflow on.
+
+The `workflowTemplate` field will use the default-template in most cases, which is discussed below. Users who want their own workflow template for a certain node condition can create the template on the cluster and put its name in this field, but the recommended way is to let the Operator handle it through the default-template.
+
+The `validationTestsProfile` field refers to the AGFHC/RVS test-profile to be run by the workflow to verify that the problem is fixed. The test-profile is passed on to the test runner for execution.
+
+```yaml
+validationTestsProfile:
+  framework: "AGFHC"
+  recipe: "all_lvl4"
+  iterations: 1
+  stopOnFailure: true
+  timeoutSeconds: 4800
+```
+
+If a user would like to run a test suite as part of the workflow, these fields under `validationTestsProfile` are mandatory, and they correspond to the fields of the same name in the [Test Runner Documentation](../test/manual-test.md).
+
+The `physicalActionNeeded` field refers to the physical action the user has to take for certain conditions that will not be fixed by a reboot. The action is listed for each of those conditions in `default-conditional-workflow-mappings`. For conditions where a reboot fixes the issue, this field is left empty.
+
+This integration works on the basis that NPD applies different node conditions for different critical errors.
+
+Note: The Operator ensures that when a node is tainted and a workflow is already running, no new workflows are triggered on the node.
+
+## Enable auto remediation
+
+To enable this feature, the user needs to toggle `RemediationWorkflow.Enable` to true in the Device Config CR. It is disabled by default.
+The most common CR will be of this form, which uses the `default-conditional-workflow-mappings` configmap for the ConditionalWorkflows field unless the user creates their own:
+
+```yaml
+  remediationWorkflow:
+    enable: true
+```
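+For reference, here is a fuller sketch with every `remediationWorkflow` field set. The CR name and the mappings ConfigMap name are placeholders, and `ttlForFailedWorkflows: 48` simply overrides the 24-hour default described above:
+
+```yaml
+apiVersion: amd.com/v1alpha1
+kind: DeviceConfig
+metadata:
+  name: gpu-deviceconfig            # placeholder name
+  namespace: kube-amd-gpu
+spec:
+  remediationWorkflow:
+    enable: true
+    # omit conditionalWorkflows to fall back to the
+    # default-conditional-workflow-mappings configmap
+    conditionalWorkflows:
+      name: my-workflow-mappings    # placeholder configmap name
+    ttlForFailedWorkflows: 48       # keep failed workflows for 48 hours
+```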
+
+## Enable auto remediation
+
+To enable this feature, the user needs to toggle `RemediationWorkflow.Enable` to `true` in the DeviceConfig CR. It is disabled by default.
+The most common CR will be of the following form, which uses `default-conditional-workflow-mappings` for the `ConditionalWorkflows` field unless the user creates their own configmap:
+
+```yaml
+  remediationWorkflow:
+    enable: true
+```
+
+## Default Workflow Template
+
+Note: `default-template` will be created on the cluster by GPU-Operator
+
+`default-template` performs the following steps:
+
+1. Taint the node with `key = "amd-gpu-unhealthy", op = Equal, value = node_condition, effect = NoSchedule`
+
+2. Check if physical intervention is required
+
+3. Suspend the workflow
+
+4. Drain workloads/pods that are using AMD GPUs
+
+5. Reboot the node
+
+6. Run AGFHC/RVS tests to verify the GPUs are healthy post reboot
+
+7. Verify that the node condition has become `False`
+
+8. Un-taint the node, making the GPUs available for scheduling again
+
+For each step in the workflow template, a pod is spun up that performs the task.
+If the user wants to create their own template, the Argo CRDs are present on the cluster, so the user can create any workflow template and refer to it in the config-map; a minimal sketch follows.
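+
+For example, a custom template could look like the sketch below; the template name and container image are hypothetical placeholders, not something shipped by the Operator:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: WorkflowTemplate
+metadata:
+  name: my-remediation-template    # hypothetical; this name goes in the config-map mapping
+spec:
+  entrypoint: remediate
+  templates:
+    - name: remediate
+      container:
+        image: busybox:stable      # placeholder for a real remediation step
+        command: ["sh", "-c", "echo remediating node"]
+```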
+
+Most steps in the default-template are self-explanatory. However, Steps 2, 3, and 6 warrant more detail.
+
+## Workflow Step 2: Check if physical intervention is required
+
+As per the AMD service action guide, many problems require the user to intervene physically (checking wiring, screws, retorquing, etc.). In such cases the workflow raises a k8s event suggesting the required physical action to the user before suspending the workflow in Step 3. If a physical action is needed for a certain node condition, it will be present in the `physicalActionNeeded` field of the configmap mapping corresponding to that node condition.
+
+The benefit of having this step is that the admin can see which node is waiting for physical intervention. Once the issue is fixed physically, the admin can simply resume the workflow for validation using the label mentioned in Workflow Step 3.
+
+## Workflow Step 3: Suspend/Resume the Workflow
+
+The GPU-Operator determines whether to resume the workflow after it has been paused in Step 2. This pause provides an opportunity for users to perform necessary manual actions. There are two primary scenarios where user intervention may be required:
+
+1. **Excessive Node Remediation:**
+   Users can define a `RecoveryPolicy` in the `ConditionalWorkflowMappings` ConfigMap, specifying the maximum number of recovery attempts allowed within a given time window. If a node exceeds this limit, the workflow remains paused.
+2. **Physical Action Required:**
+   If a physical action is specified for a workflow in the `ConditionalWorkflowMappings` ConfigMap, the node will pause at this step, allowing the user to perform the required action. The user is also notified via an event.
+
+If neither of these conditions applies, the workflow will automatically resume from this step.
+
+### Resuming a paused workflow
+Whenever the user is satisfied that the workflow can be resumed, they can add the label `operator.amd.com/gpu-force-resume-workflow=true` to the relevant node. The operator will detect this label and resume the workflow.
+
+To abort the workflow, label the node with `operator.amd.com/gpu-abort-workflow=true`. The node will remain in a tainted state for manual intervention. If remediation is no longer desired, this label provides the option to delete the workflow while the node is paused.
+
+## Workflow Step 6: Run AGFHC/RVS tests
+
+-> For each condition, the user specifies the test-profile to pass to the test runner under `validationTestsProfile` in the configmap.
+
+-> This workflow step creates a k8s job which spins up a test runner container that picks up the test-profile and runs it.
+
+-> The workflow step checks the test results and lets the workflow move ahead only if the tests pass. If the tests fail, the workflow fails.
+
+#### Notes
+During helm installation of the GPU Operator, installation of the remediation components (the workflow controller and CRDs) is enabled by default. If the admin does not require the auto remediation feature and would like to disable the installation of these components, they can simply pass this flag during the helm installation:
+
+ `--set remediation.enabled=false`
\ No newline at end of file
diff --git a/docs/sphinx/_toc.yml b/docs/sphinx/_toc.yml index 8a9ca595..f73ec07b 100644 --- a/docs/sphinx/_toc.yml +++ b/docs/sphinx/_toc.yml @@ -70,6 +70,9 @@ subtrees: - caption: Slurm on Kubernetes entries: - file: slinky/slinky-example + - caption: Auto Remediation entries: - file: autoremediation/auto-remediation - caption: Contributing entries: - file: contributing/developer-guide diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 8a9ca595..f73ec07b 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -70,6 +70,9 @@ subtrees: - caption: Slurm on Kubernetes entries: - file: slinky/slinky-example + - caption: Auto Remediation entries: + - file: autoremediation/auto-remediation - caption: Contributing entries: - file: contributing/developer-guide diff --git a/go.mod b/go.mod index cd8cea4a..2f087ac8 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/ROCm/gpu-operator -go 1.23.0 +go 1.23.1 toolchain go1.23.4 require ( + github.com/argoproj/argo-workflows/v3 v3.6.5 github.com/go-logr/logr v1.4.2 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 @@ -13,7 +14,7 @@ require ( github.com/prometheus/common v0.55.0 github.com/rh-ecosystem-edge/kernel-module-management v0.0.0-20250217131402-3522d8ca4d5f github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 go.uber.org/mock v0.4.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c @@ -23,7 +24,7 @@ require ( k8s.io/apimachinery v0.32.2 k8s.io/client-go v0.32.2 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.29.3 + k8s.io/kubectl v0.30.3 k8s.io/utils v0.0.0-20241210054802-24370beab758 open-cluster-management.io/api v0.13.0 sigs.k8s.io/controller-runtime v0.20.3 @@ -39,7 +40,7 @@ require ( github.com/chai2010/gettext-go v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch v5.8.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -58,9 +59,10 @@ require ( github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket
v1.5.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -90,19 +92,23 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect golang.org/x/net v0.34.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/term v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.36.3 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/cli-runtime v0.29.3 // indirect + k8s.io/cli-runtime v0.30.3 // indirect k8s.io/component-base v0.32.2 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/go.sum b/go.sum index 39d2a140..89cbfa12 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -6,6 +7,9 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg= github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/argoproj/argo-workflows/v3 v3.6.5 h1:0dvQr+CrhYgCc7ZJ4ZxusLVL/LKwHnpkT1lw/BP4+W4= +github.com/argoproj/argo-workflows/v3 v3.6.5/go.mod h1:w6kuZAqk9vLskhg8hT/67xCu89SFq5FE6sFk5POQD0o= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -19,20 +23,23 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.8.0+incompatible h1:1Av9pn2FyxPdvrWNQszj1g6D6YthSmvCfcN6SYclTJg= +github.com/evanphx/json-patch v5.8.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= @@ -41,6 +48,7 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -61,6 +69,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -89,14 +98,17 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -160,13 +172,14 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rh-ecosystem-edge/kernel-module-management v0.0.0-20250217131402-3522d8ca4d5f h1:npF3DvPQZCzPho3pvBy1pmZx7vGCIooMhVyo3viwgHw= github.com/rh-ecosystem-edge/kernel-module-management v0.0.0-20250217131402-3522d8ca4d5f/go.mod h1:xLQRe8xJfmxDwYJx0ObPSH7K+14EV2d793RoKEfhsY4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 
h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -178,8 +191,8 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= @@ -209,40 +222,45 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
+golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= +golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -265,10 +283,21 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -286,10 +315,10 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -300,8 +329,8 @@ k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscgh k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= 
k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= -k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= +k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= +k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= @@ -310,8 +339,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= -k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= +k8s.io/kubectl v0.30.3 h1:YIBBvMdTW0xcDpmrOBzcpUVsn+zOgjMYIu7kAq+yqiI= +k8s.io/kubectl v0.30.3/go.mod h1:IcR0I9RN2+zzTRUa1BzZCm4oM0NLOawE6RzlDvd1Fpo= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= diff --git a/hack/k8s-patch/k8s-kmm-patch/template-patch/deployment.yaml b/hack/k8s-patch/k8s-kmm-patch/template-patch/deployment.yaml index c7be70b4..a2554ffb 100644 --- a/hack/k8s-patch/k8s-kmm-patch/template-patch/deployment.yaml +++ b/hack/k8s-patch/k8s-kmm-patch/template-patch/deployment.yaml @@ -100,6 +100,9 @@ spec: terminationGracePeriodSeconds: 10 {{- with .Values.controller.manager.tolerations }} tolerations: + - key: "amd-gpu-unhealthy" + operator: "Exists" + effect: "NoSchedule" {{- toYaml . | nindent 8 }} {{- end }} volumes: @@ -191,6 +194,9 @@ spec: terminationGracePeriodSeconds: 10 {{- with .Values.webhookServer.webhookServer.tolerations }} tolerations: + - key: "amd-gpu-unhealthy" + operator: "Exists" + effect: "NoSchedule" {{- toYaml . 
| nindent 8 }} {{- end }} volumes: diff --git a/hack/k8s-patch/k8s-remediation-patch/metadata-patch/Chart.yaml b/hack/k8s-patch/k8s-remediation-patch/metadata-patch/Chart.yaml new file mode 100644 index 00000000..caf35c8e --- /dev/null +++ b/hack/k8s-patch/k8s-remediation-patch/metadata-patch/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +name: remediation-controller +description: A Helm chart for remediation workflow controller for AMD GPU Operator +type: application +version: v1.0.0 \ No newline at end of file diff --git a/hack/k8s-patch/k8s-remediation-patch/metadata-patch/values.yaml b/hack/k8s-patch/k8s-remediation-patch/metadata-patch/values.yaml new file mode 100644 index 00000000..52383fc8 --- /dev/null +++ b/hack/k8s-patch/k8s-remediation-patch/metadata-patch/values.yaml @@ -0,0 +1,2 @@ +controller: + image: "quay.io/argoproj/workflow-controller:v3.6.5" \ No newline at end of file diff --git a/hack/k8s-patch/k8s-remediation-patch/template-patch/deployment.yaml b/hack/k8s-patch/k8s-remediation-patch/template-patch/deployment.yaml new file mode 100644 index 00000000..c9f62061 --- /dev/null +++ b/hack/k8s-patch/k8s-remediation-patch/template-patch/deployment.yaml @@ -0,0 +1,3023 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io +spec: + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowartifactgctasks.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowArtifactGCTask + listKind: WorkflowArtifactGCTaskList + plural: workflowartifactgctasks + shortNames: + - wfat + singular: workflowartifactgctask + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + artifactsByNode: + additionalProperties: + properties: + archiveLocation: + properties: + archiveLogs: + type: boolean + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + 
properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: 
atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + raw: + properties: + data: + type: string + required: + - data + type: object + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + type: object + artifacts: + additionalProperties: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + 
artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: 
+ default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + 
x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: object + type: object + type: object + type: object + status: + properties: + artifactResultsByNode: + additionalProperties: + properties: + artifactResults: + additionalProperties: + properties: + error: + type: string + name: + type: string + success: + type: boolean + required: + - name + type: object + type: object + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + event: + properties: + selector: + type: string + required: + - selector + type: object + submit: + properties: + arguments: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + 
x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: 
atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + type: object + metadata: + type: object + workflowTemplateRef: + properties: + clusterScope: + type: boolean + name: + type: 
string + type: object + required: + - workflowTemplateRef + type: object + required: + - event + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io +spec: + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the workflow + jsonPath: .status.phase + name: Status + type: string + - description: When the workflow was started + format: date-time + jsonPath: .status.startedAt + name: Age + type: date + - description: Human readable message indicating details about why the workflow + is in this condition. + jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtaskresults.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: 
boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key 
+ type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + exitCode: + type: string + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string 
+          progress:
+            type: string
+        required:
+        - metadata
+        type: object
+    served: true
+    storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: workflowtasksets.argoproj.io
+spec:
+  group: argoproj.io
+  names:
+    kind: WorkflowTaskSet
+    listKind: WorkflowTaskSetList
+    plural: workflowtasksets
+    shortNames:
+    - wfts
+    singular: workflowtaskset
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            type: string
+          kind:
+            type: string
+          metadata:
+            type: object
+          spec:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+          status:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: workflowtemplates.argoproj.io
+spec:
+  group: argoproj.io
+  names:
+    kind: WorkflowTemplate
+    listKind: WorkflowTemplateList
+    plural: workflowtemplates
+    shortNames:
+    - wftmpl
+    singular: workflowtemplate
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            type: string
+          kind:
+            type: string
+          metadata:
+            type: object
+          spec:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argo
+  namespace: '{{ .Release.Namespace }}'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argo-role
+  namespace: '{{ .Release.Namespace }}'
+rules:
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+  name: argo-aggregate-to-admin
+rules:
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflows
+  - workflows/finalizers
+  - workfloweventbindings
+  - workfloweventbindings/finalizers
+  - workflowtemplates
+  - workflowtemplates/finalizers
+  - cronworkflows
+  - cronworkflows/finalizers
+  - clusterworkflowtemplates
+  - clusterworkflowtemplates/finalizers
+  - workflowtasksets
+  - workflowtasksets/finalizers
+  - workflowtaskresults
+  - workflowtaskresults/finalizers
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+  name: argo-aggregate-to-edit
+rules:
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflows
+  - workflows/finalizers
+  - workfloweventbindings
+  - workfloweventbindings/finalizers
+  - workflowtemplates
+  - workflowtemplates/finalizers
+  - cronworkflows
+  - cronworkflows/finalizers
+  - clusterworkflowtemplates
+  - clusterworkflowtemplates/finalizers
+  - workflowtaskresults
+  - workflowtaskresults/finalizers
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: argo-aggregate-to-view
+rules:
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflows
+  - workflows/finalizers
+  - workfloweventbindings
+  - workfloweventbindings/finalizers
+  - workflowtemplates
+  - workflowtemplates/finalizers
+  - cronworkflows
+  - cronworkflows/finalizers
+  - clusterworkflowtemplates
+  - clusterworkflowtemplates/finalizers
+  - workflowtaskresults
+  - workflowtaskresults/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: argo-cluster-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - pods/exec
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - watch
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  - persistentvolumeclaims/finalizers
+  verbs:
+  - create
+  - update
+  - delete
+  - get
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflows
+  - workflows/finalizers
+  - workflowtasksets
+  - workflowtasksets/finalizers
+  - workflowartifactgctasks
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - patch
+  - delete
+  - create
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflowtemplates
+  - workflowtemplates/finalizers
+  - clusterworkflowtemplates
+  - clusterworkflowtemplates/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - argoproj.io
+  resources:
+  - workflowtaskresults
+  verbs:
+  - list
+  - watch
+  - deletecollection
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - argoproj.io
+  resources:
+  - cronworkflows
+  - cronworkflows/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - create
+  - get
+  - delete
+- apiGroups:
+  - ""
+  resourceNames:
+  - argo-workflows-agent-ca-certificates
+  resources:
+  - secrets
+  verbs:
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: argo-binding
+  namespace: '{{ .Release.Namespace }}'
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: argo-role
+subjects:
+- kind: ServiceAccount
+  name: argo
+  namespace: '{{ .Release.Namespace }}'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: argo-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: argo-cluster-role
+subjects:
+- kind: ServiceAccount
+  name: argo
+  namespace: '{{ .Release.Namespace }}'
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: workflow-controller-configmap
+  namespace: '{{ .Release.Namespace }}'
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: workflow-controller
+value: 1000000
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: amd-gpu-operator-workflow-controller
+  namespace: '{{ .Release.Namespace }}'
+spec:
+  selector:
+    matchLabels:
+      app: amd-gpu-operator-workflow-controller
+  template:
+    metadata:
+      labels:
+        app: amd-gpu-operator-workflow-controller
+    spec:
+      containers:
+      - args: []
+        command:
+        - workflow-controller
+        env:
+        - name: LEADER_ELECTION_IDENTITY
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.name
+        image: {{ .Values.controller.image }}
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /healthz
+            port: 6060
+          initialDelaySeconds: 90
+          periodSeconds: 60
+          timeoutSeconds: 30
+        name: workflow-controller
+        ports:
+        - containerPort: 9090
+          name: metrics
+        - containerPort: 6060
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: workflow-controller
+      securityContext:
+        runAsNonRoot: true
+      serviceAccountName: argo
+      tolerations:
+      - key: "amd-gpu-unhealthy"
+        operator: "Exists"
+        effect: "NoSchedule"
\ No newline at end of file
diff --git a/hack/k8s-patch/metadata-patch/Chart.yaml b/hack/k8s-patch/metadata-patch/Chart.yaml
index 3d3b6301..0a593930 100644
--- a/hack/k8s-patch/metadata-patch/Chart.yaml
+++ b/hack/k8s-patch/metadata-patch/Chart.yaml
@@ -30,4 +30,8 @@ dependencies:
 - name: kmm
   version: v1.0.0
   repository: "file://./charts/kmm"
-  condition: kmm.enabled
\ No newline at end of file
+  condition: kmm.enabled
+- name: remediation-controller
+  version: v1.0.0
+  repository: "file://./charts/remediation"
+  condition: remediation.enabled
\ No newline at end of file
diff --git a/hack/k8s-patch/metadata-patch/values.yaml b/hack/k8s-patch/metadata-patch/values.yaml
index ba86c3da..e19d05f4 100644
--- a/hack/k8s-patch/metadata-patch/values.yaml
+++ b/hack/k8s-patch/metadata-patch/values.yaml
@@ -10,6 +10,9 @@ node-feature-discovery:
         operator: "Equal"
         value: "up"
         effect: "NoExecute"
+      - key: "amd-gpu-unhealthy"
+        operator: "Exists"
+        effect: "NoSchedule"
 
     # -- Set nodeSelector for NFD worker daemonset
     nodeSelector: {}
@@ -18,6 +21,11 @@ kmm:
   # -- Set to true/false to enable/disable the installation of kernel module management (KMM) operator
   enabled: true
 
+# Remediation related configs
+remediation:
+  # -- Set to true/false to enable/disable the installation of remediation workflow controller
+  enabled: true
+
 # -- Default NFD rule will detect amd gpu based on pci vendor ID
 installdefaultNFDRule: true
 
diff --git a/hack/k8s-patch/template-patch/argo-rbac.yaml b/hack/k8s-patch/template-patch/argo-rbac.yaml
new file mode 100644
index 00000000..d707b00d
--- /dev/null
+++ b/hack/k8s-patch/template-patch/argo-rbac.yaml
@@ -0,0 +1,46 @@
+{{- if .Values.remediation.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "helm-charts-k8s.fullname" . }}-argo-workflow
+  labels:
+    app.kubernetes.io/component: amd-gpu
+    app.kubernetes.io/part-of: amd-gpu
+    {{- include "helm-charts-k8s.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+  - argoproj.io
+  resources:
+  - '*'
+  verbs:
+  - '*'
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - '*'
+- apiGroups:
+  - ""
+  resources:
+  - pods/log
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "helm-charts-k8s.fullname" . }}-argo-workflow
+  labels:
+    app.kubernetes.io/component: amd-gpu
+    app.kubernetes.io/part-of: amd-gpu
+    {{- include "helm-charts-k8s.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: '{{ include "helm-charts-k8s.fullname" . }}-argo-workflow'
+subjects:
+- kind: ServiceAccount
+  name: '{{ include "helm-charts-k8s.fullname" . }}-controller-manager'
+  namespace: '{{ .Release.Namespace }}'
+{{- end }}
\ No newline at end of file
diff --git a/hack/k8s-patch/template-patch/deployment.yaml b/hack/k8s-patch/template-patch/deployment.yaml
index 6397c246..aa79b0c7 100644
--- a/hack/k8s-patch/template-patch/deployment.yaml
+++ b/hack/k8s-patch/template-patch/deployment.yaml
@@ -75,6 +75,9 @@ spec:
       terminationGracePeriodSeconds: 10
       {{- with .Values.controllerManager.manager.tolerations }}
       tolerations:
+      - key: "amd-gpu-unhealthy"
+        operator: "Exists"
+        effect: "NoSchedule"
       {{- toYaml . | nindent 8 }}
       {{- end }}
       volumes:
diff --git a/helm-charts-k8s/Chart.lock b/helm-charts-k8s/Chart.lock
index e409fa85..dc6f6596 100644
--- a/helm-charts-k8s/Chart.lock
+++ b/helm-charts-k8s/Chart.lock
@@ -5,5 +5,8 @@ dependencies:
 - name: kmm
   repository: file://./charts/kmm
   version: v1.0.0
-digest: sha256:f9a315dd2ce3d515ebf28c8e9a6a82158b493ca2686439ec381487761261b597
-generated: "2025-08-13T23:39:27.892020259Z"
+- name: remediation-controller
+  repository: file://./charts/remediation
+  version: v1.0.0
+digest: sha256:41fa6a6232514acebf6abdcb1bccaf087e134b9f413b8fa33a7fec1f58a99e07
+generated: "2025-08-14T12:20:36.836915168Z"
diff --git a/helm-charts-k8s/Chart.yaml b/helm-charts-k8s/Chart.yaml
index 3d3b6301..0a593930 100644
--- a/helm-charts-k8s/Chart.yaml
+++ b/helm-charts-k8s/Chart.yaml
@@ -30,4 +30,8 @@ dependencies:
 - name: kmm
   version: v1.0.0
   repository: "file://./charts/kmm"
-  condition: kmm.enabled
\ No newline at end of file
+  condition: kmm.enabled
+- name: remediation-controller
+  version: v1.0.0
+  repository: "file://./charts/remediation"
+  condition: remediation.enabled
\ No newline at end of file
diff --git a/helm-charts-k8s/README.md b/helm-charts-k8s/README.md
index ea8640fb..18f99fce 100644
--- a/helm-charts-k8s/README.md
+++ b/helm-charts-k8s/README.md
@@ -140,6 +140,7 @@ Kubernetes: `>= 1.29.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
 | file://./charts/kmm | kmm | v1.0.0 |
+| file://./charts/remediation | remediation-controller | v1.0.0 |
 | https://kubernetes-sigs.github.io/node-feature-discovery/charts | node-feature-discovery | v0.16.1 |
 
 ## Values
@@ -245,7 +246,8 @@ Kubernetes: `>= 1.29.0-0`
 | kmm.enabled | bool | `true` | Set to true/false to enable/disable the installation of kernel module management (KMM) operator |
 | node-feature-discovery.enabled | bool | `true` | Set to true/false to enable/disable the installation of node feature discovery (NFD) operator |
 | node-feature-discovery.worker.nodeSelector | object | `{}` | Set nodeSelector for NFD worker daemonset |
-| node-feature-discovery.worker.tolerations | list | `[{"effect":"NoExecute","key":"amd-dcm","operator":"Equal","value":"up"}]` | Set tolerations for NFD worker daemonset |
+| node-feature-discovery.worker.tolerations | list | `[{"effect":"NoExecute","key":"amd-dcm","operator":"Equal","value":"up"},{"effect":"NoSchedule","key":"amd-gpu-unhealthy","operator":"Exists"}]` | Set tolerations for NFD worker daemonset |
+| remediation.enabled | bool | `true` | Set to true/false to enable/disable the installation of remediation workflow controller |
 | upgradeCRD | bool | `true` | CRD will be patched as pre-upgrade/pre-rollback hook when doing helm upgrade/rollback to current helm chart |
 | kmm.controller.affinity | object | `{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"preference":{"matchExpressions":[{"key":"node-role.kubernetes.io/control-plane","operator":"Exists"}]},"weight":1}]}}` | Affinity for the KMM controller manager deployment |
 | kmm.controller.manager.args[0] | string | `"--config=controller_config.yaml"` | |
@@ -310,3 +312,4 @@ Kubernetes: `>= 1.29.0-0`
 | kmm.webhookService.ports[0].protocol | string | `"TCP"` | |
 | kmm.webhookService.ports[0].targetPort | int | `9443` | |
 | kmm.webhookService.type | string | `"ClusterIP"` | |
+| remediation-controller.controller.image | string | `"quay.io/argoproj/workflow-controller:v3.6.5"` | |
diff --git a/helm-charts-k8s/charts/kmm/templates/deployment.yaml b/helm-charts-k8s/charts/kmm/templates/deployment.yaml
index c7be70b4..a2554ffb 100644
--- a/helm-charts-k8s/charts/kmm/templates/deployment.yaml
+++ b/helm-charts-k8s/charts/kmm/templates/deployment.yaml
@@ -100,6 +100,9 @@ spec:
       terminationGracePeriodSeconds: 10
       {{- with .Values.controller.manager.tolerations }}
       tolerations:
+      - key: "amd-gpu-unhealthy"
+        operator: "Exists"
+        effect: "NoSchedule"
       {{- toYaml . | nindent 8 }}
       {{- end }}
       volumes:
@@ -191,6 +194,9 @@ spec:
      terminationGracePeriodSeconds: 10
       {{- with .Values.webhookServer.webhookServer.tolerations }}
       tolerations:
+      - key: "amd-gpu-unhealthy"
+        operator: "Exists"
+        effect: "NoSchedule"
       {{- toYaml . | nindent 8 }}
       {{- end }}
       volumes:
diff --git a/helm-charts-k8s/charts/remediation/Chart.yaml b/helm-charts-k8s/charts/remediation/Chart.yaml
new file mode 100644
index 00000000..caf35c8e
--- /dev/null
+++ b/helm-charts-k8s/charts/remediation/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: remediation-controller
+description: A Helm chart for remediation workflow controller for AMD GPU Operator
+type: application
+version: v1.0.0
\ No newline at end of file
diff --git a/helm-charts-k8s/charts/remediation/templates/deployment.yaml b/helm-charts-k8s/charts/remediation/templates/deployment.yaml
new file mode 100644
index 00000000..c9f62061
--- /dev/null
+++ b/helm-charts-k8s/charts/remediation/templates/deployment.yaml
@@ -0,0 +1,3023 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterworkflowtemplates.argoproj.io
+spec:
+  group: argoproj.io
+  names:
+    kind: ClusterWorkflowTemplate
+    listKind: ClusterWorkflowTemplateList
+    plural: clusterworkflowtemplates
+    shortNames:
+    - clusterwftmpl
+    - cwft
+    singular: clusterworkflowtemplate
+  scope: Cluster
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            type: string
+          kind:
+            type: string
+          metadata:
+            type: object
+          spec:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: cronworkflows.argoproj.io
+spec:
+  group: argoproj.io
+  names:
+    kind: CronWorkflow
+    listKind: CronWorkflowList
+    plural: cronworkflows
+    shortNames:
+    - cwf
+    - cronwf
+    singular: cronworkflow
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            type: string
+          kind:
+            type: string
+          metadata:
+            type: object
+          spec:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+          status:
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-preserve-unknown-fields: true
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: workflowartifactgctasks.argoproj.io
+spec:
+  group: argoproj.io
+  names:
+    kind:
WorkflowArtifactGCTask + listKind: WorkflowArtifactGCTaskList + plural: workflowartifactgctasks + shortNames: + - wfat + singular: workflowartifactgctask + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + artifactsByNode: + additionalProperties: + properties: + archiveLocation: + properties: + archiveLogs: + type: boolean + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + 
name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + raw: + properties: + data: + type: string + required: + - data + type: object + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: 
string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + type: object + artifacts: + additionalProperties: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + 
required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + 
name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: object + type: object + type: object + type: object + status: + properties: + artifactResultsByNode: + additionalProperties: + properties: + artifactResults: + additionalProperties: + properties: + error: + type: string + name: + type: string + success: + type: boolean + required: + - name + type: object + type: object + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + event: + properties: + selector: + type: string + required: + - selector + type: object + submit: + properties: + arguments: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + 
clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + parameters: + items: + properties: + default: + type: string + description: + type: string + 
enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + type: object + metadata: + type: object + workflowTemplateRef: + properties: + clusterScope: + type: boolean + name: + type: string + type: object + required: + - workflowTemplateRef + type: object + required: + - event + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io +spec: + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the workflow + jsonPath: .status.phase + name: Status + type: string + - description: When the workflow was started + format: date-time + jsonPath: .status.startedAt + name: Age + type: date + - description: Human readable message indicating details about why the workflow + is in this condition. + jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtaskresults.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + 
type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + exitCode: + type: string + parameters: + items: + 
properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string + progress: + type: string + required: + - metadata + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtasksets.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskSet + listKind: WorkflowTaskSetList + plural: workflowtasksets + shortNames: + - wfts + singular: workflowtaskset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + shortNames: + - wftmpl + singular: workflowtemplate + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo + namespace: '{{ .Release.Namespace }}' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-role + namespace: '{{ .Release.Namespace }}' +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: 
argo-aggregate-to-edit +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/finalizers + verbs: + - create + - update + - delete + - get +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete +- apiGroups: + - "" + resourceNames: + - argo-workflows-agent-ca-certificates + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-binding + namespace: '{{ .Release.Namespace }}' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-role +subjects: +- kind: ServiceAccount + name: argo + namespace: '{{ .Release.Namespace }}' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: '{{ .Release.Namespace }}' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap + namespace: '{{ .Release.Namespace }}' +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: workflow-controller +value: 1000000 +--- +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: amd-gpu-operator-workflow-controller + namespace: '{{ .Release.Namespace }}' +spec: + selector: + matchLabels: + app: amd-gpu-operator-workflow-controller + template: + metadata: + labels: + app: amd-gpu-operator-workflow-controller + spec: + containers: + - args: [] + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: {{ .Values.controller.image }} + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: true + serviceAccountName: argo + tolerations: + - key: "amd-gpu-unhealthy" + operator: "Exists" + effect: "NoSchedule" \ No newline at end of file diff --git a/helm-charts-k8s/charts/remediation/values.yaml b/helm-charts-k8s/charts/remediation/values.yaml new file mode 100644 index 00000000..52383fc8 --- /dev/null +++ b/helm-charts-k8s/charts/remediation/values.yaml @@ -0,0 +1,2 @@ +controller: + image: "quay.io/argoproj/workflow-controller:v3.6.5" \ No newline at end of file diff --git a/helm-charts-k8s/crds/deviceconfig-crd.yaml b/helm-charts-k8s/crds/deviceconfig-crd.yaml index 597f6ecc..95f67fcc 100644 --- a/helm-charts-k8s/crds/deviceconfig-crd.yaml +++ b/helm-charts-k8s/crds/deviceconfig-crd.yaml @@ -1270,6 +1270,35 @@ spec: type: string type: object type: object + remediationWorkflow: + description: remediation workflow + properties: + conditionalWorkflows: + description: Name of the ConfigMap that holds condition-to-workflow + mappings. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + enable: + description: |- + enable remediation workflows. Disabled by default. + Enable if the operator should automatically handle remediation of a node in case of GPU issues + type: boolean + ttlForFailedWorkflows: + default: 24 + description: Time to live, in hours, for the Argo Workflow object and its pods + of a failed workflow. By default, it is set to 24 hours + type: integer + type: object selector: + additionalProperties: + type: string diff --git a/helm-charts-k8s/templates/argo-rbac.yaml b/helm-charts-k8s/templates/argo-rbac.yaml new file mode 100644 index 00000000..d707b00d --- /dev/null +++ b/helm-charts-k8s/templates/argo-rbac.yaml @@ -0,0 +1,46 @@ +{{- if .Values.remediation.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "helm-charts-k8s.fullname" . }}-argo-workflow + labels: + app.kubernetes.io/component: amd-gpu + app.kubernetes.io/part-of: amd-gpu + {{- include "helm-charts-k8s.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - argoproj.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - jobs + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods/log + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "helm-charts-k8s.fullname" . }}-argo-workflow + labels: + app.kubernetes.io/component: amd-gpu + app.kubernetes.io/part-of: amd-gpu + {{- include "helm-charts-k8s.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "helm-charts-k8s.fullname" . }}-argo-workflow' +subjects: +- kind: ServiceAccount + name: '{{ include "helm-charts-k8s.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' +{{- end }} \ No newline at end of file diff --git a/helm-charts-k8s/templates/deployment.yaml b/helm-charts-k8s/templates/deployment.yaml index 6397c246..aa79b0c7 100644 --- a/helm-charts-k8s/templates/deployment.yaml +++ b/helm-charts-k8s/templates/deployment.yaml @@ -75,6 +75,9 @@ spec: terminationGracePeriodSeconds: 10 {{- with .Values.controllerManager.manager.tolerations }} tolerations: + - key: "amd-gpu-unhealthy" + operator: "Exists" + effect: "NoSchedule" {{- toYaml . | nindent 8 }} {{- end }} volumes: diff --git a/helm-charts-k8s/values.yaml b/helm-charts-k8s/values.yaml index ba86c3da..e19d05f4 100644 --- a/helm-charts-k8s/values.yaml +++ b/helm-charts-k8s/values.yaml @@ -10,6 +10,9 @@ node-feature-discovery: operator: "Equal" value: "up" effect: "NoExecute" + - key: "amd-gpu-unhealthy" + operator: "Exists" + effect: "NoSchedule" # -- Set nodeSelector for NFD worker daemonset nodeSelector: {} @@ -18,6 +21,11 @@ kmm: # -- Set to true/false to enable/disable the installation of kernel module management (KMM) operator enabled: true +# Remediation related configs +remediation: + # -- Set to true/false to enable/disable the installation of remediation workflow controller + enabled: true + # -- Default NFD rule will detect amd gpu based on pci vendor ID installdefaultNFDRule: true diff --git a/helm-charts-openshift/Chart.lock b/helm-charts-openshift/Chart.lock index 5fd9280d..a2b9495e 100644 --- a/helm-charts-openshift/Chart.lock +++ b/helm-charts-openshift/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: file://./charts/kmm version: v1.0.0 digest: sha256:25200c34a5cc846a1275e5bf3fc637b19e909dc68de938189c5278d77d03f5ac -generated: "2025-08-13T23:39:39.216809884Z" +generated: "2025-08-14T12:20:46.342279035Z" diff --git a/helm-charts-openshift/crds/deviceconfig-crd.yaml b/helm-charts-openshift/crds/deviceconfig-crd.yaml index 597f6ecc..95f67fcc 100644 --- a/helm-charts-openshift/crds/deviceconfig-crd.yaml +++ b/helm-charts-openshift/crds/deviceconfig-crd.yaml @@ -1270,6 +1270,35 @@ spec: type: string type: object type: object + remediationWorkflow: + description: remediation workflow + properties: + conditionalWorkflows: + description: Name of the ConfigMap that holds condition-to-workflow + mappings. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + enable: + description: |- + enable remediation workflows. 
Disabled by default. + Enable if the operator should automatically handle remediation of a node in case of GPU issues + type: boolean + ttlForFailedWorkflows: + default: 24 + description: Time to live, in hours, for the Argo Workflow object and its pods + of a failed workflow. By default, it is set to 24 hours + type: integer + type: object selector: + additionalProperties: + type: string diff --git a/internal/controllers/device_config_reconciler.go b/internal/controllers/device_config_reconciler.go index 7ff07799..b68c65f7 100644 --- a/internal/controllers/device_config_reconciler.go +++ b/internal/controllers/device_config_reconciler.go @@ -102,7 +102,8 @@ func NewDeviceConfigReconciler( workerMgr workermgr.WorkerMgrAPI, isOpenShift bool) *DeviceConfigReconciler { upgradeMgrHandler := newUpgradeMgrHandler(client, k8sConfig) - helper := newDeviceConfigReconcilerHelper(client, kmmHandler, nlHandler, upgradeMgrHandler, metricsHandler, testrunnerHandler, configmanagerHandler, workerMgr) + remediationMgrHandler := newRemediationMgrHandler(client, k8sConfig) + helper := newDeviceConfigReconcilerHelper(client, kmmHandler, nlHandler, upgradeMgrHandler, remediationMgrHandler, metricsHandler, testrunnerHandler, configmanagerHandler, workerMgr) podEventHandler := watchers.NewPodEventHandler(client, workerMgr) nodeEventHandler := watchers.NewNodeEventHandler(client, workerMgr) daemonsetEventHandler := watchers.NewDaemonsetEventHandler(client) @@ -219,6 +220,9 @@ func (r *DeviceConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request if _, err := r.helper.handleModuleUpgrade(ctx, devConfig, nodes, true); err != nil { logger.Error(err, fmt.Sprintf("upgrade manager delete device config error: %v", err)) } + if _, err := r.helper.handleRemediationWorkflow(ctx, devConfig, nodes, true); err != nil { + logger.Error(err, fmt.Sprintf("remediation manager delete device config error: %v", err)) + } // DeviceConfig is being deleted err = r.helper.finalizeDeviceConfig(ctx, devConfig, nodes) if err != nil { @@ -306,20 +310,31 @@ func (r *DeviceConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request return res, fmt.Errorf("failed to handle config manager for DeviceConfig %s: %v", req.NamespacedName, err) } + logger.Info("start remediation workflow reconciliation") + remediationRes, err := r.helper.handleRemediationWorkflow(ctx, devConfig, nodes, false) + // The upgrade manager and the remediation manager can each decide whether the overall reconcile loop needs a requeue. + // We therefore requeue if either or both of them require it, and if one of them needs a requeue sooner than the other, + // the earlier requeue is honoured. 
+ finalRes := r.helper.shouldReconcile(ctx, res, remediationRes) + + if err != nil { + return finalRes, fmt.Errorf("failed to handle remediation workflow for DeviceConfig %s: %v", req.NamespacedName, err) + } + err = r.helper.buildDeviceConfigStatus(ctx, devConfig, nodes) if err != nil { - return res, fmt.Errorf("failed to build status for DeviceConfig %s: %v", req.NamespacedName, err) + return finalRes, fmt.Errorf("failed to build status for DeviceConfig %s: %v", req.NamespacedName, err) } err = r.helper.updateDeviceConfigStatus(ctx, devConfig) if err != nil { - return res, fmt.Errorf("failed to update status for DeviceConfig %s: %v", req.NamespacedName, err) + return finalRes, fmt.Errorf("failed to update status for DeviceConfig %s: %v", req.NamespacedName, err) } // Update nodeAssignments after DeviceConfig status update r.helper.updateNodeAssignments(req.NamespacedName.String(), nodes, false) - return res, nil + return finalRes, nil } //go:generate mockgen -source=device_config_reconciler.go -package=controllers -destination=mock_device_config_reconciler.go deviceConfigReconcilerHelperAPI @@ -344,31 +359,35 @@ type deviceConfigReconcilerHelperAPI interface { handleMetricsExporter(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error handleTestRunner(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) error handleConfigManager(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error + handleRemediationWorkflow(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList, delete bool) (ctrl.Result, error) setCondition(ctx context.Context, condition string, devConfig *amdv1alpha1.DeviceConfig, status metav1.ConditionStatus, reason string, message string) error deleteCondition(ctx context.Context, condition string, devConfig *amdv1alpha1.DeviceConfig) error validateDeviceConfig(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) []string handleModuleUpgrade(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList, delete bool) (ctrl.Result, error) + shouldReconcile(ctx context.Context, upgradeRes, remediationRes ctrl.Result) ctrl.Result } type deviceConfigReconcilerHelper struct { - client client.Client - kmmHandler kmmmodule.KMMModuleAPI - nlHandler nodelabeller.NodeLabeller - metricsHandler metricsexporter.MetricsExporter - testrunnerHandler testrunner.TestRunner - configmanagerHandler configmanager.ConfigManager - nodeAssignments map[string]string - conditionUpdater conditions.ConditionUpdater - validator validator.ValidatorAPI - kmmPostProcessor workermgr.WorkerMgrAPI - upgradeMgrHandler upgradeMgrAPI - namespace string + client client.Client + kmmHandler kmmmodule.KMMModuleAPI + nlHandler nodelabeller.NodeLabeller + metricsHandler metricsexporter.MetricsExporter + testrunnerHandler testrunner.TestRunner + configmanagerHandler configmanager.ConfigManager + nodeAssignments map[string]string + conditionUpdater conditions.ConditionUpdater + validator validator.ValidatorAPI + kmmPostProcessor workermgr.WorkerMgrAPI + upgradeMgrHandler upgradeMgrAPI + remediationMgrHandler remediationMgrAPI + namespace string } func newDeviceConfigReconcilerHelper(client client.Client, kmmHandler kmmmodule.KMMModuleAPI, nlHandler nodelabeller.NodeLabeller, upgradeMgrHandler upgradeMgrAPI, + remediationMgrHandler remediationMgrAPI, metricsHandler metricsexporter.MetricsExporter, testrunnerHandler testrunner.TestRunner, configmanagerHandler configmanager.ConfigManager, @@ -376,19 +395,43 @@ func 
newDeviceConfigReconcilerHelper(client client.Client, conditionUpdater := conditions.NewDeviceConfigConditionMgr() validator := validator.NewValidator() return &deviceConfigReconcilerHelper{ - client: client, - kmmHandler: kmmHandler, - nlHandler: nlHandler, - metricsHandler: metricsHandler, - testrunnerHandler: testrunnerHandler, - configmanagerHandler: configmanagerHandler, - nodeAssignments: make(map[string]string), - conditionUpdater: conditionUpdater, - validator: validator, - kmmPostProcessor: workerMgr, - upgradeMgrHandler: upgradeMgrHandler, - namespace: os.Getenv("OPERATOR_NAMESPACE"), + client: client, + kmmHandler: kmmHandler, + nlHandler: nlHandler, + metricsHandler: metricsHandler, + testrunnerHandler: testrunnerHandler, + configmanagerHandler: configmanagerHandler, + nodeAssignments: make(map[string]string), + conditionUpdater: conditionUpdater, + validator: validator, + kmmPostProcessor: workerMgr, + upgradeMgrHandler: upgradeMgrHandler, + remediationMgrHandler: remediationMgrHandler, + namespace: os.Getenv("OPERATOR_NAMESPACE"), + } +} + +func (dcrh *deviceConfigReconcilerHelper) shouldReconcile(ctx context.Context, upgradeRes, remediationRes ctrl.Result) ctrl.Result { + var finalRes ctrl.Result + switch { + case upgradeRes.RequeueAfter > 0 && remediationRes.RequeueAfter > 0: + if upgradeRes.RequeueAfter < remediationRes.RequeueAfter { + finalRes = upgradeRes + } else { + finalRes = remediationRes + } + + case upgradeRes.RequeueAfter > 0: + finalRes = upgradeRes + + case remediationRes.RequeueAfter > 0: + finalRes = remediationRes + + default: + finalRes = ctrl.Result{} } + + return finalRes } func (dcrh *deviceConfigReconcilerHelper) listDeviceConfigs(ctx context.Context) (*amdv1alpha1.DeviceConfigList, error) { @@ -1249,6 +1292,13 @@ func (dcrh *deviceConfigReconcilerHelper) handleTestRunner(ctx context.Context, return nil } +func (dcrh *deviceConfigReconcilerHelper) handleRemediationWorkflow(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList, delete bool) (ctrl.Result, error) { + if delete { + return dcrh.remediationMgrHandler.HandleDelete(ctx, devConfig, nodes) + } + return dcrh.remediationMgrHandler.HandleRemediation(ctx, devConfig, nodes) +} + func (dcrh *deviceConfigReconcilerHelper) handleConfigManager(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) error { logger := log.FromContext(ctx) ds := &appsv1.DaemonSet{ diff --git a/internal/controllers/device_config_reconciler_test.go b/internal/controllers/device_config_reconciler_test.go index 3f0960cc..11e7ac60 100644 --- a/internal/controllers/device_config_reconciler_test.go +++ b/internal/controllers/device_config_reconciler_test.go @@ -194,7 +194,7 @@ var _ = Describe("getLabelsPerModules", func() { BeforeEach(func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -239,7 +239,7 @@ var _ = Describe("setFinalizer", func() { BeforeEach(func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -275,7 +275,7 @@ var _ = Describe("finalizeDeviceConfig", func() { BeforeEach(func() { ctrl 
:= gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -486,7 +486,7 @@ var _ = Describe("handleKMMModule", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) kmmHelper = kmmmodule.NewMockKMMModuleAPI(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -556,7 +556,7 @@ var _ = Describe("handleBuildConfigMap", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) kmmHelper = kmmmodule.NewMockKMMModuleAPI(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, kmmHelper, nil, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() @@ -623,7 +623,7 @@ var _ = Describe("handleNodeLabeller", func() { ctrl := gomock.NewController(GinkgoT()) kubeClient = mock_client.NewMockClient(ctrl) nodeLabellerHelper = nodelabeller.NewMockNodeLabeller(ctrl) - dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nodeLabellerHelper, nil, nil, nil, nil, nil) + dcrh = newDeviceConfigReconcilerHelper(kubeClient, nil, nodeLabellerHelper, nil, nil, nil, nil, nil, nil) }) ctx := context.Background() diff --git a/internal/controllers/mock_device_config_reconciler.go b/internal/controllers/mock_device_config_reconciler.go index 750ad697..a3d8e539 100644 --- a/internal/controllers/mock_device_config_reconciler.go +++ b/internal/controllers/mock_device_config_reconciler.go @@ -289,6 +289,21 @@ func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) handleNodeLabeller(ct return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleNodeLabeller", reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).handleNodeLabeller), ctx, devConfig, nodes) } +// handleRemediationWorkflow mocks base method. +func (m *MockdeviceConfigReconcilerHelperAPI) handleRemediationWorkflow(ctx context.Context, devConfig *v1alpha1.DeviceConfig, nodes *v1.NodeList, delete bool) (controllerruntime.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "handleRemediationWorkflow", ctx, devConfig, nodes, delete) + ret0, _ := ret[0].(controllerruntime.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// handleRemediationWorkflow indicates an expected call of handleRemediationWorkflow. +func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) handleRemediationWorkflow(ctx, devConfig, nodes, delete any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleRemediationWorkflow", reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).handleRemediationWorkflow), ctx, devConfig, nodes, delete) +} + // handleTestRunner mocks base method. 
func (m *MockdeviceConfigReconcilerHelperAPI) handleTestRunner(ctx context.Context, devConfig *v1alpha1.DeviceConfig, nodes *v1.NodeList) error { m.ctrl.T.Helper() @@ -346,6 +361,20 @@ func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) setFinalizer(ctx, dev return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setFinalizer", reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).setFinalizer), ctx, devConfig) } +// shouldReconcile mocks base method. +func (m *MockdeviceConfigReconcilerHelperAPI) shouldReconcile(ctx context.Context, upgradeRes, remediationRes controllerruntime.Result) controllerruntime.Result { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "shouldReconcile", ctx, upgradeRes, remediationRes) + ret0, _ := ret[0].(controllerruntime.Result) + return ret0 +} + +// shouldReconcile indicates an expected call of shouldReconcile. +func (mr *MockdeviceConfigReconcilerHelperAPIMockRecorder) shouldReconcile(ctx, upgradeRes, remediationRes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "shouldReconcile", reflect.TypeOf((*MockdeviceConfigReconcilerHelperAPI)(nil).shouldReconcile), ctx, upgradeRes, remediationRes) +} + // updateDeviceConfigStatus mocks base method. func (m *MockdeviceConfigReconcilerHelperAPI) updateDeviceConfigStatus(ctx context.Context, devConfig *v1alpha1.DeviceConfig) error { m.ctrl.T.Helper() diff --git a/internal/controllers/mock_remediation_handler.go b/internal/controllers/mock_remediation_handler.go new file mode 100644 index 00000000..b99bc521 --- /dev/null +++ b/internal/controllers/mock_remediation_handler.go @@ -0,0 +1,315 @@ +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: remediation_handler.go +// +// Generated by this command: +// +// mockgen -source=remediation_handler.go -package=controllers -destination=mock_remediation_handler.go remediationMgrHelperAPI +// +// Package controllers is a generated GoMock package. +package controllers + +import ( + context "context" + reflect "reflect" + + v1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + v1alpha10 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + gomock "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" + controllerruntime "sigs.k8s.io/controller-runtime" +) + +// MockremediationMgrAPI is a mock of remediationMgrAPI interface. +type MockremediationMgrAPI struct { + ctrl *gomock.Controller + recorder *MockremediationMgrAPIMockRecorder +} + +// MockremediationMgrAPIMockRecorder is the mock recorder for MockremediationMgrAPI. +type MockremediationMgrAPIMockRecorder struct { + mock *MockremediationMgrAPI +} + +// NewMockremediationMgrAPI creates a new mock instance. 
+func NewMockremediationMgrAPI(ctrl *gomock.Controller) *MockremediationMgrAPI { + mock := &MockremediationMgrAPI{ctrl: ctrl} + mock.recorder = &MockremediationMgrAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockremediationMgrAPI) EXPECT() *MockremediationMgrAPIMockRecorder { + return m.recorder +} + +// HandleDelete mocks base method. +func (m *MockremediationMgrAPI) HandleDelete(ctx context.Context, deviceConfig *v1alpha1.DeviceConfig, nodes *v1.NodeList) (controllerruntime.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleDelete", ctx, deviceConfig, nodes) + ret0, _ := ret[0].(controllerruntime.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HandleDelete indicates an expected call of HandleDelete. +func (mr *MockremediationMgrAPIMockRecorder) HandleDelete(ctx, deviceConfig, nodes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleDelete", reflect.TypeOf((*MockremediationMgrAPI)(nil).HandleDelete), ctx, deviceConfig, nodes) +} + +// HandleRemediation mocks base method. +func (m *MockremediationMgrAPI) HandleRemediation(ctx context.Context, deviceConfig *v1alpha1.DeviceConfig, nodes *v1.NodeList) (controllerruntime.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleRemediation", ctx, deviceConfig, nodes) + ret0, _ := ret[0].(controllerruntime.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HandleRemediation indicates an expected call of HandleRemediation. +func (mr *MockremediationMgrAPIMockRecorder) HandleRemediation(ctx, deviceConfig, nodes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleRemediation", reflect.TypeOf((*MockremediationMgrAPI)(nil).HandleRemediation), ctx, deviceConfig, nodes) +} + +// MockremediationMgrHelperAPI is a mock of remediationMgrHelperAPI interface. +type MockremediationMgrHelperAPI struct { + ctrl *gomock.Controller + recorder *MockremediationMgrHelperAPIMockRecorder +} + +// MockremediationMgrHelperAPIMockRecorder is the mock recorder for MockremediationMgrHelperAPI. +type MockremediationMgrHelperAPIMockRecorder struct { + mock *MockremediationMgrHelperAPI +} + +// NewMockremediationMgrHelperAPI creates a new mock instance. +func NewMockremediationMgrHelperAPI(ctrl *gomock.Controller) *MockremediationMgrHelperAPI { + mock := &MockremediationMgrHelperAPI{ctrl: ctrl} + mock.recorder = &MockremediationMgrHelperAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockremediationMgrHelperAPI) EXPECT() *MockremediationMgrHelperAPIMockRecorder { + return m.recorder +} + +// checkIfTaintExists mocks base method. +func (m *MockremediationMgrHelperAPI) checkIfTaintExists(node *v1.Node, targetTaint v1.Taint) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "checkIfTaintExists", node, targetTaint) + ret0, _ := ret[0].(bool) + return ret0 +} + +// checkIfTaintExists indicates an expected call of checkIfTaintExists. +func (mr *MockremediationMgrHelperAPIMockRecorder) checkIfTaintExists(node, targetTaint any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "checkIfTaintExists", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).checkIfTaintExists), node, targetTaint) +} + +// createDefaultConfigMap mocks base method. 
+func (m *MockremediationMgrHelperAPI) createDefaultConfigMap(ctx context.Context, name, namespace string) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "createDefaultConfigMap", ctx, name, namespace) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// createDefaultConfigMap indicates an expected call of createDefaultConfigMap. +func (mr *MockremediationMgrHelperAPIMockRecorder) createDefaultConfigMap(ctx, name, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createDefaultConfigMap", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).createDefaultConfigMap), ctx, name, namespace) +} + +// createDefaultObjects mocks base method. +func (m *MockremediationMgrHelperAPI) createDefaultObjects(ctx context.Context, devConfig *v1alpha1.DeviceConfig) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "createDefaultObjects", ctx, devConfig) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// createDefaultObjects indicates an expected call of createDefaultObjects. +func (mr *MockremediationMgrHelperAPIMockRecorder) createDefaultObjects(ctx, devConfig any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createDefaultObjects", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).createDefaultObjects), ctx, devConfig) +} + +// createDefaultWorkflowTemplate mocks base method. +func (m *MockremediationMgrHelperAPI) createDefaultWorkflowTemplate(ctx context.Context, namespace string) (*v1alpha10.WorkflowTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "createDefaultWorkflowTemplate", ctx, namespace) + ret0, _ := ret[0].(*v1alpha10.WorkflowTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// createDefaultWorkflowTemplate indicates an expected call of createDefaultWorkflowTemplate. +func (mr *MockremediationMgrHelperAPIMockRecorder) createDefaultWorkflowTemplate(ctx, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createDefaultWorkflowTemplate", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).createDefaultWorkflowTemplate), ctx, namespace) +} + +// createWorkflow mocks base method. +func (m *MockremediationMgrHelperAPI) createWorkflow(ctx context.Context, workflow *v1alpha10.Workflow) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "createWorkflow", ctx, workflow) + ret0, _ := ret[0].(error) + return ret0 +} + +// createWorkflow indicates an expected call of createWorkflow. +func (mr *MockremediationMgrHelperAPIMockRecorder) createWorkflow(ctx, workflow any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createWorkflow", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).createWorkflow), ctx, workflow) +} + +// deleteConfigMap mocks base method. +func (m *MockremediationMgrHelperAPI) deleteConfigMap(ctx context.Context, name, namespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "deleteConfigMap", ctx, name, namespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// deleteConfigMap indicates an expected call of deleteConfigMap. 
+func (mr *MockremediationMgrHelperAPIMockRecorder) deleteConfigMap(ctx, name, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "deleteConfigMap", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).deleteConfigMap), ctx, name, namespace) +} + +// deleteWorkflow mocks base method. +func (m *MockremediationMgrHelperAPI) deleteWorkflow(ctx context.Context, workflow *v1alpha10.Workflow) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "deleteWorkflow", ctx, workflow) + ret0, _ := ret[0].(error) + return ret0 +} + +// deleteWorkflow indicates an expected call of deleteWorkflow. +func (mr *MockremediationMgrHelperAPIMockRecorder) deleteWorkflow(ctx, workflow any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "deleteWorkflow", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).deleteWorkflow), ctx, workflow) +} + +// getConfigMap mocks base method. +func (m *MockremediationMgrHelperAPI) getConfigMap(ctx context.Context, configmapName, namespace string) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getConfigMap", ctx, configmapName, namespace) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getConfigMap indicates an expected call of getConfigMap. +func (mr *MockremediationMgrHelperAPIMockRecorder) getConfigMap(ctx, configmapName, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getConfigMap", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).getConfigMap), ctx, configmapName, namespace) +} + +// getWorkflowList mocks base method. +func (m *MockremediationMgrHelperAPI) getWorkflowList(ctx context.Context, namespace string) (*v1alpha10.WorkflowList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getWorkflowList", ctx, namespace) + ret0, _ := ret[0].(*v1alpha10.WorkflowList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getWorkflowList indicates an expected call of getWorkflowList. +func (mr *MockremediationMgrHelperAPIMockRecorder) getWorkflowList(ctx, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getWorkflowList", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).getWorkflowList), ctx, namespace) +} + +// getWorkflowTemplate mocks base method. +func (m *MockremediationMgrHelperAPI) getWorkflowTemplate(ctx context.Context, workflowTemplateName, namespace string) (*v1alpha10.WorkflowTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getWorkflowTemplate", ctx, workflowTemplateName, namespace) + ret0, _ := ret[0].(*v1alpha10.WorkflowTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getWorkflowTemplate indicates an expected call of getWorkflowTemplate. +func (mr *MockremediationMgrHelperAPIMockRecorder) getWorkflowTemplate(ctx, workflowTemplateName, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getWorkflowTemplate", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).getWorkflowTemplate), ctx, workflowTemplateName, namespace) +} + +// isDriverUpgradeInProgress mocks base method. 
+func (m *MockremediationMgrHelperAPI) isDriverUpgradeInProgress(devCfg *v1alpha1.DeviceConfig, node *v1.Node) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "isDriverUpgradeInProgress", devCfg, node) + ret0, _ := ret[0].(bool) + return ret0 +} + +// isDriverUpgradeInProgress indicates an expected call of isDriverUpgradeInProgress. +func (mr *MockremediationMgrHelperAPIMockRecorder) isDriverUpgradeInProgress(devCfg, node any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isDriverUpgradeInProgress", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).isDriverUpgradeInProgress), devCfg, node) +} + +// isRemediationDisabled mocks base method. +func (m *MockremediationMgrHelperAPI) isRemediationDisabled(ctx context.Context, devConfig *v1alpha1.DeviceConfig) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "isRemediationDisabled", ctx, devConfig) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// isRemediationDisabled indicates an expected call of isRemediationDisabled. +func (mr *MockremediationMgrHelperAPIMockRecorder) isRemediationDisabled(ctx, devConfig any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isRemediationDisabled", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).isRemediationDisabled), ctx, devConfig) +} + +// populateWorkflow mocks base method. +func (m *MockremediationMgrHelperAPI) populateWorkflow(ctx context.Context, wfTemplate *v1alpha10.WorkflowTemplate, mapping *ConditionWorkflowMapping, nodeName string, devCfg *v1alpha1.DeviceConfig) *v1alpha10.Workflow { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "populateWorkflow", ctx, wfTemplate, mapping, nodeName, devCfg) + ret0, _ := ret[0].(*v1alpha10.Workflow) + return ret0 +} + +// populateWorkflow indicates an expected call of populateWorkflow. +func (mr *MockremediationMgrHelperAPIMockRecorder) populateWorkflow(ctx, wfTemplate, mapping, nodeName, devCfg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "populateWorkflow", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).populateWorkflow), ctx, wfTemplate, mapping, nodeName, devCfg) +} + +// resumeSuspendedWorkflow mocks base method. +func (m *MockremediationMgrHelperAPI) resumeSuspendedWorkflow(ctx context.Context, wfName, namespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "resumeSuspendedWorkflow", ctx, wfName, namespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// resumeSuspendedWorkflow indicates an expected call of resumeSuspendedWorkflow. +func (mr *MockremediationMgrHelperAPIMockRecorder) resumeSuspendedWorkflow(ctx, wfName, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "resumeSuspendedWorkflow", reflect.TypeOf((*MockremediationMgrHelperAPI)(nil).resumeSuspendedWorkflow), ctx, wfName, namespace) +} diff --git a/internal/controllers/remediation_handler.go b/internal/controllers/remediation_handler.go new file mode 100644 index 00000000..55e606c2 --- /dev/null +++ b/internal/controllers/remediation_handler.go @@ -0,0 +1,972 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright (c) Advanced Micro Devices, Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "gopkg.in/yaml.v3" + "strings" + "time" + + amdv1alpha1 "github.com/ROCm/gpu-operator/api/v1alpha1" + + workflowv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + RemediationTaintKey = "amd-gpu-unhealthy" + DefaultConfigMapSuffix = "default-conditional-workflow-mappings" + DefaultTemplate = "default-template" + TestRunnerImage = "registry.test.pensando.io:5000/test-runner:agfhc-latest" + TestRunnerServiceAccount = "amd-gpu-operator-test-runner" +) + +// ConditionWorkflowMapping defines a single condition-to-workflow mapping. +// This is used when parsing the ConfigMap specified in the DeviceConfig. 
+type ConditionWorkflowMapping struct { + NodeCondition string `json:"nodeCondition" yaml:"nodeCondition"` + WorkflowTemplate string `json:"workflowTemplate" yaml:"workflowTemplate"` + ValidationTests ValidationTestsProfile `json:"validationTestsProfile" yaml:"validationTestsProfile"` + PhysicalActionNeeded string `json:"physicalActionNeeded" yaml:"physicalActionNeeded"` +} + +type ValidationTestsProfile struct { + Framework string `json:"framework" yaml:"framework"` + Recipe string `json:"recipe" yaml:"recipe"` + Iterations int `json:"iterations" yaml:"iterations"` + StopOnFailure bool `json:"stopOnFailure" yaml:"stopOnFailure"` + TimeoutSeconds int `json:"timeoutSeconds" yaml:"timeoutSeconds"` +} + +type remediationMgr struct { + helper remediationMgrHelperAPI +} + +//go:generate mockgen -source=remediation_handler.go -package=controllers -destination=mock_remediation_handler.go remediationMgr +type remediationMgrAPI interface { + HandleRemediation(ctx context.Context, deviceConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) (ctrl.Result, error) + HandleDelete(ctx context.Context, deviceConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) (ctrl.Result, error) +} + +func newRemediationMgrHandler(client client.Client, k8sConfig *rest.Config) remediationMgrAPI { + k8sIntf, err := kubernetes.NewForConfig(k8sConfig) + if err != nil { + return nil + } + return &remediationMgr{ + helper: newRemediationMgrHelperHandler(client, k8sIntf), + } +} + +/*================================= Remediation Manager APIs===================================*/ + +// HandleRemediation handles the remediation functionalities for device config +func (n *remediationMgr) HandleRemediation(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig, nodes *v1.NodeList) (ctrl.Result, error) { + res := ctrl.Result{Requeue: true, RequeueAfter: time.Second * 20} + logger := log.FromContext(ctx) + + // Don't handle remediation if disabled + remediationDisabled, err := n.helper.isRemediationDisabled(ctx, devConfig) + + if err != nil { + return res, err + } + + if remediationDisabled { + return ctrl.Result{}, nil + } + + var configMap *v1.ConfigMap + if configMap, err = n.helper.createDefaultObjects(ctx, devConfig); err != nil { + return res, err + } + + var mappingsList []ConditionWorkflowMapping + if err = yaml.Unmarshal([]byte(configMap.Data["workflow"]), &mappingsList); err != nil { + return res, fmt.Errorf("failed to parse workflows from ConfigMap: %w", err) + } + + mappings := make(map[string]ConditionWorkflowMapping) + for _, m := range mappingsList { + mappings[m.NodeCondition] = m + } + + for _, node := range nodes.Items { + NodeLoop: + for _, cond := range node.Status.Conditions { + + wfList, err := n.helper.getWorkflowList(ctx, devConfig.Namespace) + if err != nil { + logger.Error(err, fmt.Sprintf("Failed to list workflows. 
Workflow list: %v", wfList)) + break NodeLoop + } + + // If a workflow is already running on that node, then skip the node but resume/delete workflow if needed + for _, wf := range wfList.Items { + if strings.HasPrefix(wf.Name, fmt.Sprintf("%s-", node.Name)) { + if wf.Status.Phase == workflowv1alpha1.WorkflowSucceeded { + if err := n.helper.deleteWorkflow(ctx, &wf); err != nil { + logger.Error(err, fmt.Sprintf("Failed to delete workflow %s", wf.Name)) + } + logger.Info(fmt.Sprintf("Deleted workflow: %s", wf.Name)) + } else if wf.Status.Phase == workflowv1alpha1.WorkflowRunning { + stages := wf.Status.Nodes + for _, wfStage := range stages { + if wfStage.Type == "Suspend" && wfStage.Phase == "Running" { + logger.Info(fmt.Sprintf("Suspended workflow %s found for node %s. Attempting resume.", wf.Name, node.Name)) + if err := n.helper.resumeSuspendedWorkflow(ctx, wf.Name, wf.Namespace); err != nil { + logger.Error(err, fmt.Sprintf("Failed to resume workflow %s", wf.Name)) + } + break NodeLoop + } + } + logger.Info(fmt.Sprintf("Workflow: %s already running on the node: %s, skipping creation of workflow", wf.Name, node.Name)) + break NodeLoop + } + } + } + + if cond.Status != v1.ConditionTrue { + continue + } + mapping, exists := mappings[string(cond.Type)] + if !exists { + continue + } + + logger.Info(fmt.Sprintf("Matching condition found on node %s for condition %s", node.Name, mapping.NodeCondition)) + + taint := v1.Taint{ + Key: RemediationTaintKey, + Value: mapping.NodeCondition, + Effect: v1.TaintEffectNoSchedule, + } + + // If taint already exists, skip the node + if hasTaint := n.helper.checkIfTaintExists(&node, taint); hasTaint { + logger.Info(fmt.Sprintf("Taint %s already present on node %s, skipping creation of workflow", taint.Key, node.Name)) + break NodeLoop + } + + // If driver install/upgrade is in progress, skip the node + if driverUpgradeInProgress := n.helper.isDriverUpgradeInProgress(devConfig, &node); driverUpgradeInProgress { + logger.Info(fmt.Sprintf("Driver Install/Upgrade is in progress, skipping creation of workflow on node %s", node.Name)) + break NodeLoop + } + + logger.Info(fmt.Sprintf("GPU Condition: %s observed and node: %s is unhealthy. 
Triggering Remediation Workflow: %s", mapping.NodeCondition, node.Name, mapping.WorkflowTemplate)) + + // Fetch WorkflowTemplate + wfTemplate, err := n.helper.getWorkflowTemplate(ctx, mapping.WorkflowTemplate, devConfig.Namespace) + if err != nil { + logger.Error(err, fmt.Sprintf("Failed to fetch WorkflowTemplate %s", mapping.WorkflowTemplate)) + return res, err + } + + // Populate Workflow Object + wf := n.helper.populateWorkflow(ctx, wfTemplate, &mapping, node.Name, devConfig) + + // Create Workflow + if err := n.helper.createWorkflow(ctx, wf); err != nil { + logger.Error(err, fmt.Sprintf("Failed to create Remediation Workflow for node %s", node.Name)) + return res, err + } + + logger.Info(fmt.Sprintf("Remediation Workflow for the condition is created successfully on node %s using template %s", node.Name, mapping.WorkflowTemplate)) + break NodeLoop + } + } + logger.Info("Requeue for any node conditions that may be present") + return res, nil +} + +// HandleDelete handles the delete operations during remediation process +func (n *remediationMgr) HandleDelete(ctx context.Context, deviceConfig *amdv1alpha1.DeviceConfig, nodeList *v1.NodeList) (res ctrl.Result, err error) { + + wfList, err := n.helper.getWorkflowList(ctx, deviceConfig.Namespace) + if err != nil { + log.FromContext(ctx).Error(err, "Failed to list workflows during delete") + } + + for _, wf := range wfList.Items { + if err := n.helper.deleteWorkflow(ctx, &wf); err != nil { + log.FromContext(ctx).Error(err, fmt.Sprintf("Failed to delete workflow %s", wf.Name)) + } + log.FromContext(ctx).Info(fmt.Sprintf("Deleted workflow: %s", wf.Name)) + } + + var cfgMapName string + if deviceConfig.Spec.RemediationWorkflow.ConditionalWorkflows != nil { + cfgMapName = deviceConfig.Spec.RemediationWorkflow.ConditionalWorkflows.Name + } else { + cfgMapName = deviceConfig.Name + "-" + DefaultConfigMapSuffix + } + if err := n.helper.deleteConfigMap(ctx, cfgMapName, deviceConfig.Namespace); err == nil { + log.FromContext(ctx).Info(fmt.Sprintf("Deleted ConfigMap: %s", cfgMapName)) + } + + return +} + +/*=========================================== Remediation Manager Helper APIs ==========================================*/ + +//go:generate mockgen -source=remediation_handler.go -package=controllers -destination=mock_remediation_handler.go remediationMgrHelperAPI +type remediationMgrHelperAPI interface { + isRemediationDisabled(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) (bool, error) + resumeSuspendedWorkflow(ctx context.Context, wfName, namespace string) error + isDriverUpgradeInProgress(devCfg *amdv1alpha1.DeviceConfig, node *v1.Node) bool + checkIfTaintExists(node *v1.Node, targetTaint v1.Taint) bool + getWorkflowList(ctx context.Context, namespace string) (*workflowv1alpha1.WorkflowList, error) + getWorkflowTemplate(ctx context.Context, workflowTemplateName, namespace string) (*workflowv1alpha1.WorkflowTemplate, error) + getConfigMap(ctx context.Context, configmapName string, namespace string) (*v1.ConfigMap, error) + deleteConfigMap(ctx context.Context, name, namespace string) error + createDefaultConfigMap(ctx context.Context, name, namespace string) (*v1.ConfigMap, error) + createDefaultWorkflowTemplate(ctx context.Context, namespace string) (*workflowv1alpha1.WorkflowTemplate, error) + createDefaultObjects(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) (*v1.ConfigMap, error) + populateWorkflow(ctx context.Context, wfTemplate *workflowv1alpha1.WorkflowTemplate, mapping *ConditionWorkflowMapping, nodeName string, devCfg 
*amdv1alpha1.DeviceConfig) *workflowv1alpha1.Workflow + createWorkflow(ctx context.Context, workflow *workflowv1alpha1.Workflow) error + deleteWorkflow(ctx context.Context, workflow *workflowv1alpha1.Workflow) error +} + +type remediationMgrHelper struct { + client client.Client + k8sInterface kubernetes.Interface +} + +// Initialize remediation manager helper interface +func newRemediationMgrHelperHandler(client client.Client, k8sInterface kubernetes.Interface) remediationMgrHelperAPI { + return &remediationMgrHelper{ + client: client, + k8sInterface: k8sInterface, + } +} + +func (h *remediationMgrHelper) isRemediationDisabled(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) (bool, error) { + + logger := log.FromContext(ctx) + if devConfig.Spec.RemediationWorkflow.Enable == nil || !*devConfig.Spec.RemediationWorkflow.Enable { + return true, nil + } + + podList := &v1.PodList{} + if err := h.client.List(ctx, podList, client.InNamespace(devConfig.Namespace)); err != nil { + logger.Error(err, "failed to list pods") + return false, err + } + + found := false + for _, pod := range podList.Items { + if strings.HasPrefix(pod.Name, "amd-gpu-operator-workflow-controller") { + found = true + break + } + } + + if !found { + logger.Info("Workflow controller pod not found. Please check if it was disabled during bringup, skipping remediation") + return true, nil + } + return false, nil +} + +func (h *remediationMgrHelper) resumeSuspendedWorkflow(ctx context.Context, wfName, namespace string) error { + + logger := log.FromContext(ctx) + var wf workflowv1alpha1.Workflow + if err := h.client.Get(ctx, client.ObjectKey{Name: wfName, Namespace: namespace}, &wf); err != nil { + return fmt.Errorf("could not fetch workflow: %w", err) + } + + modified := false + stages := wf.Status.Nodes + for wfStageID, wfStage := range stages { + if wfStage.Type == "Suspend" && wfStage.Phase == "Running" { + logger.Info(fmt.Sprintf("Workflow %s is suspended. 
Resuming...", wfName)) + + wfStage.Phase = workflowv1alpha1.NodeSucceeded + wfStage.FinishedAt = metav1.Time{Time: time.Now().UTC()} + stages[wfStageID] = wfStage + modified = true + } + } + if !modified { + logger.Info(fmt.Sprintf("Workflow %q is not in suspended state", wfName)) + return nil + } + + if err := h.client.Update(ctx, &wf); err != nil { + return fmt.Errorf("failed to patch suspended node status: %w", err) + } + + logger.Info(fmt.Sprintf("Workflow %s resumed successfully", wfName)) + return nil +} + +func (h *remediationMgrHelper) isDriverUpgradeInProgress(devCfg *amdv1alpha1.DeviceConfig, node *v1.Node) bool { + // Define the blocked states that indicate an upgrade is in progress + blockedStates := map[amdv1alpha1.UpgradeState]bool{ + amdv1alpha1.UpgradeStateNotStarted: true, + amdv1alpha1.UpgradeStateStarted: true, + amdv1alpha1.UpgradeStateInstallInProgress: true, + amdv1alpha1.UpgradeStateInProgress: true, + amdv1alpha1.UpgradeStateRebootInProgress: true, + } + + for nodeName, moduleStatus := range devCfg.Status.NodeModuleStatus { + if nodeName == node.Name { + if blockedStates[moduleStatus.Status] { + return true + } + } + } + + return false +} + +func (h *remediationMgrHelper) checkIfTaintExists(node *v1.Node, targetTaint v1.Taint) bool { + for _, t := range node.Spec.Taints { + if t.Key == targetTaint.Key && t.Effect == targetTaint.Effect { + return true + } + } + return false +} + +func (h *remediationMgrHelper) getWorkflowList(ctx context.Context, namespace string) (*workflowv1alpha1.WorkflowList, error) { + wfList := &workflowv1alpha1.WorkflowList{} + if err := h.client.List(ctx, wfList, &client.ListOptions{Namespace: namespace}); err != nil { + return nil, err + } + return wfList, nil +} + +func (h *remediationMgrHelper) getConfigMap(ctx context.Context, configmapName string, namespace string) (*v1.ConfigMap, error) { + cm := &v1.ConfigMap{} + err := h.client.Get(ctx, client.ObjectKey{ + Name: configmapName, + Namespace: namespace, + }, cm) + if err != nil { + return nil, err + } + return cm, nil +} + +func (h *remediationMgrHelper) createDefaultConfigMap(ctx context.Context, name string, namespace string) (*v1.ConfigMap, error) { + + workflowYaml := `- nodeCondition: "AMDGPUUnhealthy" + workflowTemplate: "default-template" + validationTestsProfile: + framework: "AGFHC" + recipe: "all_lvl4" + iterations: 1 + stopOnFailure: true + timeoutSeconds: 4800` + + defaultCfgMap := &v1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{ + "workflow": workflowYaml, + }, + } + + err := h.client.Create(ctx, defaultCfgMap) + if err != nil { + return nil, err + } + return defaultCfgMap, nil +} + +func (h *remediationMgrHelper) deleteConfigMap(ctx context.Context, name, namespace string) error { + + cm := &v1.ConfigMap{} + cm.Name = name + cm.Namespace = namespace + return h.client.Delete(ctx, cm) +} + +func (h *remediationMgrHelper) createDefaultWorkflowTemplate(ctx context.Context, namespace string) (*workflowv1alpha1.WorkflowTemplate, error) { + + template := &workflowv1alpha1.WorkflowTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-template", + Namespace: namespace, + }, + Spec: workflowv1alpha1.WorkflowSpec{ + Entrypoint: "inbuilt", + Templates: []workflowv1alpha1.Template{ + { + Name: "inbuilt", + Steps: []workflowv1alpha1.ParallelSteps{ + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "taint", Template: "taint"}}}, + {Steps: 
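+						// suspend parks the workflow at this step; resumeSuspendedWorkflow
+						// later marks it Succeeded so the remaining steps can run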
[]workflowv1alpha1.WorkflowStep{{Name: "suspend", Template: "suspend"}}}, + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "drain", Template: "drain"}}}, + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "reboot", Template: "reboot", ContinueOn: &workflowv1alpha1.ContinueOn{Failed: true}}}}, + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "test", Template: "test"}}}, + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "wait", Template: "wait"}}}, + {Steps: []workflowv1alpha1.WorkflowStep{{Name: "untaint", Template: "untaint"}}}, + }, + }, + { + Name: "taint", + Inputs: workflowv1alpha1.Inputs{ + Parameters: []workflowv1alpha1.Parameter{ + { + Name: "node_condition", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_condition}}"), + }, + { + Name: "node_name", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_name}}"), + }, + }, + }, + Script: &workflowv1alpha1.ScriptTemplate{ + Source: ` +set -e +NODE_NAME="{{inputs.parameters.node_name}}" +echo "Tainting node $NODE_NAME" +kubectl taint node "$NODE_NAME" amd-gpu-unhealthy="{{inputs.parameters.node_condition}}":NoSchedule --overwrite +`, + Container: v1.Container{ + Image: "bitnami/kubectl:1.29.0", + Command: []string{"sh"}, + }, + }, + }, + { + Name: "suspend", + Suspend: &workflowv1alpha1.SuspendTemplate{}, + }, + { + Name: "drain", + Inputs: workflowv1alpha1.Inputs{ + Parameters: []workflowv1alpha1.Parameter{ + { + Name: "node_name", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_name}}"), + }, + }, + }, + Script: &workflowv1alpha1.ScriptTemplate{ + Source: ` +set -e +echo "Fetching node name..." +NODE_NAME="{{inputs.parameters.node_name}}" +echo "Identified node: $NODE_NAME" +echo "Finding pods on node $NODE_NAME with volume mount path starting with /dev/dri..." +PODS=$(kubectl get pods --all-namespaces -o json | jq -r ' + .items[] | + select(.spec.nodeName == "'"$NODE_NAME"'") | + select( + ( + [.spec.volumes[]? | select(.hostPath?.path != null and (.hostPath.path | startswith("/dev/dri")))] + | length > 0 + ) or ( + [.spec.containers[]? | select(.resources.requests["amd.com/gpu"] != null)] + | length > 0 + ) + ) | + "\(.metadata.namespace) \(.metadata.name)" +') +if [ -z "$PODS" ]; then + echo "No pods with /dev/dri mounts found on node $NODE_NAME." 
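+  # otherwise, force-delete every matched pod so the drain can complete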
+else
+  echo "Evicting pods:"
+  echo "$PODS"
+  echo "$PODS" | while read -r ns name; do
+    echo "Deleting pod $name in namespace $ns"
+    kubectl delete pod "$name" -n "$ns" --grace-period=0 --force || true
+  done
+fi
+`,
+					Container: v1.Container{
+						Image:   "bitnami/kubectl:1.29.0",
+						Command: []string{"sh"},
+					},
+				},
+			},
+			{
+				Name: "reboot",
+				Container: &v1.Container{
+					Image:           "docker.io/rocm/gpu-operator-utils:latest",
+					Command:         []string{"/nsenter", "--all", "--target=1", "--", "/sbin/reboot", "-f"},
+					SecurityContext: &v1.SecurityContext{Privileged: ptr.To(true)},
+				},
+				PodSpecPatch: `
+hostPID: true
+hostNetwork: true
+containers:
+- name: main
+  stdin: true
+  tty: true
+`,
+			},
+			{
+				Name: "test",
+				Inputs: workflowv1alpha1.Inputs{
+					Parameters: []workflowv1alpha1.Parameter{
+						{
+							Name: "node_name",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_name}}"),
+						},
+						{
+							Name: "framework",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.framework}}"),
+						},
+						{
+							Name: "recipe",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.recipe}}"),
+						},
+						{
+							Name: "iterations",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.iterations}}"),
+						},
+						{
+							Name: "stopOnFailure",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.stopOnFailure}}"),
+						},
+						{
+							Name: "timeoutSeconds",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.timeoutSeconds}}"),
+						},
+						{
+							Name: "testRunnerImage",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.testRunnerImage}}"),
+						},
+						{
+							Name: "testRunnerServiceAccount",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.testRunnerServiceAccount}}"),
+						},
+						{
+							Name: "namespace",
+							Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.namespace}}"),
+						},
+					},
+				},
+				Script: &workflowv1alpha1.ScriptTemplate{
+					Source: `
+set -e
+NODE_NAME="{{inputs.parameters.node_name}}"
+JOB_NAME="test-runner-manual-trigger-${NODE_NAME}"
+CM_NAME="manual-config-map-${NODE_NAME}"
+FRAMEWORK="{{inputs.parameters.framework}}"
+RECIPE="{{inputs.parameters.recipe}}"
+ITERATIONS="{{inputs.parameters.iterations}}"
+STOPONFAILURE="{{inputs.parameters.stopOnFailure}}"
+TIMEOUTSECONDS="{{inputs.parameters.timeoutSeconds}}"
+TESTRUNNERIMAGE="{{inputs.parameters.testRunnerImage}}"
+TESTRUNNERSA="{{inputs.parameters.testRunnerServiceAccount}}"
+NAMESPACE="{{inputs.parameters.namespace}}"
+
+if [ -z "$FRAMEWORK" ] || [ -z "$RECIPE" ] || [ -z "$ITERATIONS" ] || [ -z "$STOPONFAILURE" ] || [ -z "$TIMEOUTSECONDS" ]; then
+  echo "Validation profile incomplete, skipping configmap and job creation. Please enter framework, recipe, iterations, stopOnFailure, timeoutSeconds as per testrunner requirements"
+  exit 0
+fi
+
+echo "Creating test runner Job $JOB_NAME and ConfigMap $CM_NAME..."
+
+cat <<EOF | kubectl apply -f -
+# ConfigMap ${CM_NAME} and Job ${JOB_NAME} manifests go here (bodies omitted)
+EOF
+
+# poll the test runner Job until it reports Complete or Failed
+while true; do
+  job_status=$(kubectl get job "$JOB_NAME" -n "$NAMESPACE" -o jsonpath='{.status.conditions[?(@.status=="True")].type}' 2>/dev/null || true)
+  if [ "$job_status" = "Complete" ]; then
+    echo "Test runner job completed successfully."
+    kubectl logs -n $NAMESPACE job/$JOB_NAME
+    echo "Detailed run report can be found at /var/log/amd-test-runner"
+    break
+  elif [ "$job_status" = "Failed" ]; then
+    echo "Test runner job failed."
+    kubectl logs -n $NAMESPACE job/$JOB_NAME
+    echo "Detailed run report can be found at /var/log/amd-test-runner"
+    exit 1
+  else
+    echo "Test runner job is still running. Waiting..."
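+    # job is neither Complete nor Failed yet; check again in a minute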
+ sleep 60 + fi +done +`, + Container: v1.Container{ + Image: "bitnami/kubectl:1.29.0", + Command: []string{"sh"}, + }, + }, + }, + { + Name: "wait", + Inputs: workflowv1alpha1.Inputs{ + Parameters: []workflowv1alpha1.Parameter{ + { + Name: "node_condition", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_condition}}"), + }, + { + Name: "node_name", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_name}}"), + }, + }, + }, + Script: &workflowv1alpha1.ScriptTemplate{ + Source: ` +set -e +NODE_NAME="{{inputs.parameters.node_name}}" +echo "Waiting for {{inputs.parameters.node_condition}} condition to be False on node $NODE_NAME for 2 consecutive minutes (timeout: 15 minutes)" +STABLE_COUNT=0 +TOTAL_WAIT=0 +while [ "$TOTAL_WAIT" -lt 15 ]; do + STATUS=$(kubectl get node "$NODE_NAME" -o jsonpath="{.status.conditions[?(@.type=='{{inputs.parameters.node_condition}}')].status}") + echo "[$(date)] {{inputs.parameters.node_condition}} status: $STATUS" + if [ "$STATUS" = "False" ]; then + STABLE_COUNT=$((STABLE_COUNT + 1)) + echo "Condition is stable (False) for $STABLE_COUNT minute(s)" + if [ "$STABLE_COUNT" -ge 2 ]; then + echo "Condition has been False for 2 consecutive checks (~2 minutes). Proceeding..." + exit 0 + fi + else + STABLE_COUNT=0 + echo "Condition is not stable (status: $STATUS)." + fi + sleep 60 + TOTAL_WAIT=$((TOTAL_WAIT + 1)) +done +echo "{{inputs.parameters.node_condition}} did not remain False for 2 consecutive minutes within 15 minutes. Exiting with failure." +exit 1 +`, + Container: v1.Container{ + Image: "bitnami/kubectl:1.29.0", + Command: []string{"sh"}, + }, + }, + }, + { + Name: "untaint", + Inputs: workflowv1alpha1.Inputs{ + Parameters: []workflowv1alpha1.Parameter{ + { + Name: "node_name", + Value: workflowv1alpha1.AnyStringPtr("{{workflow.parameters.node_name}}"), + }, + }, + }, + Script: &workflowv1alpha1.ScriptTemplate{ + Source: ` +set -e +NODE_NAME="{{inputs.parameters.node_name}}" +echo "Untainting node $NODE_NAME" +kubectl taint node "$NODE_NAME" amd-gpu-unhealthy:NoSchedule- +`, + Container: v1.Container{ + Image: "bitnami/kubectl:1.29.0", + Command: []string{"sh"}, + }, + }, + }, + }, + }, + } + + if err := h.client.Create(ctx, template); err != nil { + return nil, err + } + + return template, nil +} + +func (h *remediationMgrHelper) createDefaultObjects(ctx context.Context, devConfig *amdv1alpha1.DeviceConfig) (*v1.ConfigMap, error) { + + logger := log.FromContext(ctx) + var cfgMapName string + if devConfig.Spec.RemediationWorkflow.ConditionalWorkflows != nil { + cfgMapName = devConfig.Spec.RemediationWorkflow.ConditionalWorkflows.Name + } else { + cfgMapName = devConfig.Name + "-" + DefaultConfigMapSuffix + } + + // Create default configmap if required + cm, err := h.getConfigMap(ctx, cfgMapName, devConfig.Namespace) + if err != nil { + if devConfig.Spec.RemediationWorkflow.ConditionalWorkflows == nil { + cm, err = h.createDefaultConfigMap(ctx, cfgMapName, devConfig.Namespace) + if err != nil { + logger.Error(err, "Failed to create default configmap") + return nil, err + } + logger.Info("Created default configmap successfully") + } else { + logger.Error(err, fmt.Sprintf("Configmap: %s not found", cfgMapName)) + return nil, err + } + } + + // Create Default WorkflowTemplate if required + _, err = h.getWorkflowTemplate(ctx, DefaultTemplate, devConfig.Namespace) + if err != nil { + logger.Error(err, fmt.Sprintf("Failed to fetch WorkflowTemplate %s", DefaultTemplate)) + if _, err = h.createDefaultWorkflowTemplate(ctx, 
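+		// the built-in default template is created in the DeviceConfig namespace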
devConfig.Namespace); err != nil { + logger.Error(err, "Failed to create default workflow template") + return nil, err + } + logger.Info("Created default workflow template successfully") + } + + return cm, nil +} + +func (h *remediationMgrHelper) populateWorkflow(ctx context.Context, wfTemplate *workflowv1alpha1.WorkflowTemplate, mapping *ConditionWorkflowMapping, nodeName string, devConfig *amdv1alpha1.DeviceConfig) *workflowv1alpha1.Workflow { + wf := &workflowv1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: fmt.Sprintf("%s-%s-", nodeName, mapping.WorkflowTemplate), + Namespace: devConfig.Namespace, + }, + Spec: *wfTemplate.Spec.DeepCopy(), + } + + wf.Spec.Entrypoint = wfTemplate.Spec.Entrypoint + wf.Spec.ServiceAccountName = "amd-gpu-operator-gpu-operator-charts-controller-manager" + ttlHours := devConfig.Spec.RemediationWorkflow.TtlForFailedWorkflows + ttlSeconds := int32(ttlHours * 3600) + wf.Spec.TTLStrategy = &workflowv1alpha1.TTLStrategy{ + SecondsAfterCompletion: &ttlSeconds, + } + + for i := range wf.Spec.Templates { + if wf.Spec.Templates[i].NodeSelector == nil { + wf.Spec.Templates[i].NodeSelector = map[string]string{} + } + wf.Spec.Templates[i].NodeSelector["kubernetes.io/hostname"] = nodeName + + toleration := v1.Toleration{ + Key: RemediationTaintKey, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + + if wf.Spec.Templates[i].Tolerations == nil { + wf.Spec.Templates[i].Tolerations = []v1.Toleration{} + } + wf.Spec.Templates[i].Tolerations = append(wf.Spec.Templates[i].Tolerations, toleration) + } + + // Pass the args required to be used in the template + wf.Spec.Arguments = workflowv1alpha1.Arguments{ + Parameters: []workflowv1alpha1.Parameter{ + { + Name: "node_condition", + Value: workflowv1alpha1.AnyStringPtr(mapping.NodeCondition), + }, + { + Name: "node_name", + Value: workflowv1alpha1.AnyStringPtr(nodeName), + }, + { + Name: "framework", + Value: workflowv1alpha1.AnyStringPtr(mapping.ValidationTests.Framework), + }, + { + Name: "recipe", + Value: workflowv1alpha1.AnyStringPtr(mapping.ValidationTests.Recipe), + }, + { + Name: "iterations", + Value: workflowv1alpha1.AnyStringPtr(mapping.ValidationTests.Iterations), + }, + { + Name: "stopOnFailure", + Value: workflowv1alpha1.AnyStringPtr(mapping.ValidationTests.StopOnFailure), + }, + { + Name: "timeoutSeconds", + Value: workflowv1alpha1.AnyStringPtr(mapping.ValidationTests.TimeoutSeconds), + }, + { + Name: "testRunnerImage", + Value: workflowv1alpha1.AnyStringPtr(TestRunnerImage), + }, + { + Name: "testRunnerServiceAccount", + Value: workflowv1alpha1.AnyStringPtr(TestRunnerServiceAccount), + }, + { + Name: "namespace", + Value: workflowv1alpha1.AnyStringPtr(devConfig.Namespace), + }, + }, + } + + return wf + +} + +func (h *remediationMgrHelper) createWorkflow(ctx context.Context, workflow *workflowv1alpha1.Workflow) error { + if err := h.client.Create(ctx, workflow); err != nil { + return err + } + return nil +} + +func (h *remediationMgrHelper) deleteWorkflow(ctx context.Context, workflow *workflowv1alpha1.Workflow) error { + if err := h.client.Delete(ctx, workflow); err != nil { + return err + } + return nil +} + +func (h *remediationMgrHelper) getWorkflowTemplate(ctx context.Context, workflowTemplateName, namespace string) (*workflowv1alpha1.WorkflowTemplate, error) { + wfTemplate := &workflowv1alpha1.WorkflowTemplate{} + err := h.client.Get(ctx, client.ObjectKey{ + Name: workflowTemplateName, + Namespace: namespace, + }, wfTemplate) + if err != nil { + return nil, err + } 
+ return wfTemplate, nil +} diff --git a/internal/kmmmodule/kmmmodule.go b/internal/kmmmodule/kmmmodule.go index f318acab..2a77f195 100644 --- a/internal/kmmmodule/kmmmodule.go +++ b/internal/kmmmodule/kmmmodule.go @@ -483,6 +483,11 @@ func setKMMModuleLoader(ctx context.Context, mod *kmmv1beta1.Module, devConfig * Value: "up", Operator: v1.TolerationOpEqual, }, + v1.Toleration{ + Key: "amd-gpu-unhealthy", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, ) return nil } diff --git a/internal/kmmmodule/kmmmodule_test.go b/internal/kmmmodule/kmmmodule_test.go index 54baa866..277821b8 100644 --- a/internal/kmmmodule/kmmmodule_test.go +++ b/internal/kmmmodule/kmmmodule_test.go @@ -132,6 +132,11 @@ var _ = Describe("setKMMModuleLoader", func() { Value: "up", Operator: v1.TolerationOpEqual, }, + { + Key: "amd-gpu-unhealthy", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, } err = setKMMModuleLoader(context.TODO(), &mod, &input, false, testNodeList) @@ -201,6 +206,11 @@ var _ = Describe("setKMMModuleLoader", func() { Value: "up", Operator: v1.TolerationOpEqual, }, + { + Key: "amd-gpu-unhealthy", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, } err = setKMMModuleLoader(context.TODO(), &mod, &input, false, testNodeList) diff --git a/internal/metricsexporter/metricsexporter.go b/internal/metricsexporter/metricsexporter.go index 74ca5332..5170e3ba 100644 --- a/internal/metricsexporter/metricsexporter.go +++ b/internal/metricsexporter/metricsexporter.go @@ -442,6 +442,15 @@ func (nl *metricsExporter) SetMetricsExporterAsDesired(ds *appsv1.DaemonSet, dev } else { ds.Spec.Template.Spec.Tolerations = nil } + // Add tolerations for the node unhealthy conditions + gpuUnhealthyTolerations := []v1.Toleration{ + { + Key: "amd-gpu-unhealthy", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + } + ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, gpuUnhealthyTolerations...) return controllerutil.SetControllerReference(devConfig, ds, nl.scheme) } diff --git a/tests/e2e/cluster_test.go b/tests/e2e/cluster_test.go index c25255b5..cef16074 100644 --- a/tests/e2e/cluster_test.go +++ b/tests/e2e/cluster_test.go @@ -41,6 +41,7 @@ import ( "github.com/ROCm/gpu-operator/internal/conditions" "github.com/ROCm/gpu-operator/internal/kmmmodule" "github.com/ROCm/gpu-operator/tests/e2e/utils" + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/stretchr/testify/assert" . 
"gopkg.in/check.v1" @@ -54,7 +55,11 @@ import ( ) const ( - serviceMonitorCRDURL = "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml" + serviceMonitorCRDURL = "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml" + amdGpuResourceLabel = "amd.com/gpu" + resourceNamingStrategy = "resource_naming_strategy" + namingStrategySingle = "single" + namingStrategyMixed = "mixed" ) func (s *E2ESuite) getDeviceConfigForDCM(c *C) *v1alpha1.DeviceConfig { @@ -112,6 +117,7 @@ func (s *E2ESuite) getDeviceConfig(c *C) *v1alpha1.DeviceConfig { //SkipDrivers: true, MetricsExporter: v1alpha1.MetricsExporterSpec{ Enable: &metricsExporterEnable, + Image: exporterImage, NodePort: 32501, Port: 5001, }, @@ -123,7 +129,7 @@ func (s *E2ESuite) getDeviceConfig(c *C) *v1alpha1.DeviceConfig { devCfg.Spec.DevicePlugin.DevicePluginImage = devicePluginImage devCfg.Spec.DevicePlugin.NodeLabellerImage = nodeLabellerImage if s.simEnable { - devCfg.Spec.MetricsExporter.Image = exporterImage + devCfg.Spec.MetricsExporter.Image = exporterMockImage } if s.openshift { devCfg.Spec.Driver.Version = "6.1.1" @@ -132,6 +138,7 @@ func (s *E2ESuite) getDeviceConfig(c *C) *v1alpha1.DeviceConfig { } func (s *E2ESuite) createDeviceConfig(devCfg *v1alpha1.DeviceConfig, c *C) { + logger.Infof("Creating DeviceConfig %+v", devCfg) _, err := s.dClient.DeviceConfigs(s.ns).Create(devCfg) assert.NoError(c, err, "failed to create %v", s.cfgName) } @@ -286,7 +293,7 @@ func (s *E2ESuite) verifyDevicePluginStatus(ns string, c *C, devCfg *v1alpha1.De } logger.Infof(" Device Plugin Not found for deviceconfig %v", devCfg.Name) return false - }, 25*time.Minute, 5*time.Second) + }, 20*time.Minute, 5*time.Second) } func (s *E2ESuite) checkNodeLabellerStatus(ns string, c *C, devCfg *v1alpha1.DeviceConfig) { @@ -299,7 +306,7 @@ func (s *E2ESuite) checkNodeLabellerStatus(ns string, c *C, devCfg *v1alpha1.Dev logger.Infof(" node-labeller: %s status %+v", ds.Name, ds.Status) return ds.Status.NumberReady > 0 && ds.Status.NumberReady == ds.Status.DesiredNumberScheduled - }, 45*time.Minute, 5*time.Second) + }, 20*time.Minute, 5*time.Second) } func (s *E2ESuite) checkMetricsExporterStatus(devCfg *v1alpha1.DeviceConfig, ns string, serviceType v1.ServiceType, c *C) { @@ -326,7 +333,7 @@ func (s *E2ESuite) checkMetricsExporterStatus(devCfg *v1alpha1.DeviceConfig, ns } return ready - }, 45*time.Minute, 5*time.Second) + }, 20*time.Minute, 5*time.Second) } func (s *E2ESuite) checkDeviceConfigManagerStatus(devCfg *v1alpha1.DeviceConfig, ns string, c *C) { @@ -391,6 +398,42 @@ func (s *E2ESuite) patchMetricsExporterImage(devCfg *v1alpha1.DeviceConfig, c *C logger.Info(fmt.Sprintf("updated device config %+v", result)) } +func (s *E2ESuite) patchNodeCondition(c *C, nodeName, condType string, status v1.ConditionStatus) { + patch := fmt.Sprintf(`{"status":{"conditions":[{"type":"%s","status":"%s","reason":"e2e-test","message":"set by e2e test"}]}}`, condType, status) + _, err := s.clientSet.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch), metav1.PatchOptions{}, "status") + c.Assert(err, IsNil, Commentf("failed to patch condition %s=%s for node %s", condType, status, nodeName)) +} + +func (s *E2ESuite) getWorkflowForNode(c *C, nodeName string) *wfv1.Workflow { + wfList, err := 
s.wfClient.ArgoprojV1alpha1().Workflows(s.ns).List(context.TODO(), metav1.ListOptions{}) + c.Assert(err, IsNil) + + for _, wf := range wfList.Items { + if strings.Contains(wf.Name, nodeName) { + return &wf + } + } + c.Fatalf("workflow for node %s not found", nodeName) + return nil +} + +func (s *E2ESuite) verifyWorkflowSucceeded(c *C, wf *wfv1.Workflow) { + assert.Eventually(c, func() bool { + updated, err := s.wfClient.ArgoprojV1alpha1().Workflows(wf.Namespace).Get(context.TODO(), wf.Name, metav1.GetOptions{}) + if err != nil { + logger.Errorf("failed to get workflow %s: %v", wf.Name, err) + return false + } + logger.Infof("workflow %s current phase: %s", wf.Name, updated.Status.Phase) + return updated.Status.Phase == wfv1.WorkflowSucceeded + }, 15*time.Minute, 10*time.Second) +} + +func (s *E2ESuite) deleteWorkflowForNode(c *C, wf *wfv1.Workflow) { + err := s.wfClient.ArgoprojV1alpha1().Workflows(wf.Namespace).Delete(context.TODO(), wf.Name, metav1.DeleteOptions{}) + c.Assert(err, IsNil, Commentf("failed to delete workflow %s", wf.Name)) +} + func (s *E2ESuite) isUpgradeInProgress(devCfg *v1alpha1.DeviceConfig) bool { // Define the blocked states that indicate an upgrade is in progress blockedStates := map[v1alpha1.UpgradeState]bool{ @@ -456,10 +499,10 @@ func (s *E2ESuite) verifyDeviceConfigStatus(devCfg *v1alpha1.DeviceConfig, c *C) devCfg.Status.Drivers.DesiredNumber == devCfg.Status.Drivers.AvailableNumber && devCfg.Status.DevicePlugin.NodesMatchingSelectorNumber == devCfg.Status.DevicePlugin.AvailableNumber && devCfg.Status.DevicePlugin.DesiredNumber == devCfg.Status.DevicePlugin.AvailableNumber - }, 45*time.Minute, 5*time.Second) + }, 20*time.Minute, 5*time.Second) } -func (s *E2ESuite) verifyNodeGPULabel(devCfg *v1alpha1.DeviceConfig, c *C) { +func (s *E2ESuite) verifyNodeGPULabel(devCfg *v1alpha1.DeviceConfig, label string, c *C) { assert.Eventually(c, func() bool { nodes, err := s.clientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ LabelSelector: func() string { @@ -476,13 +519,13 @@ func (s *E2ESuite) verifyNodeGPULabel(devCfg *v1alpha1.DeviceConfig, c *C) { } for _, node := range nodes.Items { - if !utils.CheckGpuLabel(node.Status.Capacity) { + if !utils.CheckGpuLabel(node.Status.Capacity, label) { logger.Infof("gpu not found in %v, %v ", node.Name, node.Status.Capacity) return false } } for _, node := range nodes.Items { - if !utils.CheckGpuLabel(node.Status.Allocatable) { + if !utils.CheckGpuLabel(node.Status.Allocatable, label) { logger.Infof("allocatable gpu not found in %v, %v ", node.Name, node.Status.Allocatable) return false } @@ -551,6 +594,9 @@ func (s *E2ESuite) verifyNodeDriverVersionLabel(devCfg *v1alpha1.DeviceConfig, c allMatched := true for _, node := range nodes.Items { versionLabelKey, versionLabelValue := kmmmodule.GetVersionLabelKV(devCfg) + if versionLabelValue == "" { + versionLabelValue = s.defaultDriverVersion + } if ver, ok := node.Labels[versionLabelKey]; !ok { logger.Errorf("failed to find driver version label %+v on node %+v", versionLabelKey, node.Name) allMatched = false @@ -613,6 +659,7 @@ func (s *E2ESuite) updateNodeDriverVersionLabel(devCfg *v1alpha1.DeviceConfig, c func (s *E2ESuite) verifyROCMPOD(driverInstalled bool, c *C) { pods, err := utils.ListRocmPods(context.TODO(), s.clientSet) assert.NoError(c, err, "failed to deploy pods") + logger.Infof("rocm pods %v", pods) for _, p := range pods { if driverInstalled { v, err := utils.GetRocmInfo(p) @@ -668,6 +715,14 @@ func (s *E2ESuite) TestBasicSkipDriverInstall(c *C) { 
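+	// after verifying the device plugin, delete the config and reboot real GPU
+	// workers so later tests start from a clean node state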
logger.Infof("create %v", s.cfgName) s.createDeviceConfig(devCfg, c) s.verifyDevicePluginStatus(s.ns, c, devCfg) + // delete + s.deleteDeviceConfig(devCfg, c) + + if !s.simEnable { + nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) + err := utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) + assert.NoError(c, err, "failed to reboot nodes") + } } func (s *E2ESuite) TestDeployment(c *C) { @@ -682,7 +737,7 @@ func (s *E2ESuite) TestDeployment(c *C) { s.checkMetricsExporterStatus(devCfg, s.ns, v1.ServiceTypeClusterIP, c) s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) } if !s.simEnable { @@ -723,7 +778,7 @@ func (s *E2ESuite) TestDriverUpgradeByUpdatingCR(c *C) { s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) } s.verifyNodeDriverVersionLabel(devCfg, c) if !s.simEnable { @@ -736,7 +791,7 @@ func (s *E2ESuite) TestDriverUpgradeByUpdatingCR(c *C) { // upgrade // update the CR's driver version config - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.patchDriversVersion(devCfg, c) // update the node resources version labels s.updateNodeDriverVersionLabel(devCfg, c) @@ -781,7 +836,7 @@ func (s *E2ESuite) TestDriverUpgradeByPushingNewCR(c *C) { s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) s.verifyNodeDriverVersionLabel(devCfg, c) } @@ -800,13 +855,13 @@ func (s *E2ESuite) TestDriverUpgradeByPushingNewCR(c *C) { s.deleteDeviceConfig(devCfg, c) } // upgrade by pushing new CR with new version - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.createDeviceConfig(devCfg, c) s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -971,6 +1026,7 @@ func (s *E2ESuite) TestDeploymentWithPreInstalledKMMAndNFD(c *C) { if s.simEnable { c.Skip("Skipping for non amd gpu testbed") } + c.Skip("Skipping for non amd gpu testbed") var deployCommand, undeployCommand, deployWithoutNFDKMMCommand string var nfdInstallCommands, nfdUnInstallCommands []string var kmmInstallCommand, kmmUnInstallCommand string @@ -1230,6 +1286,12 @@ func (s *E2ESuite) TestEnableBlacklist(c *C) { s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) + + // delete + s.deleteDeviceConfig(devCfg, c) + nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) + err := utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) + assert.NoError(c, err, "failed to reboot nodes") } func (s *E2ESuite) TestWorkloadRequestedGPUs(c *C) { @@ -1247,7 +1309,7 @@ func (s *E2ESuite) TestWorkloadRequestedGPUs(c *C) { s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) ret, err := utils.GetAMDGPUCount(ctx, s.clientSet, "gpu") if err != nil { @@ -1266,10 +1328,10 @@ func (s *E2ESuite) 
TestWorkloadRequestedGPUs(c *C) { res := &v1.ResourceRequirements{ Limits: v1.ResourceList{ - "amd.com/gpu": resource.MustParse(fmt.Sprintf("%d", gpuLimitCount)), + amdGpuResourceLabel: resource.MustParse(fmt.Sprintf("%d", gpuLimitCount)), }, Requests: v1.ResourceList{ - "amd.com/gpu": resource.MustParse(fmt.Sprintf("%d", gpuReqCount)), + amdGpuResourceLabel: resource.MustParse(fmt.Sprintf("%d", gpuReqCount)), }, } @@ -1332,7 +1394,7 @@ func (s *E2ESuite) TestWorkloadRequestedGPUsHomogeneousSingle(c *C) { s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) ret, err := utils.GetAMDGPUCount(ctx, s.clientSet, "gpu") if err != nil { @@ -1351,10 +1413,10 @@ func (s *E2ESuite) TestWorkloadRequestedGPUsHomogeneousSingle(c *C) { res := &v1.ResourceRequirements{ Limits: v1.ResourceList{ - "amd.com/gpu": resource.MustParse(fmt.Sprintf("%d", gpuLimitCount)), + amdGpuResourceLabel: resource.MustParse(fmt.Sprintf("%d", gpuLimitCount)), }, Requests: v1.ResourceList{ - "amd.com/gpu": resource.MustParse(fmt.Sprintf("%d", gpuReqCount)), + amdGpuResourceLabel: resource.MustParse(fmt.Sprintf("%d", gpuReqCount)), }, } @@ -1406,7 +1468,7 @@ func (s *E2ESuite) TestWorkloadRequestedGPUsHomogeneousMixed(c *C) { devCfg := s.getDeviceConfig(c) driverEnable := false devCfg.Spec.Driver.Enable = &driverEnable - devCfg.Spec.DevicePlugin.DevicePluginArguments = map[string]string{"resource_naming_strategy": "mixed"} + devCfg.Spec.DevicePlugin.DevicePluginArguments = map[string]string{resourceNamingStrategy: namingStrategyMixed} s.createDeviceConfig(devCfg, c) s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) @@ -1486,7 +1548,7 @@ func (s *E2ESuite) TestWorkloadRequestedGPUsHeterogeneousMixed(c *C) { devCfg := s.getDeviceConfig(c) driverEnable := false devCfg.Spec.Driver.Enable = &driverEnable - devCfg.Spec.DevicePlugin.DevicePluginArguments = map[string]string{"resource_naming_strategy": "mixed"} + devCfg.Spec.DevicePlugin.DevicePluginArguments = map[string]string{resourceNamingStrategy: namingStrategyMixed} s.createDeviceConfig(devCfg, c) s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) @@ -1572,6 +1634,9 @@ func (s *E2ESuite) TestNodeLabellerPartitionLabelsAbsent(c *C) { } func (s *E2ESuite) TestKubeRbacProxyClusterIP(c *C) { + if !s.simEnable { + c.Skip("Skipping for amd gpu testbed") + } _, err := s.dClient.DeviceConfigs(s.ns).Get("deviceconfig-kuberbac-clusterip", metav1.GetOptions{}) assert.Errorf(c, err, "config deviceconfig-kuberbac-clusterip exists") @@ -1601,7 +1666,7 @@ func (s *E2ESuite) TestKubeRbacProxyClusterIP(c *C) { Enable: &enableExporter, SvcType: "ClusterIP", Port: 5000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -1666,7 +1731,7 @@ func (s *E2ESuite) TestKubeRbacProxyNodePort(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -1790,7 +1855,7 @@ func (s *E2ESuite) TestKubeRbacProxyNodePortCerts(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -1882,14 +1947,10 @@ func (s 
*E2ESuite) TestKubeRbacProxyNodePortMTLS(c *C) { // Client CA ConfigMap cmName := "client-ca-cm" - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: s.ns}, - Data: map[string]string{"ca.crt": string(caCert)}, - } - _, err = s.clientSet.CoreV1().ConfigMaps(s.ns).Create(context.TODO(), cm, metav1.CreateOptions{}) + err = utils.CreateConfigMap(context.TODO(), s.clientSet, s.ns, cmName, map[string]string{"ca.crt": string(caCert)}) assert.NoError(c, err) defer func() { - if errDel := s.clientSet.CoreV1().ConfigMaps(s.ns).Delete(context.TODO(), cmName, metav1.DeleteOptions{}); errDel != nil { + if errDel := utils.DeleteConfigMap(context.TODO(), s.clientSet, cmName, s.ns); errDel != nil { logger.Errorf("failed to delete ConfigMap %s: %+v", cmName, errDel) } }() @@ -1968,7 +2029,7 @@ func (s *E2ESuite) TestKubeRbacProxyNodePortMTLS(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -2019,11 +2080,10 @@ func (s *E2ESuite) TestKubeRbacProxyNodePortMTLSWithStaticAuth(c *C) { }() cmName := "client-ca-cm" - cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: s.ns}, Data: map[string]string{"ca.crt": string(caPEM)}} - _, err = s.clientSet.CoreV1().ConfigMaps(s.ns).Create(context.TODO(), cm, metav1.CreateOptions{}) + err = utils.CreateConfigMap(context.TODO(), s.clientSet, s.ns, cmName, map[string]string{"ca.crt": string(caPEM)}) assert.NoError(c, err) defer func() { - if errDel := s.clientSet.CoreV1().ConfigMaps(s.ns).Delete(context.TODO(), cmName, metav1.DeleteOptions{}); errDel != nil { + if errDel := utils.DeleteConfigMap(context.TODO(), s.clientSet, cmName, s.ns); errDel != nil { logger.Errorf("failed to delete ConfigMap %s: %+v", cmName, errDel) } }() @@ -2084,7 +2144,7 @@ func (s *E2ESuite) TestKubeRbacProxyNodePortMTLSWithStaticAuth(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -2156,7 +2216,7 @@ func (s *E2ESuite) TestServiceMonitorCreation(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -2237,7 +2297,7 @@ func (s *E2ESuite) TestServiceMonitorCRDFlow(c *C) { SvcType: "NodePort", Port: 5000, NodePort: 31000, - Image: exporterImage, + Image: exporterMockImage, RbacConfig: v1alpha1.KubeRbacConfig{ Enable: &enableKubeRbacProxy, DisableHttps: &disableHTTPs, @@ -2310,7 +2370,7 @@ func (s *E2ESuite) TestDeployDefaultDriver(c *C) { s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyDeviceConfigStatus(devCfg, c) - s.verifyNodeGPULabel(devCfg, c) + s.verifyNodeGPULabel(devCfg, amdGpuResourceLabel, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2342,7 +2402,7 @@ func (s *E2ESuite) TestDifferentCRsForDifferentNodes(c *C) { } // Deploying Different CR's for worker nodes using unique node selector with different Image Versions - driverVersions := []string{"6.1.3", "6.2.2"} + driverVersions := []string{"6.3.1", "6.3.3"} devCfgs := []*v1alpha1.DeviceConfig{} for i, nodeName := range nodeNames { cfgName := nodeName @@ -2417,7 +2477,7 @@ func (s *E2ESuite) 
TestMaxParallelUpgradePolicyDefaults(c *C) { if s.openshift { devCfg.Spec.Driver.Version = "el9-6.1.1b" } else { - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" } nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) s.patchDriversVersion(devCfg, c) @@ -2425,8 +2485,6 @@ func (s *E2ESuite) TestMaxParallelUpgradePolicyDefaults(c *C) { s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2474,7 +2532,7 @@ func (s *E2ESuite) TestMaxParallelUpgradeTwoNodes(c *C) { if s.openshift { devCfg.Spec.Driver.Version = "el9-6.1.1b" } else { - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" } nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) s.patchDriversVersion(devCfg, c) @@ -2483,8 +2541,6 @@ func (s *E2ESuite) TestMaxParallelUpgradeTwoNodes(c *C) { // Verify rocm pod deployment only for real amd gpu setup if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2539,7 +2595,7 @@ func (s *E2ESuite) TestMaxParallelUpgradeWithDrainPolicy(c *C) { if s.openshift { devCfg.Spec.Driver.Version = "el9-6.1.1b" } else { - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" } nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) s.patchDriversVersion(devCfg, c) @@ -2548,8 +2604,6 @@ func (s *E2ESuite) TestMaxParallelUpgradeWithDrainPolicy(c *C) { // Verify rocm pod deployment only for real amd gpu setup if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2604,7 +2658,7 @@ func (s *E2ESuite) TestMaxParallelUpgradeWithPodDeletionPolicy(c *C) { if s.openshift { devCfg.Spec.Driver.Version = "el9-6.1.1b" } else { - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" } nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) s.patchDriversVersion(devCfg, c) @@ -2613,8 +2667,6 @@ func (s *E2ESuite) TestMaxParallelUpgradeWithPodDeletionPolicy(c *C) { // Verify rocm pod deployment only for real amd gpu setup if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2652,7 +2704,7 @@ func (s *E2ESuite) TestMaxParallelUpgradeBackToDefaultVersion(c *C) { MaxUnavailableNodes: intstr.FromString("100%"), } devCfg.Spec.Driver.UpgradePolicy = &upgradePolicy - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.createDeviceConfig(devCfg, c) s.checkNFDWorkerStatus(s.ns, c, "") s.checkNodeLabellerStatus(s.ns, c, devCfg) @@ -2669,8 +2721,6 @@ func (s *E2ESuite) TestMaxParallelUpgradeBackToDefaultVersion(c *C) { // Verify rocm pod deployment only for real amd gpu setup if !s.simEnable { - err = 
utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2717,7 +2767,7 @@ func (s *E2ESuite) TestMaxParallelUpgradeFromDefaultVersion(c *C) { // upgrade // update the CR's driver version config - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) s.patchDriversVersion(devCfg, c) s.verifyNodeModuleStatus(devCfg, v1alpha1.UpgradeStateComplete, c) @@ -2725,8 +2775,6 @@ func (s *E2ESuite) TestMaxParallelUpgradeFromDefaultVersion(c *C) { // Verify rocm pod deployment only for real amd gpu setup if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) err = utils.DeployRocmPods(context.TODO(), s.clientSet, nil) assert.NoError(c, err, "failed to deploy pods") @@ -2773,7 +2821,7 @@ func (s *E2ESuite) TestMaxParallelChangeDuringUpgrade(c *C) { // update // update the CR's driver version config - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.patchDriversVersion(devCfg, c) // update upgradePolicy maxParallel upgradePolicy = v1alpha1.DriverUpgradePolicySpec{ @@ -2833,7 +2881,7 @@ func (s *E2ESuite) TestMaxUnavailableChangeDuringUpgrade(c *C) { // update // update the CR's driver version config - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.patchDriversVersion(devCfg, c) // update upgradePolicy maxUnavailable @@ -2853,8 +2901,6 @@ func (s *E2ESuite) TestMaxUnavailableChangeDuringUpgrade(c *C) { s.verifyDeviceConfigStatus(devCfg, c) if !s.simEnable { - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) } @@ -2895,7 +2941,7 @@ func (s *E2ESuite) TestRebootRequiredChangeDuringUpgrade(c *C) { // update // update the CR's driver version config - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" s.patchDriversVersion(devCfg, c) // update upgradePolicy rebootRequired @@ -2914,9 +2960,6 @@ func (s *E2ESuite) TestRebootRequiredChangeDuringUpgrade(c *C) { s.checkNodeLabellerStatus(s.ns, c, devCfg) s.verifyNodeModuleStatus(devCfg, v1alpha1.UpgradeStateComplete, c) s.verifyDeviceConfigStatus(devCfg, c) - - err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) - assert.NoError(c, err, "failed to reboot nodes") s.verifyNodeDriverVersionLabel(devCfg, c) // delete @@ -2982,7 +3025,7 @@ func (s *E2ESuite) TestMetricsExporterDaemonSetUpgrade(c *C) { // upgrade // update the CR's device plugin with image - devCfg.Spec.MetricsExporter.Image = exporterImage2 + devCfg.Spec.MetricsExporter.Image = exporterMockImage2 s.patchMetricsExporterImage(devCfg, c) s.verifyDeviceConfigStatus(devCfg, c) s.checkMetricsExporterStatus(devCfg, s.ns, v1.ServiceTypeClusterIP, c) @@ -2993,8 +3036,8 @@ func (s *E2ESuite) TestMetricsExporterDaemonSetUpgrade(c *C) { } func (s *E2ESuite) TestKMMOperatorUpgrade(c *C) { - if s.openshift { - c.Skip("Skipping for openshift testbed") + if s.openshift || !s.simEnable { + c.Skip("Skipping for openshift testbed/non amd gpu testbed") } _, err := s.dClient.DeviceConfigs(s.ns).Get(s.cfgName, metav1.GetOptions{}) assert.Errorf(c, err, fmt.Sprintf("config %v exists", s.cfgName)) @@ -3049,8 +3092,9 
@@ func (s *E2ESuite) TestPreUpgradeHookFailure(c *C) { rebootRequired = true } upgradePolicy := v1alpha1.DriverUpgradePolicySpec{ - Enable: &enable, - RebootRequired: &rebootRequired, + Enable: &enable, + RebootRequired: &rebootRequired, + MaxUnavailableNodes: intstr.FromString("100%"), } devCfg.Spec.Driver.UpgradePolicy = &upgradePolicy s.createDeviceConfig(devCfg, c) @@ -3062,7 +3106,7 @@ func (s *E2ESuite) TestPreUpgradeHookFailure(c *C) { if s.openshift { devCfg.Spec.Driver.Version = "el9-6.1.1b" } else { - devCfg.Spec.Driver.Version = "6.2.2" + devCfg.Spec.Driver.Version = "6.3.2" } nodes := utils.GetAMDGpuWorker(s.clientSet, s.openshift) @@ -3107,7 +3151,96 @@ func (s *E2ESuite) TestPreUpgradeHookFailure(c *C) { s.verifyROCMPOD(false, c) err = utils.DelRocmPods(context.TODO(), s.clientSet) assert.NoError(c, err, "failed to remove rocm pods") - err = utils.RebootNodesWithWait(context.TODO(), s.clientSet, nodes) + err = utils.HandleNodesReboot(context.TODO(), s.clientSet, nodes) assert.NoError(c, err, "failed to reboot nodes") } } + +func (s *E2ESuite) TestRemediationWorkflow(c *C) { + + _, err := s.dClient.DeviceConfigs(s.ns).Get(s.cfgName, metav1.GetOptions{}) + assert.Errorf(c, err, fmt.Sprintf("config %v exists", s.cfgName)) + + logger.Infof("create %v", s.cfgName) + devCfg := s.getDeviceConfig(c) + remediationEnable := true + devCfg.Spec.RemediationWorkflow.Enable = &remediationEnable + s.createDeviceConfig(devCfg, c) + s.verifyDeviceConfigStatus(devCfg, c) + + // Patch the default template to avoid rebooting for kind cluster in CI run. Still tests triggering of workflow on basis of node condition and configmap + if s.ciEnv { + template, err := s.wfClient.ArgoprojV1alpha1().WorkflowTemplates(s.ns).Get(context.TODO(), "default-template", metav1.GetOptions{}) + assert.NoError(c, err) + + template.Spec.Templates[0].Steps = []wfv1.ParallelSteps{ + {Steps: []wfv1.WorkflowStep{{Name: "taint", Template: "taint"}}}, + {Steps: []wfv1.WorkflowStep{{Name: "suspend", Template: "suspend"}}}, + {Steps: []wfv1.WorkflowStep{{Name: "drain", Template: "drain"}}}, + {Steps: []wfv1.WorkflowStep{{Name: "wait", Template: "wait"}}}, + {Steps: []wfv1.WorkflowStep{{Name: "untaint", Template: "untaint"}}}, + } + + _, err = s.wfClient.ArgoprojV1alpha1().WorkflowTemplates(s.ns).Update(context.TODO(), template, metav1.UpdateOptions{}) + assert.NoError(c, err) + } + + var nodes []v1.Node + if s.simEnable { + nodes = utils.GetNonAMDGpuWorker(s.clientSet) + } else { + nodes = utils.GetAMDGpuWorker(s.clientSet, s.openshift) + } + + if len(nodes) == 0 { + c.Fatalf("No nodes found for remediation") + } + + node := nodes[0] + nodeName := node.Name + + defer func() { + nodeObj, err := s.clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + logger.Errorf("Failed to fetch node %s for untainting: %v", nodeName, err) + return + } + + var newTaints []v1.Taint + for _, taint := range nodeObj.Spec.Taints { + if taint.Key != "amd-gpu-unhealthy" { + newTaints = append(newTaints, taint) + } + } + nodeObj.Spec.Taints = newTaints + + _, err = s.clientSet.CoreV1().Nodes().Update(context.TODO(), nodeObj, metav1.UpdateOptions{}) + if err != nil { + logger.Errorf("Failed to remove taint from node %s: %v", nodeName, err) + } else { + logger.Infof("Removed amd-gpu-unhealthy taint from node %s", nodeName) + } + }() + + // Patch node condition to True + s.patchNodeCondition(c, nodeName, "AMDGPUUnhealthy", v1.ConditionTrue) + logger.Info(fmt.Sprintf("Node condition AMDGPUUnhealthy hit on %+v", 
nodeName)) + + // Wait for the workflow to be triggered + logger.Info("Waiting for workflow to be triggered") + time.Sleep(60 * time.Second) + + // Patch node condition to False (simulate remediation completed) + s.patchNodeCondition(c, nodeName, "AMDGPUUnhealthy", v1.ConditionFalse) + + // Get and verify workflow + wf := s.getWorkflowForNode(c, nodeName) + s.verifyWorkflowSucceeded(c, wf) + + wf = s.getWorkflowForNode(c, nodeName) + logger.Infof("Workflow for node %s: %+v", nodeName, wf) + + // Delete workflow + s.deleteWorkflowForNode(c, wf) + +} diff --git a/tests/e2e/doc.go b/tests/e2e/doc.go index 0bf9ca35..0a1c5ff8 100644 --- a/tests/e2e/doc.go +++ b/tests/e2e/doc.go @@ -18,6 +18,7 @@ package e2e import ( "github.com/ROCm/gpu-operator/tests/e2e/client" + workflowclient "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" apiextClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" @@ -27,6 +28,7 @@ import ( type E2ESuite struct { clientSet *kubernetes.Clientset dClient *client.DeviceConfigClient + wfClient workflowclient.Interface cfgName string registry string helmChart string diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 987890e0..e52729a6 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -31,6 +31,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/ROCm/gpu-operator/tests/e2e/client" + workflowclient "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" "github.com/sirupsen/logrus" . "gopkg.in/check.v1" @@ -123,6 +124,12 @@ func (s *E2ESuite) SetUpSuite(c *C) { } s.monClient = monClient + wfClient, err := workflowclient.NewForConfig(config) + if err != nil { + c.Fatalf("Failed to create workflow client: %v", err) + } + s.wfClient = wfClient + s.clusterType = utils.GetClusterType(config) if s.openshift == false { diff --git a/vendor/github.com/argoproj/argo-workflows/v3/LICENSE b/vendor/github.com/argoproj/argo-workflows/v3/LICENSE new file mode 100644 index 00000000..67e99b06 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017-2018 The Argo Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go b/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go
new file mode 100644
index 00000000..35777b57
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go
@@ -0,0 +1,168 @@
+package errors
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+)
+
+// Externally visible error codes
+const (
+	CodeUnauthorized   = "ERR_UNAUTHORIZED"
+	CodeBadRequest     = "ERR_BAD_REQUEST"
+	CodeForbidden      = "ERR_FORBIDDEN"
+	CodeNotFound       = "ERR_NOT_FOUND"
+	CodeNotImplemented = "ERR_NOT_IMPLEMENTED"
+	CodeTimeout        = "ERR_TIMEOUT"
+	CodeInternal       = "ERR_INTERNAL"
+)
+
+// ArgoError is an error interface that additionally carries an error code,
+// an HTTP status code, and a JSON representation of the error
+type ArgoError interface {
+	Error() string
+	Code() string
+	HTTPCode() int
+	JSON() []byte
+}
+
+// argoerr is the internal implementation of an Argo error, wrapping a standard library error
+type argoerr struct {
+	code    string
+	message string
+	err     error
+}
+
+// New returns an error with the supplied code and message.
+func New(code string, message string) error {
+	err := errors.New(message)
+	return argoerr{code, message, err}
+}
+
+// Errorf returns an error and formats according to a format specifier
+func Errorf(code string, format string, args ...interface{}) error {
+	return New(code, fmt.Sprintf(format, args...))
+}
+
+// InternalError is a convenience function to create an Internal error with a message
+func InternalError(message string) error {
+	return New(CodeInternal, message)
+}
+
+// InternalErrorf is a convenience function to format an Internal error
+func InternalErrorf(format string, args ...interface{}) error {
+	return Errorf(CodeInternal, format, args...)
+}
+
+// InternalWrapError annotates the error with the ERR_INTERNAL code and an optional message
+func InternalWrapError(err error, message ...string) error {
+	if len(message) == 0 {
+		return Wrap(err, CodeInternal, err.Error())
+	}
+	return Wrap(err, CodeInternal, message[0])
+}
+
+// InternalWrapErrorf annotates the error with the ERR_INTERNAL code and a formatted message
+func InternalWrapErrorf(err error, format string, args ...interface{}) error {
+	return Wrap(err, CodeInternal, fmt.Sprintf(format, args...))
+}
+
+// Wrap returns an error annotating err with the supplied code and a new
+// message. The original error is preserved and accessible via Cause().
+// If err is nil, Wrap returns nil.
+func Wrap(err error, code string, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = fmt.Errorf(message+": %w", err)
+	return argoerr{code, message, err}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//	type causer interface {
+//		Cause() error
+//	}
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
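+//
+// Illustrative example (added for clarity; not part of the upstream file),
+// using only the helpers defined in this file:
+//
+//	inner := New(CodeNotFound, "missing")
+//	outer := Wrap(inner, CodeInternal, "lookup failed")
+//	Cause(outer) // == inner, the original ERR_NOT_FOUND error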
+func Cause(err error) error { + if argoErr, ok := err.(argoerr); ok { + return unwrapCauseArgoErr(argoErr.err) + } + return unwrapCause(err) +} + +func unwrapCauseArgoErr(err error) error { + innerErr := errors.Unwrap(err) + for innerErr != nil { + err = innerErr + innerErr = errors.Unwrap(err) + } + return err +} + +func unwrapCause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} + +func (e argoerr) Error() string { + return e.message +} + +func (e argoerr) Code() string { + return e.code +} + +func (e argoerr) JSON() []byte { + type errBean struct { + Code string `json:"code"` + Message string `json:"message"` + } + eb := errBean{e.code, e.message} + j, _ := json.Marshal(eb) + return j +} + +func (e argoerr) HTTPCode() int { + switch e.Code() { + case CodeUnauthorized: + return http.StatusUnauthorized + case CodeForbidden: + return http.StatusForbidden + case CodeNotFound: + return http.StatusNotFound + case CodeBadRequest: + return http.StatusBadRequest + case CodeNotImplemented: + return http.StatusNotImplemented + case CodeTimeout, CodeInternal: + return http.StatusInternalServerError + default: + return http.StatusInternalServerError + } +} + +// IsCode is a helper to determine if the error is of a specific code +func IsCode(code string, err error) bool { + if argoErr, ok := err.(argoerr); ok { + return argoErr.code == code + } + return false +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go new file mode 100644 index 00000000..cc939aaa --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go @@ -0,0 +1,53 @@ +package workflow + +import ( + "time" + + "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type ClientConfig struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + rest.ContentConfig + + // KubeService requires Basic authentication + Username string + Password string + + // KubeService requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate rest.ImpersonationConfig + + AuthProvider *clientcmdapi.AuthProviderConfig + + // TLSClientConfig contains settings to enable transport layer security + rest.TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS float32 + + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. + Burst int + + // The maximum length of time to wait before giving up on a server request. 
A value of zero means no timeout. + Timeout time.Duration +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go new file mode 100644 index 00000000..82f124aa --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go @@ -0,0 +1,41 @@ +package workflow + +// Workflow constants +const ( + Group string = "argoproj.io" + Version string = "v1alpha1" + APIVersion string = Group + "/" + Version + WorkflowKind string = "Workflow" + WorkflowSingular string = "workflow" + WorkflowPlural string = "workflows" + WorkflowShortName string = "wf" + WorkflowFullName string = WorkflowPlural + "." + Group + WorkflowTemplateKind string = "WorkflowTemplate" + WorkflowTemplateSingular string = "workflowtemplate" + WorkflowTemplatePlural string = "workflowtemplates" + WorkflowTemplateShortName string = "wftmpl" + WorkflowTemplateFullName string = WorkflowTemplatePlural + "." + Group + WorkflowEventBindingPlural string = "workfloweventbindings" + CronWorkflowKind string = "CronWorkflow" + CronWorkflowSingular string = "cronworkflow" + CronWorkflowPlural string = "cronworkflows" + CronWorkflowShortName string = "cronwf" + CronWorkflowFullName string = CronWorkflowPlural + "." + Group + ClusterWorkflowTemplateKind string = "ClusterWorkflowTemplate" + ClusterWorkflowTemplateSingular string = "clusterworkflowtemplate" + ClusterWorkflowTemplatePlural string = "clusterworkflowtemplates" + ClusterWorkflowTemplateShortName string = "cwftmpl" + ClusterWorkflowTemplateFullName string = ClusterWorkflowTemplatePlural + "." + Group + WorkflowEventBindingKind string = "WorkflowEventBinding" + WorkflowTaskSetKind string = "WorkflowTaskSet" + WorkflowTaskSetSingular string = "workflowtaskset" + WorkflowTaskSetPlural string = "workflowtasksets" + WorkflowTaskSetShortName string = "wfts" + WorkflowTaskSetFullName string = WorkflowTaskSetPlural + "." + Group + WorkflowTaskResultKind string = "WorkflowTaskResult" + WorkflowArtifactGCTaskKind string = "WorkflowArtifactGCTask" + WorkflowArtifactGCTaskSingular string = "workflowartifactgctask" + WorkflowArtifactGCTaskPlural string = "workflowartifactgctasks" + WorkflowArtifactGCTaskShortName string = "wfat" + WorkflowArtifactGCTaskFullName string = WorkflowArtifactGCTaskPlural + "." + Group +) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go new file mode 100644 index 00000000..7fb40985 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go @@ -0,0 +1,33 @@ +package v1alpha1 + +import ( + "encoding/json" + "strconv" +) + +// Amount represent a numeric amount. 
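+// For example, Amount{Value: json.Number("1.5")} marshals to the bare JSON
+// number 1.5, and Float64() parses it back (illustrative note added for
+// clarity; not part of the upstream file).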
+// +kubebuilder:validation:Type=number
+type Amount struct {
+	Value json.Number `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.Number"`
+}
+
+func (a *Amount) UnmarshalJSON(data []byte) error {
+	a.Value = json.Number(data)
+	return nil
+}
+
+func (a Amount) MarshalJSON() ([]byte, error) {
+	return []byte(a.Value), nil
+}
+
+func (a Amount) OpenAPISchemaType() []string {
+	return []string{"number"}
+}
+
+func (a Amount) OpenAPISchemaFormat() string {
+	return ""
+}
+
+func (a *Amount) Float64() (float64, error) {
+	return strconv.ParseFloat(string(a.Value), 64)
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go
new file mode 100644
index 00000000..f3459f31
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go
@@ -0,0 +1,52 @@
+package v1alpha1
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// * Its JSON type is just string.
+// * It will unmarshal int64, int32, float64, float32, boolean, or a plain string and represent it as a string.
+// * It will marshal back to string - marshalling is not symmetric.
+type AnyString string
+
+func ParseAnyString(val interface{}) AnyString {
+	return AnyString(fmt.Sprintf("%v", val))
+}
+
+func AnyStringPtr(val interface{}) *AnyString {
+	i := ParseAnyString(val)
+	return &i
+}
+
+func (i *AnyString) UnmarshalJSON(value []byte) error {
+	var v interface{}
+	err := json.Unmarshal(value, &v)
+	if err != nil {
+		return err
+	}
+	switch v := v.(type) {
+	case float64:
+		*i = AnyString(strconv.FormatFloat(v, 'f', -1, 64))
+	case float32:
+		*i = AnyString(strconv.FormatFloat(float64(v), 'f', -1, 32))
+	case int64:
+		*i = AnyString(strconv.FormatInt(v, 10))
+	case int32:
+		*i = AnyString(strconv.FormatInt(int64(v), 10))
+	case bool:
+		*i = AnyString(strconv.FormatBool(v))
+	case string:
+		*i = AnyString(v)
+	}
+	return nil
+}
+
+func (i AnyString) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(i))
+}
+
+func (i AnyString) String() string {
+	return string(i)
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go
new file mode 100644
index 00000000..dbc840d5
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go
@@ -0,0 +1,63 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion
+// +genclient
+// +kubebuilder:resource:shortName=wfat
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+type WorkflowArtifactGCTask struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Spec ArtifactGCSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	Status ArtifactGCStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ArtifactGCSpec specifies the Artifacts that need to be deleted
+type ArtifactGCSpec struct {
+	// ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node
+	ArtifactsByNode map[string]ArtifactNodeSpec `json:"artifactsByNode,omitempty" protobuf:"bytes,1,rep,name=artifactsByNode"`
+}
+
+// ArtifactNodeSpec
specifies the Artifacts that need to be deleted for a given Node +type ArtifactNodeSpec struct { + // ArchiveLocation is the template-level Artifact location specification + ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,1,opt,name=archiveLocation"` + // Artifacts maps artifact name to Artifact description + Artifacts map[string]Artifact `json:"artifacts,omitempty" protobuf:"bytes,2,rep,name=artifacts"` +} + +// ArtifactGCStatus describes the result of the deletion +type ArtifactGCStatus struct { + // ArtifactResultsByNode maps Node name to result + ArtifactResultsByNode map[string]ArtifactResultNodeStatus `json:"artifactResultsByNode,omitempty" protobuf:"bytes,1,rep,name=artifactResultsByNode"` +} + +// ArtifactResultNodeStatus describes the result of the deletion on a given node +type ArtifactResultNodeStatus struct { + // ArtifactResults maps Artifact name to result of the deletion + ArtifactResults map[string]ArtifactResult `json:"artifactResults,omitempty" protobuf:"bytes,1,rep,name=artifactResults"` +} + +// ArtifactResult describes the result of attempting to delete a given Artifact +type ArtifactResult struct { + // Name is the name of the Artifact + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Success describes whether the deletion succeeded + Success bool `json:"success,omitempty" protobuf:"varint,2,opt,name=success"` + + // Error is an optional error message which should be set if Success==false + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowArtifactGCTaskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []WorkflowArtifactGCTask `json:"items" protobuf:"bytes,2,opt,name=items"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go new file mode 100644 index 00000000..8c2ae994 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go @@ -0,0 +1,181 @@ +package v1alpha1 + +import ( + "fmt" + "path" + "strings" +) + +var ( + // DefaultArchivePattern is the default pattern when storing artifacts in an archive repository + DefaultArchivePattern = "{{workflow.name}}/{{pod.name}}" +) + +// ArtifactRepository represents an artifact repository in which a controller will store its artifacts +type ArtifactRepository struct { + // ArchiveLogs enables log archiving + ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"` + // S3 stores artifact in a S3-compliant object store + S3 *S3ArtifactRepository `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"` + // Artifactory stores artifacts to JFrog Artifactory + Artifactory *ArtifactoryArtifactRepository `json:"artifactory,omitempty" protobuf:"bytes,3,opt,name=artifactory"` + // HDFS stores artifacts in HDFS + HDFS *HDFSArtifactRepository `json:"hdfs,omitempty" protobuf:"bytes,4,opt,name=hdfs"` + // OSS stores artifact in a OSS-compliant object store + OSS *OSSArtifactRepository `json:"oss,omitempty" protobuf:"bytes,5,opt,name=oss"` + // GCS stores artifact in a GCS object store + GCS *GCSArtifactRepository `json:"gcs,omitempty" protobuf:"bytes,6,opt,name=gcs"` 
+ // Azure stores artifact in an Azure Storage account + Azure *AzureArtifactRepository `json:"azure,omitempty" protobuf:"bytes,7,opt,name=azure"` +} + +func (a *ArtifactRepository) IsArchiveLogs() bool { + return a != nil && a.ArchiveLogs != nil && *a.ArchiveLogs +} + +type ArtifactRepositoryType interface { + IntoArtifactLocation(l *ArtifactLocation) +} + +func (a *ArtifactRepository) Get() ArtifactRepositoryType { + if a == nil { + return nil + } else if a.Artifactory != nil { + return a.Artifactory + } else if a.Azure != nil { + return a.Azure + } else if a.GCS != nil { + return a.GCS + } else if a.HDFS != nil { + return a.HDFS + } else if a.OSS != nil { + return a.OSS + } else if a.S3 != nil { + return a.S3 + } + return nil +} + +// ToArtifactLocation returns the artifact location set with default template key: +// key = `{{workflow.name}}/{{pod.name}}` +func (a *ArtifactRepository) ToArtifactLocation() *ArtifactLocation { + if a == nil { + return nil + } + l := &ArtifactLocation{ArchiveLogs: a.ArchiveLogs} + v := a.Get() + if v != nil { + v.IntoArtifactLocation(l) + } + return l +} + +// S3ArtifactRepository defines the controller configuration for an S3 artifact repository +type S3ArtifactRepository struct { + S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"` + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` + + // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. + // DEPRECATED. Use KeyFormat instead + KeyPrefix string `json:"keyPrefix,omitempty" protobuf:"bytes,3,opt,name=keyPrefix"` +} + +func (r *S3ArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { + k := r.KeyFormat + if k == "" { + k = path.Join(r.KeyPrefix, DefaultArchivePattern) + } + l.S3 = &S3Artifact{S3Bucket: r.S3Bucket, Key: k} +} + +// OSSArtifactRepository defines the controller configuration for an OSS artifact repository +type OSSArtifactRepository struct { + OSSBucket `json:",inline" protobuf:"bytes,1,opt,name=oSSBucket"` + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` +} + +func (r *OSSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { + k := r.KeyFormat + if k == "" { + k = DefaultArchivePattern + } + l.OSS = &OSSArtifact{OSSBucket: r.OSSBucket, Key: k} +} + +// GCSArtifactRepository defines the controller configuration for a GCS artifact repository +type GCSArtifactRepository struct { + GCSBucket `json:",inline" protobuf:"bytes,1,opt,name=gCSBucket"` + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` +} + +func (r *GCSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { + k := r.KeyFormat + if k == "" { + k = DefaultArchivePattern + } + l.GCS = &GCSArtifact{GCSBucket: r.GCSBucket, Key: k} +} + +// ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository +type ArtifactoryArtifactRepository struct { + ArtifactoryAuth `json:",inline" protobuf:"bytes,1,opt,name=artifactoryAuth"` + // RepoURL is the url for artifactory repo. 
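+	// (Illustrative, not upstream text: e.g. a hypothetical
+	// https://artifactory.example.com/artifactory/my-repo)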
+	RepoURL string `json:"repoURL,omitempty" protobuf:"bytes,2,opt,name=repoURL"`
+	// KeyFormat defines the format of how to store keys and can reference workflow variables.
+	KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,3,opt,name=keyFormat"`
+}
+
+func (r *ArtifactoryArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) {
+	url := r.RepoURL
+	if !strings.HasSuffix(url, "/") {
+		url = url + "/"
+	}
+	k := r.KeyFormat
+	if k == "" {
+		k = DefaultArchivePattern
+	}
+	l.Artifactory = &ArtifactoryArtifact{ArtifactoryAuth: r.ArtifactoryAuth, URL: fmt.Sprintf("%s%s", url, k)}
+}
+
+// AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository
+type AzureArtifactRepository struct {
+	AzureBlobContainer `json:",inline" protobuf:"bytes,1,opt,name=blobContainer"`
+
+	// BlobNameFormat defines the format of how to store blob names. Can reference workflow variables
+	BlobNameFormat string `json:"blobNameFormat,omitempty" protobuf:"bytes,2,opt,name=blobNameFormat"`
+}
+
+func (r *AzureArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) {
+	k := r.BlobNameFormat
+	if k == "" {
+		k = DefaultArchivePattern
+	}
+	l.Azure = &AzureArtifact{AzureBlobContainer: r.AzureBlobContainer, Blob: k}
+}
+
+// HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository
+type HDFSArtifactRepository struct {
+	HDFSConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSConfig"`
+
+	// PathFormat defines the format of the path used to store a file. Can reference workflow variables
+	PathFormat string `json:"pathFormat,omitempty" protobuf:"bytes,2,opt,name=pathFormat"`
+
+	// Force copies a file forcibly even if it exists
+	Force bool `json:"force,omitempty" protobuf:"varint,3,opt,name=force"`
+}
+
+func (r *HDFSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) {
+	p := r.PathFormat
+	if p == "" {
+		p = DefaultArchivePattern
+	}
+	l.HDFS = &HDFSArtifact{HDFSConfig: r.HDFSConfig, Path: p, Force: r.Force}
+}
+
+// MetricsConfig defines a config for a metrics server
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go
new file mode 100644
index 00000000..a9c27f62
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go
@@ -0,0 +1,63 @@
+package v1alpha1
+
+import (
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope
+// +genclient
+// +genclient:noStatus
+// +genclient:nonNamespaced
+// +kubebuilder:resource:scope=Cluster,shortName=clusterwftmpl;cwft
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterWorkflowTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+type ClusterWorkflowTemplates []ClusterWorkflowTemplate
+
+func (w ClusterWorkflowTemplates) Len() int {
+	return len(w)
+}
+
+func (w ClusterWorkflowTemplates) Less(i, j int) bool {
+	return strings.Compare(w[j].ObjectMeta.Name, w[i].ObjectMeta.Name) > 0
+}
+
+func (w ClusterWorkflowTemplates) Swap(i, j int) {
+	w[i], w[j] = w[j], w[i]
+}
+
+// ClusterWorkflowTemplateList is a list of ClusterWorkflowTemplate resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterWorkflowTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items ClusterWorkflowTemplates `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+var _ TemplateHolder = &ClusterWorkflowTemplate{}
+
+// GetTemplateByName retrieves a defined template by its name
+func (cwftmpl *ClusterWorkflowTemplate) GetTemplateByName(name string) *Template {
+	for _, t := range cwftmpl.Spec.Templates {
+		if t.Name == name {
+			return &t
+		}
+	}
+	return nil
+}
+
+// GetResourceScope returns the template scope of the workflow template.
+func (cwftmpl *ClusterWorkflowTemplate) GetResourceScope() ResourceScope {
+	return ResourceScopeCluster
+}
+
+// GetWorkflowSpec returns the WorkflowSpec of the cluster workflow template.
+func (cwftmpl *ClusterWorkflowTemplate) GetWorkflowSpec() *WorkflowSpec {
+	return &cwftmpl.Spec
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go
new file mode 100644
index 00000000..6a7c584b
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go
@@ -0,0 +1,74 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type ResourceScope string
+
+const (
+	ResourceScopeLocal ResourceScope = "local"
+	ResourceScopeNamespaced ResourceScope = "namespaced"
+	ResourceScopeCluster ResourceScope = "cluster"
+)
+
+// TemplateHolder is an object that holds templates; e.g. Workflow, WorkflowTemplate, and ClusterWorkflowTemplate
+type TemplateHolder interface {
+	GetNamespace() string
+	GetName() string
+	GroupVersionKind() schema.GroupVersionKind
+	GetTemplateByName(name string) *Template
+	GetResourceScope() ResourceScope
+}
+
+// WorkflowSpecHolder is an object that holds a WorkflowSpec; e.g. WorkflowTemplate and ClusterWorkflowTemplate
+type WorkflowSpecHolder interface {
+	metav1.Object
+	GetWorkflowSpec() *WorkflowSpec
+}
+
+// TemplateReferenceHolder is an object that holds a reference to other templates; e.g. WorkflowStep, DAGTask, and NodeStatus
+type TemplateReferenceHolder interface {
+	// GetTemplate returns the template. This may be nil. This is first precedence.
+	GetTemplate() *Template
+	// GetTemplateRef returns the template ref. This may be nil. This is second precedence.
+	GetTemplateRef() *TemplateRef
+	// GetTemplateName returns the template name. This may be empty. This is last precedence.
+	GetTemplateName() string
+	// GetName returns the name of the template reference holder.
+	GetName() string
+	// IsDAGTask returns true if the template reference is a DAGTask.
+	IsDAGTask() bool
+	// IsWorkflowStep returns true if the template reference is a WorkflowStep.
+	IsWorkflowStep() bool
+}
+
+// SubmitOpts are workflow submission options
+type SubmitOpts struct {
+	// Name overrides metadata.name
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// GenerateName overrides metadata.generateName
+	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+	// Entrypoint overrides spec.entrypoint
+	Entrypoint string `json:"entryPoint,omitempty" protobuf:"bytes,4,opt,name=entrypoint"`
+	// Parameters passes input parameters to the workflow
+	Parameters []string `json:"parameters,omitempty" protobuf:"bytes,5,rep,name=parameters"`
+	// ServiceAccount runs all pods in the workflow using the specified ServiceAccount.
+	ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,7,opt,name=serviceAccount"`
+	// DryRun validates the workflow on the client side without creating it. This option is not supported by the API
+	DryRun bool `json:"dryRun,omitempty" protobuf:"varint,8,opt,name=dryRun"`
+	// ServerDryRun validates the workflow on the server side without creating it
+	ServerDryRun bool `json:"serverDryRun,omitempty" protobuf:"varint,9,opt,name=serverDryRun"`
+	// Labels adds to metadata.labels
+	Labels string `json:"labels,omitempty" protobuf:"bytes,10,opt,name=labels"`
+	// OwnerReference creates a metadata.ownerReference
+	OwnerReference *metav1.OwnerReference `json:"ownerReference,omitempty" protobuf:"bytes,11,opt,name=ownerReference"`
+	// Annotations adds to metadata.annotations
+	Annotations string `json:"annotations,omitempty" protobuf:"bytes,12,opt,name=annotations"`
+	// Set the podPriorityClassName of the workflow
+	PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,13,opt,name=podPriorityClassName"`
+	// Priority is used if the controller is configured to process a limited number of workflows in parallel;
+	// higher priority workflows are processed first.
+	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,14,opt,name=priority"`
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go
new file mode 100644
index 00000000..ac1a4f44
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go
@@ -0,0 +1,142 @@
+package v1alpha1
+
+import (
+	"fmt"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+type ContainerSetTemplate struct {
+	Containers []ContainerNode `json:"containers" protobuf:"bytes,4,rep,name=containers"`
+	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,3,rep,name=volumeMounts"`
+	// RetryStrategy describes how to retry container nodes if the container set fails.
+	// Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.
+	RetryStrategy *ContainerSetRetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,5,opt,name=retryStrategy"`
+}
+
+// ContainerSetRetryStrategy provides controls on how to retry a container set
+type ContainerSetRetryStrategy struct {
+	// Duration is the time between each retry; example values are "300ms", "1s" or "5m".
+	// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
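+	// Illustrative example (added for clarity; not upstream text): with
+	// Duration "30s" and Retries 3, GetRetryStrategy below yields a
+	// wait.Backoff{Steps: 3, Duration: 30 * time.Second}.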
+ Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. + Retries *intstr.IntOrString `json:"retries" protobuf:"bytes,2,rep,name=retries"` +} + +func (t *ContainerSetTemplate) GetRetryStrategy() (wait.Backoff, error) { + if t == nil || t.RetryStrategy == nil || t.RetryStrategy.Retries == nil { + return wait.Backoff{Steps: 1}, nil + } + + backoff := wait.Backoff{Steps: t.RetryStrategy.Retries.IntValue()} + + if t.RetryStrategy.Duration == "" { + return backoff, nil + } + + baseDuration, err := time.ParseDuration(t.RetryStrategy.Duration) + if err != nil { + return wait.Backoff{}, err + } + + if baseDuration < time.Duration(0) { + return wait.Backoff{}, fmt.Errorf("duration has to be positive, current duration: %v ", baseDuration) + } + + backoff.Duration = baseDuration + return backoff, nil +} + +func (in *ContainerSetTemplate) GetContainers() []corev1.Container { + var ctrs []corev1.Container + for _, t := range in.GetGraph() { + c := t.Container + c.VolumeMounts = append(c.VolumeMounts, in.VolumeMounts...) + ctrs = append(ctrs, c) + } + return ctrs +} + +func (in *ContainerSetTemplate) HasContainerNamed(n string) bool { + for _, c := range in.GetContainers() { + if n == c.Name { + return true + } + } + return false +} + +func (in *ContainerSetTemplate) GetGraph() []ContainerNode { + if in == nil { + return nil + } + return in.Containers +} + +func (in *ContainerSetTemplate) HasSequencedContainers() bool { + for _, n := range in.GetGraph() { + if len(n.Dependencies) > 0 { + return true + } + } + return false +} + +// Validate checks if the ContainerSetTemplate is valid +func (in *ContainerSetTemplate) Validate() error { + if len(in.Containers) == 0 { + return fmt.Errorf("containers must have at least one container") + } + + names := make([]string, 0) + for _, ctr := range in.Containers { + names = append(names, ctr.Name) + } + err := validateWorkflowFieldNames(names, false) + if err != nil { + return fmt.Errorf("containers%s", err.Error()) + } + + // Ensure there are no collisions with volume mountPaths and artifact load paths + mountPaths := make(map[string]string) + for i, volMount := range in.VolumeMounts { + if prev, ok := mountPaths[volMount.MountPath]; ok { + return fmt.Errorf("volumeMounts[%d].mountPath '%s' already mounted in %s", i, volMount.MountPath, prev) + } + mountPaths[volMount.MountPath] = fmt.Sprintf("volumeMounts.%s", volMount.Name) + } + + // Ensure the dependencies are defined + nameToContainer := make(map[string]ContainerNode) + for _, ctr := range in.Containers { + nameToContainer[ctr.Name] = ctr + } + for _, ctr := range in.Containers { + for _, depName := range ctr.Dependencies { + _, ok := nameToContainer[depName] + if !ok { + return fmt.Errorf("containers.%s dependency '%s' not defined", ctr.Name, depName) + } + } + } + + // Ensure there is no dependency cycle + depGraph := make(map[string][]string) + for _, ctr := range in.Containers { + depGraph[ctr.Name] = append(depGraph[ctr.Name], ctr.Dependencies...) 
+ } + err = validateNoCycles(depGraph) + if err != nil { + return fmt.Errorf("containers %s", err.Error()) + } + return nil +} + +type ContainerNode struct { + corev1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` + Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,2,rep,name=dependencies"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go new file mode 100644 index 00000000..64932944 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go @@ -0,0 +1,224 @@ +package v1alpha1 + +import ( + "context" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" + "github.com/argoproj/argo-workflows/v3/util/deprecation" +) + +// CronWorkflow is the definition of a scheduled workflow resource +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=cwf;cronwf +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CronWorkflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Spec CronWorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + Status CronWorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// CronWorkflowList is list of CronWorkflow resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CronWorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []CronWorkflow `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type ConcurrencyPolicy string + +const ( + AllowConcurrent ConcurrencyPolicy = "Allow" + ForbidConcurrent ConcurrencyPolicy = "Forbid" + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +const annotationKeyLatestSchedule = workflow.CronWorkflowFullName + "/last-used-schedule" + +// CronWorkflowSpec is the specification of a CronWorkflow +type CronWorkflowSpec struct { + // WorkflowSpec is the spec of the workflow to be run + WorkflowSpec WorkflowSpec `json:"workflowSpec" protobuf:"bytes,1,opt,name=workflowSpec,casttype=WorkflowSpec"` + // Schedule is a schedule to run the Workflow in Cron format. Deprecated, use Schedules + Schedule string `json:"schedule,omitempty" protobuf:"bytes,2,opt,name=schedule"` + // ConcurrencyPolicy is the K8s-style concurrency policy that will be used + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + // Suspend is a flag that will stop new CronWorkflows from running if set to true + Suspend bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + // StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its + // original scheduled time if it is missed. 
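+	// Illustrative note (not upstream text): with a deadline of 60, a run
+	// whose 12:00:00 schedule was missed may still be started until 12:01:00;
+	// after that the run is skipped.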
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=startingDeadlineSeconds"` + // SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + // FailedJobsHistoryLimit is the number of failed jobs to be kept at a time + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` + // Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time. + Timezone string `json:"timezone,omitempty" protobuf:"bytes,8,opt,name=timezone"` + // WorkflowMetadata contains some metadata of the workflow to be run + WorkflowMetadata *metav1.ObjectMeta `json:"workflowMetadata,omitempty" protobuf:"bytes,9,opt,name=workflowMeta"` + // v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition + StopStrategy *StopStrategy `json:"stopStrategy,omitempty" protobuf:"bytes,10,opt,name=stopStrategy"` + // v3.6 and after: Schedules is a list of schedules to run the Workflow in Cron format + Schedules []string `json:"schedules,omitempty" protobuf:"bytes,11,opt,name=schedules"` + // v3.6 and after: When is an expression that determines if a run should be scheduled. + When string `json:"when,omitempty" protobuf:"bytes,12,opt,name=when"` +} + +// StopStrategy defines if the CronWorkflow should stop scheduling based on an expression. v3.6 and after +type StopStrategy struct { + // v3.6 and after: Expression is an expression that stops scheduling workflows when true. Use the variables + // `cronworkflow`.`failed` or `cronworkflow`.`succeeded` to access the number of failed or successful child workflows. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"` +} + +// CronWorkflowStatus is the status of a CronWorkflow +type CronWorkflowStatus struct { + // Active is a list of active workflows stemming from this CronWorkflow + Active []v1.ObjectReference `json:"active" protobuf:"bytes,1,rep,name=active"` + // LastScheduleTime is the last time the CronWorkflow was scheduled + LastScheduledTime *metav1.Time `json:"lastScheduledTime" protobuf:"bytes,2,opt,name=lastScheduledTime"` + // Conditions is a list of conditions the CronWorkflow may have + Conditions Conditions `json:"conditions" protobuf:"bytes,3,rep,name=conditions"` + // v3.6 and after: Succeeded counts how many times child workflows succeeded + Succeeded int64 `json:"succeeded" protobuf:"varint,4,rep,name=succeeded"` + // v3.6 and after: Failed counts how many times child workflows failed + Failed int64 `json:"failed" protobuf:"varint,5,rep,name=failed"` + // v3.6 and after: Phase is an enum of Active or Stopped. 
It changes to Stopped when stopStrategy.expression is true + Phase CronWorkflowPhase `json:"phase" protobuf:"varint,6,rep,name=phase"` +} + +type CronWorkflowPhase string + +const ( + ActivePhase CronWorkflowPhase = "Active" + StoppedPhase CronWorkflowPhase = "Stopped" +) + +func (c *CronWorkflow) IsUsingNewSchedule() bool { + lastUsedSchedule, exists := c.Annotations[annotationKeyLatestSchedule] + // If last-used-schedule does not exist, or if it does not match the current schedule then the CronWorkflow schedule + // was just updated + return !exists || lastUsedSchedule != c.Spec.GetScheduleWithTimezoneString() +} + +func (c *CronWorkflow) SetSchedule(schedule string) { + if c.Annotations == nil { + c.Annotations = map[string]string{} + } + c.Annotations[annotationKeyLatestSchedule] = schedule +} + +func (c *CronWorkflow) SetSchedules(schedules []string) { + if c.Annotations == nil { + c.Annotations = map[string]string{} + } + var scheduleString strings.Builder + for i, schedule := range schedules { + scheduleString.WriteString(schedule) + if i != len(schedules)-1 { + scheduleString.WriteString(",") + } + } + c.Annotations[annotationKeyLatestSchedule] = scheduleString.String() +} + +func (c *CronWorkflow) GetLatestSchedule() string { + return c.Annotations[annotationKeyLatestSchedule] +} + +// GetScheduleString returns the schedule expression without timezone. If multiple +// expressions are configured it returns a comma separated list of cron expressions +func (c *CronWorkflowSpec) GetScheduleString() string { + return c.getScheduleString(false) +} + +// GetScheduleString returns the schedule expression with timezone, if available. If multiple +// expressions are configured it returns a comma separated list of cron expressions +func (c *CronWorkflowSpec) GetScheduleWithTimezoneString() string { + return c.getScheduleString(true) +} + +func (c *CronWorkflowSpec) getScheduleString(withTimezone bool) string { + var scheduleString string + if c.Schedule != "" { + if withTimezone { + scheduleString = c.withTimezone(c.Schedule) + } else { + scheduleString = c.Schedule + } + } else { + var sb strings.Builder + for i, schedule := range c.Schedules { + if withTimezone { + schedule = c.withTimezone(schedule) + } + sb.WriteString(schedule) + if i != len(c.Schedules)-1 { + sb.WriteString(",") + } + } + scheduleString = sb.String() + } + return scheduleString +} + +// GetSchedulesWithTimezone returns all schedules configured for the CronWorkflow with a timezone. It handles +// both Spec.Schedules and Spec.Schedule for backwards compatibility +func (c *CronWorkflowSpec) GetSchedulesWithTimezone(ctx context.Context) []string { + return c.getSchedules(ctx, true) +} + +// GetSchedules returns all schedules configured for the CronWorkflow. 
It handles both Spec.Schedules +// and Spec.Schedule for backwards compatibility +func (c *CronWorkflowSpec) GetSchedules(ctx context.Context) []string { + return c.getSchedules(ctx, false) +} + +func (c *CronWorkflowSpec) getSchedules(ctx context.Context, withTimezone bool) []string { + var schedules []string + if c.Schedule != "" { + schedule := c.Schedule + if withTimezone { + schedule = c.withTimezone(c.Schedule) + } + schedules = append(schedules, schedule) + deprecation.Record(ctx, deprecation.Schedule) + } else { + schedules = make([]string, len(c.Schedules)) + for i, schedule := range c.Schedules { + if withTimezone { + schedule = c.withTimezone(schedule) + } + schedules[i] = schedule + } + } + return schedules +} + +func (c *CronWorkflowSpec) withTimezone(scheduleString string) string { + if c.Timezone != "" { + scheduleString = "CRON_TZ=" + c.Timezone + " " + scheduleString + } + return scheduleString +} + +func (c *CronWorkflowStatus) HasActiveUID(uid types.UID) bool { + for _, ref := range c.Active { + if uid == ref.UID { + return true + } + } + return false +} + +const ( + // ConditionTypeSubmissionError signifies that there was an error when submitting the CronWorkflow as a Workflow + ConditionTypeSubmissionError ConditionType = "SubmissionError" +) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go new file mode 100644 index 00000000..135cae55 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go @@ -0,0 +1,40 @@ +package v1alpha1 + +// Data is a data template +type Data struct { + // Source sources external data into a data template + Source DataSource `json:"source" protobuf:"bytes,1,opt,name=source"` + + // Transformation applies a set of transformations + Transformation Transformation `json:"transformation" protobuf:"bytes,2,rep,name=transformation"` +} + +func (ds *DataSource) GetArtifactIfNeeded() (*Artifact, bool) { + if ds.ArtifactPaths != nil { + return &ds.ArtifactPaths.Artifact, true + } + return nil, false +} + +type Transformation []TransformationStep + +type TransformationStep struct { + // Expression defines an expr expression to apply + Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"` +} + +// DataSource sources external data into a data template +type DataSource struct { + // ArtifactPaths is a data transformation that collects a list of artifact paths + ArtifactPaths *ArtifactPaths `json:"artifactPaths,omitempty" protobuf:"bytes,1,opt,name=artifactPaths"` +} + +// ArtifactPaths expands a step from a collection of artifacts +type ArtifactPaths struct { + // Artifact is the artifact location from which to source the artifacts, it can be a directory + Artifact `json:",inline" protobuf:"bytes,1,opt,name=artifact"` +} + +type DataSourceProcessor interface { + ProcessArtifactPaths(*ArtifactPaths) (interface{}, error) +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go new file mode 100644 index 00000000..c418575a --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go @@ -0,0 +1,5 @@ +// Package v1alpha1 is the v1alpha1 version of the API. 
+// +groupName=argoproj.io
+// +k8s:deepcopy-gen=package,register
+// +k8s:openapi-gen=true
+package v1alpha1
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go
new file mode 100644
index 00000000..cebc7087
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go
@@ -0,0 +1,14 @@
+package v1alpha1
+
+import "time"
+
+// EstimatedDuration is in seconds.
+type EstimatedDuration int
+
+func (d EstimatedDuration) ToDuration() time.Duration {
+	return time.Second * time.Duration(d)
+}
+
+func NewEstimatedDuration(d time.Duration) EstimatedDuration {
+	return EstimatedDuration(d.Seconds())
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go
new file mode 100644
index 00000000..d6f49b0d
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go
@@ -0,0 +1,48 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// WorkflowEventBinding is the definition of an event resource
+// +genclient
+// +genclient:noStatus
+// +kubebuilder:resource:shortName=wfeb
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type WorkflowEventBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Spec WorkflowEventBindingSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// WorkflowEventBindingList is a list of event resources
+// +kubebuilder:resource:shortName=wfebs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type WorkflowEventBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items []WorkflowEventBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+type WorkflowEventBindingSpec struct {
+	// Event is the event to bind to
+	Event Event `json:"event" protobuf:"bytes,1,opt,name=event"`
+	// Submit is the workflow template to submit
+	Submit *Submit `json:"submit,omitempty" protobuf:"bytes,2,opt,name=submit"`
+}
+
+type Event struct {
+	// Selector is an expression (https://github.com/expr-lang/expr) that must match the event, e.g. `payload.message == "test"`
+	Selector string `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+}
+
+type Submit struct {
+	// WorkflowTemplateRef is the workflow template to submit
+	WorkflowTemplateRef WorkflowTemplateRef `json:"workflowTemplateRef" protobuf:"bytes,1,opt,name=workflowTemplateRef"`
+
+	// Metadata is an optional means to customize select fields of the workflow metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"`
+
+	// Arguments are extracted from the event and then set as arguments on the workflow that is created.
+	Arguments *Arguments `json:"arguments,omitempty" protobuf:"bytes,2,opt,name=arguments"`
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go
new file mode 100644
index 00000000..90ddb8a5
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go
@@ -0,0 +1,48009 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto + +package v1alpha1 + +import ( + encoding_json "encoding/json" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + v12 "k8s.io/api/policy/v1" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Amount) Reset() { *m = Amount{} } +func (*Amount) ProtoMessage() {} +func (*Amount) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{0} +} +func (m *Amount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Amount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Amount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Amount.Merge(m, src) +} +func (m *Amount) XXX_Size() int { + return m.Size() +} +func (m *Amount) XXX_DiscardUnknown() { + xxx_messageInfo_Amount.DiscardUnknown(m) +} + +var xxx_messageInfo_Amount proto.InternalMessageInfo + +func (m *ArchiveStrategy) Reset() { *m = ArchiveStrategy{} } +func (*ArchiveStrategy) ProtoMessage() {} +func (*ArchiveStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{1} +} +func (m *ArchiveStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArchiveStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArchiveStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArchiveStrategy.Merge(m, src) +} +func (m *ArchiveStrategy) XXX_Size() int { + return m.Size() +} +func (m *ArchiveStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_ArchiveStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_ArchiveStrategy proto.InternalMessageInfo + +func (m *Arguments) Reset() { *m = Arguments{} } +func (*Arguments) ProtoMessage() {} +func (*Arguments) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{2} +} +func (m *Arguments) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Arguments) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Arguments) XXX_Merge(src proto.Message) { + xxx_messageInfo_Arguments.Merge(m, src) +} +func (m *Arguments) XXX_Size() int { + return m.Size() +} +func (m *Arguments) XXX_DiscardUnknown() { + xxx_messageInfo_Arguments.DiscardUnknown(m) +} + +var xxx_messageInfo_Arguments proto.InternalMessageInfo + +func (m *ArtGCStatus) Reset() { *m = ArtGCStatus{} } +func 
(*ArtGCStatus) ProtoMessage() {}
+func (*ArtGCStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{3}
+}
+func (m *ArtGCStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtGCStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtGCStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtGCStatus.Merge(m, src)
+}
+func (m *ArtGCStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtGCStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtGCStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtGCStatus proto.InternalMessageInfo
+
+func (m *Artifact) Reset() { *m = Artifact{} }
+func (*Artifact) ProtoMessage() {}
+func (*Artifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{4}
+}
+func (m *Artifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Artifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Artifact.Merge(m, src)
+}
+func (m *Artifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *Artifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_Artifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Artifact proto.InternalMessageInfo
+
+func (m *ArtifactGC) Reset() { *m = ArtifactGC{} }
+func (*ArtifactGC) ProtoMessage() {}
+func (*ArtifactGC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{5}
+}
+func (m *ArtifactGC) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactGC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactGC.Merge(m, src)
+}
+func (m *ArtifactGC) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactGC) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactGC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactGC proto.InternalMessageInfo
+
+func (m *ArtifactGCSpec) Reset() { *m = ArtifactGCSpec{} }
+func (*ArtifactGCSpec) ProtoMessage() {}
+func (*ArtifactGCSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{6}
+}
+func (m *ArtifactGCSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactGCSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactGCSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactGCSpec.Merge(m, src)
+}
+func (m *ArtifactGCSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactGCSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactGCSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactGCSpec proto.InternalMessageInfo
+
+func (m *ArtifactGCStatus) Reset() { *m = ArtifactGCStatus{} }
+func (*ArtifactGCStatus) ProtoMessage() {}
+func (*ArtifactGCStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{7}
+}
+func (m *ArtifactGCStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactGCStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactGCStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactGCStatus.Merge(m, src)
+}
+func (m *ArtifactGCStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactGCStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactGCStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactGCStatus proto.InternalMessageInfo
+
+func (m *ArtifactLocation) Reset() { *m = ArtifactLocation{} }
+func (*ArtifactLocation) ProtoMessage() {}
+func (*ArtifactLocation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{8}
+}
+func (m *ArtifactLocation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactLocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactLocation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactLocation.Merge(m, src)
+}
+func (m *ArtifactLocation) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactLocation) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactLocation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactLocation proto.InternalMessageInfo
+
+func (m *ArtifactNodeSpec) Reset() { *m = ArtifactNodeSpec{} }
+func (*ArtifactNodeSpec) ProtoMessage() {}
+func (*ArtifactNodeSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{9}
+}
+func (m *ArtifactNodeSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactNodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactNodeSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactNodeSpec.Merge(m, src)
+}
+func (m *ArtifactNodeSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactNodeSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactNodeSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactNodeSpec proto.InternalMessageInfo
+
+func (m *ArtifactPaths) Reset() { *m = ArtifactPaths{} }
+func (*ArtifactPaths) ProtoMessage() {}
+func (*ArtifactPaths) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{10}
+}
+func (m *ArtifactPaths) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactPaths) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactPaths.Merge(m, src)
+}
+func (m *ArtifactPaths) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactPaths) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactPaths.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactPaths proto.InternalMessageInfo
+
+func (m *ArtifactRepository) Reset() { *m = ArtifactRepository{} }
+func (*ArtifactRepository) ProtoMessage() {}
+func (*ArtifactRepository) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{11}
+}
+func (m *ArtifactRepository) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactRepository) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactRepository.Merge(m, src)
+}
+func (m *ArtifactRepository) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactRepository) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactRepository.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactRepository proto.InternalMessageInfo
+
+func (m *ArtifactRepositoryRef) Reset() { *m = ArtifactRepositoryRef{} }
+func (*ArtifactRepositoryRef) ProtoMessage() {}
+func (*ArtifactRepositoryRef) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{12}
+}
+func (m *ArtifactRepositoryRef) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactRepositoryRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactRepositoryRef) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactRepositoryRef.Merge(m, src)
+}
+func (m *ArtifactRepositoryRef) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactRepositoryRef) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactRepositoryRef.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactRepositoryRef proto.InternalMessageInfo
+
+func (m *ArtifactRepositoryRefStatus) Reset() { *m = ArtifactRepositoryRefStatus{} }
+func (*ArtifactRepositoryRefStatus) ProtoMessage() {}
+func (*ArtifactRepositoryRefStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{13}
+}
+func (m *ArtifactRepositoryRefStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactRepositoryRefStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactRepositoryRefStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactRepositoryRefStatus.Merge(m, src)
+}
+func (m *ArtifactRepositoryRefStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactRepositoryRefStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactRepositoryRefStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactRepositoryRefStatus proto.InternalMessageInfo
+
+func (m *ArtifactResult) Reset() { *m = ArtifactResult{} }
+func (*ArtifactResult) ProtoMessage() {}
+func (*ArtifactResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{14}
+}
+func (m *ArtifactResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactResult.Merge(m, src)
+}
+func (m *ArtifactResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactResult proto.InternalMessageInfo
+
+func (m *ArtifactResultNodeStatus) Reset() { *m = ArtifactResultNodeStatus{} }
+func (*ArtifactResultNodeStatus) ProtoMessage() {}
+func (*ArtifactResultNodeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{15}
+}
+func (m *ArtifactResultNodeStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactResultNodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactResultNodeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactResultNodeStatus.Merge(m, src)
+}
+func (m *ArtifactResultNodeStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactResultNodeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactResultNodeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactResultNodeStatus proto.InternalMessageInfo
+
+func (m *ArtifactSearchQuery) Reset() { *m = ArtifactSearchQuery{} }
+func (*ArtifactSearchQuery) ProtoMessage() {}
+func (*ArtifactSearchQuery) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{16}
+}
+func (m *ArtifactSearchQuery) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactSearchQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactSearchQuery) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactSearchQuery.Merge(m, src)
+}
+func (m *ArtifactSearchQuery) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactSearchQuery) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactSearchQuery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactSearchQuery proto.InternalMessageInfo
+
+func (m *ArtifactSearchResult) Reset() { *m = ArtifactSearchResult{} }
+func (*ArtifactSearchResult) ProtoMessage() {}
+func (*ArtifactSearchResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{17}
+}
+func (m *ArtifactSearchResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactSearchResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactSearchResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactSearchResult.Merge(m, src)
+}
+func (m *ArtifactSearchResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactSearchResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactSearchResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactSearchResult proto.InternalMessageInfo
+
+func (m *ArtifactoryArtifact) Reset() { *m = ArtifactoryArtifact{} }
+func (*ArtifactoryArtifact) ProtoMessage() {}
+func (*ArtifactoryArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{18}
+}
+func (m *ArtifactoryArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactoryArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactoryArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactoryArtifact.Merge(m, src)
+}
+func (m *ArtifactoryArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactoryArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactoryArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactoryArtifact proto.InternalMessageInfo
+
+func (m *ArtifactoryArtifactRepository) Reset() { *m = ArtifactoryArtifactRepository{} }
+func (*ArtifactoryArtifactRepository) ProtoMessage() {}
+func (*ArtifactoryArtifactRepository) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{19}
+}
+func (m *ArtifactoryArtifactRepository) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactoryArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactoryArtifactRepository) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactoryArtifactRepository.Merge(m, src)
+}
+func (m *ArtifactoryArtifactRepository) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactoryArtifactRepository) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactoryArtifactRepository.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactoryArtifactRepository proto.InternalMessageInfo
+
+func (m *ArtifactoryAuth) Reset() { *m = ArtifactoryAuth{} }
+func (*ArtifactoryAuth) ProtoMessage() {}
+func (*ArtifactoryAuth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{20}
+}
+func (m *ArtifactoryAuth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArtifactoryAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ArtifactoryAuth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArtifactoryAuth.Merge(m, src)
+}
+func (m *ArtifactoryAuth) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArtifactoryAuth) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArtifactoryAuth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArtifactoryAuth proto.InternalMessageInfo
+
+func (m *AzureArtifact) Reset() { *m = AzureArtifact{} }
+func (*AzureArtifact) ProtoMessage() {}
+func (*AzureArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{21}
+}
+func (m *AzureArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureArtifact.Merge(m, src)
+}
+func (m *AzureArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureArtifact proto.InternalMessageInfo
+
+func (m *AzureArtifactRepository) Reset() { *m = AzureArtifactRepository{} }
+func (*AzureArtifactRepository) ProtoMessage() {}
+func (*AzureArtifactRepository) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{22}
+}
+func (m *AzureArtifactRepository) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureArtifactRepository) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureArtifactRepository.Merge(m, src)
+}
+func (m *AzureArtifactRepository) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureArtifactRepository) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureArtifactRepository.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureArtifactRepository proto.InternalMessageInfo
+
+func (m *AzureBlobContainer) Reset() { *m = AzureBlobContainer{} }
+func (*AzureBlobContainer) ProtoMessage() {}
+func (*AzureBlobContainer) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{23}
+}
+func (m *AzureBlobContainer) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureBlobContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureBlobContainer) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureBlobContainer.Merge(m, src)
+}
+func (m *AzureBlobContainer) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureBlobContainer) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureBlobContainer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureBlobContainer proto.InternalMessageInfo
+
+func (m *Backoff) Reset() { *m = Backoff{} }
+func (*Backoff) ProtoMessage() {}
+func (*Backoff) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{24}
+}
+func (m *Backoff) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Backoff) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Backoff) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Backoff.Merge(m, src)
+}
+func (m *Backoff) XXX_Size() int {
+	return m.Size()
+}
+func (m *Backoff) XXX_DiscardUnknown() {
+	xxx_messageInfo_Backoff.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Backoff proto.InternalMessageInfo
+
+func (m *BasicAuth) Reset() { *m = BasicAuth{} }
+func (*BasicAuth) ProtoMessage() {}
+func (*BasicAuth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{25}
+}
+func (m *BasicAuth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BasicAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *BasicAuth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BasicAuth.Merge(m, src)
+}
+func (m *BasicAuth) XXX_Size() int {
+	return m.Size()
+}
+func (m *BasicAuth) XXX_DiscardUnknown() {
+	xxx_messageInfo_BasicAuth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BasicAuth proto.InternalMessageInfo
+
+func (m *Cache) Reset() { *m = Cache{} }
+func (*Cache) ProtoMessage() {}
+func (*Cache) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{26}
+}
+func (m *Cache) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Cache) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Cache) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Cache.Merge(m, src)
+}
+func (m *Cache) XXX_Size() int {
+	return m.Size()
+}
+func (m *Cache) XXX_DiscardUnknown() {
+	xxx_messageInfo_Cache.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cache proto.InternalMessageInfo
+
+func (m *ClientCertAuth) Reset() { *m = ClientCertAuth{} }
+func (*ClientCertAuth) ProtoMessage() {}
+func (*ClientCertAuth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{27}
+}
+func (m *ClientCertAuth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClientCertAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ClientCertAuth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientCertAuth.Merge(m, src)
+}
+func (m *ClientCertAuth) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClientCertAuth) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientCertAuth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientCertAuth proto.InternalMessageInfo
+
+func (m *ClusterWorkflowTemplate) Reset() { *m = ClusterWorkflowTemplate{} }
+func (*ClusterWorkflowTemplate) ProtoMessage() {}
+func (*ClusterWorkflowTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{28}
+}
+func (m *ClusterWorkflowTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClusterWorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ClusterWorkflowTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClusterWorkflowTemplate.Merge(m, src)
+}
+func (m *ClusterWorkflowTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClusterWorkflowTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClusterWorkflowTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterWorkflowTemplate proto.InternalMessageInfo
+
+func (m *ClusterWorkflowTemplateList) Reset() { *m = ClusterWorkflowTemplateList{} }
+func (*ClusterWorkflowTemplateList) ProtoMessage() {}
+func (*ClusterWorkflowTemplateList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{29}
+}
+func (m *ClusterWorkflowTemplateList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClusterWorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ClusterWorkflowTemplateList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClusterWorkflowTemplateList.Merge(m, src)
+}
+func (m *ClusterWorkflowTemplateList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClusterWorkflowTemplateList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClusterWorkflowTemplateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterWorkflowTemplateList proto.InternalMessageInfo
+
+func (m *Column) Reset() { *m = Column{} }
+func (*Column) ProtoMessage() {}
+func (*Column) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{30}
+}
+func (m *Column) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Column) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Column.Merge(m, src)
+}
+func (m *Column) XXX_Size() int {
+	return m.Size()
+}
+func (m *Column) XXX_DiscardUnknown() {
+	xxx_messageInfo_Column.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Column proto.InternalMessageInfo
+
+func (m *Condition) Reset() { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{31}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+	return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+	xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
+func (m *ContainerNode) Reset() { *m = ContainerNode{} }
+func (*ContainerNode) ProtoMessage() {}
+func (*ContainerNode) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{32}
+}
+func (m *ContainerNode) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerNode) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerNode.Merge(m, src)
+}
+func (m *ContainerNode) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerNode) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerNode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerNode proto.InternalMessageInfo
+
+func (m *ContainerSetRetryStrategy) Reset() { *m = ContainerSetRetryStrategy{} }
+func (*ContainerSetRetryStrategy) ProtoMessage() {}
+func (*ContainerSetRetryStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{33}
+}
+func (m *ContainerSetRetryStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerSetRetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerSetRetryStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerSetRetryStrategy.Merge(m, src)
+}
+func (m *ContainerSetRetryStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerSetRetryStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerSetRetryStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerSetRetryStrategy proto.InternalMessageInfo
+
+func (m *ContainerSetTemplate) Reset() { *m = ContainerSetTemplate{} }
+func (*ContainerSetTemplate) ProtoMessage() {}
+func (*ContainerSetTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{34}
+}
+func (m *ContainerSetTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerSetTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerSetTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerSetTemplate.Merge(m, src)
+}
+func (m *ContainerSetTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerSetTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerSetTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerSetTemplate proto.InternalMessageInfo
+
+func (m *ContinueOn) Reset() { *m = ContinueOn{} }
+func (*ContinueOn) ProtoMessage() {}
+func (*ContinueOn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{35}
+}
+func (m *ContinueOn) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContinueOn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContinueOn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContinueOn.Merge(m, src)
+}
+func (m *ContinueOn) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContinueOn) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContinueOn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContinueOn proto.InternalMessageInfo
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{36}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Counter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Counter.Merge(m, src)
+}
+func (m *Counter) XXX_Size() int {
+	return m.Size()
+}
+func (m *Counter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
+
+func (m *CreateS3BucketOptions) Reset() { *m = CreateS3BucketOptions{} }
+func (*CreateS3BucketOptions) ProtoMessage() {}
+func (*CreateS3BucketOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{37}
+}
+func (m *CreateS3BucketOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateS3BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CreateS3BucketOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateS3BucketOptions.Merge(m, src)
+}
+func (m *CreateS3BucketOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateS3BucketOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateS3BucketOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateS3BucketOptions proto.InternalMessageInfo
+
+func (m *CronWorkflow) Reset() { *m = CronWorkflow{} }
+func (*CronWorkflow) ProtoMessage() {}
+func (*CronWorkflow) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{38}
+}
+func (m *CronWorkflow) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CronWorkflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CronWorkflow) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CronWorkflow.Merge(m, src)
+}
+func (m *CronWorkflow) XXX_Size() int {
+	return m.Size()
+}
+func (m *CronWorkflow) XXX_DiscardUnknown() {
+	xxx_messageInfo_CronWorkflow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CronWorkflow proto.InternalMessageInfo
+
+func (m *CronWorkflowList) Reset() { *m = CronWorkflowList{} }
+func (*CronWorkflowList) ProtoMessage() {}
+func (*CronWorkflowList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{39}
+}
+func (m *CronWorkflowList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CronWorkflowList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CronWorkflowList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CronWorkflowList.Merge(m, src)
+}
+func (m *CronWorkflowList) XXX_Size() int {
+	return m.Size()
+}
+func (m *CronWorkflowList) XXX_DiscardUnknown() {
+	xxx_messageInfo_CronWorkflowList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CronWorkflowList proto.InternalMessageInfo
+
+func (m *CronWorkflowSpec) Reset() { *m = CronWorkflowSpec{} }
+func (*CronWorkflowSpec) ProtoMessage() {}
+func (*CronWorkflowSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{40}
+}
+func (m *CronWorkflowSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CronWorkflowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CronWorkflowSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CronWorkflowSpec.Merge(m, src)
+}
+func (m *CronWorkflowSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *CronWorkflowSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_CronWorkflowSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CronWorkflowSpec proto.InternalMessageInfo
+
+func (m *CronWorkflowStatus) Reset() { *m = CronWorkflowStatus{} }
+func (*CronWorkflowStatus) ProtoMessage() {}
+func (*CronWorkflowStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{41}
+}
+func (m *CronWorkflowStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CronWorkflowStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CronWorkflowStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CronWorkflowStatus.Merge(m, src)
+}
+func (m *CronWorkflowStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *CronWorkflowStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_CronWorkflowStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CronWorkflowStatus proto.InternalMessageInfo
+
+func (m *DAGTask) Reset() { *m = DAGTask{} }
+func (*DAGTask) ProtoMessage() {}
+func (*DAGTask) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{42}
+}
+func (m *DAGTask) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DAGTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DAGTask) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DAGTask.Merge(m, src)
+}
+func (m *DAGTask) XXX_Size() int {
+	return m.Size()
+}
+func (m *DAGTask) XXX_DiscardUnknown() {
+	xxx_messageInfo_DAGTask.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DAGTask proto.InternalMessageInfo
+
+func (m *DAGTemplate) Reset() { *m = DAGTemplate{} }
+func (*DAGTemplate) ProtoMessage() {}
+func (*DAGTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{43}
+}
+func (m *DAGTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DAGTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DAGTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DAGTemplate.Merge(m, src)
+}
+func (m *DAGTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *DAGTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_DAGTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DAGTemplate proto.InternalMessageInfo
+
+func (m *Data) Reset() { *m = Data{} }
+func (*Data) ProtoMessage() {}
+func (*Data) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{44}
+}
+func (m *Data) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Data) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Data.Merge(m, src)
+}
+func (m *Data) XXX_Size() int {
+	return m.Size()
+}
+func (m *Data) XXX_DiscardUnknown() {
+	xxx_messageInfo_Data.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Data proto.InternalMessageInfo
+
+func (m *DataSource) Reset() { *m = DataSource{} }
+func (*DataSource) ProtoMessage() {}
+func (*DataSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{45}
+}
+func (m *DataSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DataSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DataSource.Merge(m, src)
+}
+func (m *DataSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *DataSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_DataSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataSource proto.InternalMessageInfo
+
+func (m *Event) Reset() { *m = Event{} }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{46}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func (m *ExecutorConfig) Reset() { *m = ExecutorConfig{} }
+func (*ExecutorConfig) ProtoMessage() {}
+func (*ExecutorConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{47}
+}
+func (m *ExecutorConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecutorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExecutorConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecutorConfig.Merge(m, src)
+}
+func (m *ExecutorConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecutorConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecutorConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecutorConfig proto.InternalMessageInfo
+
+func (m *GCSArtifact) Reset() { *m = GCSArtifact{} }
+func (*GCSArtifact) ProtoMessage() {}
+func (*GCSArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{48}
+}
+func (m *GCSArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GCSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GCSArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GCSArtifact.Merge(m, src)
+}
+func (m *GCSArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *GCSArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_GCSArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GCSArtifact proto.InternalMessageInfo
+
+func (m *GCSArtifactRepository) Reset() { *m = GCSArtifactRepository{} }
+func (*GCSArtifactRepository) ProtoMessage() {}
+func (*GCSArtifactRepository) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{49}
+}
+func (m *GCSArtifactRepository) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GCSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GCSArtifactRepository) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GCSArtifactRepository.Merge(m, src)
+}
+func (m *GCSArtifactRepository) XXX_Size() int {
+	return m.Size()
+}
+func (m *GCSArtifactRepository) XXX_DiscardUnknown() {
+	xxx_messageInfo_GCSArtifactRepository.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GCSArtifactRepository proto.InternalMessageInfo
+
+func (m *GCSBucket) Reset() { *m = GCSBucket{} }
+func (*GCSBucket) ProtoMessage() {}
+func (*GCSBucket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{50}
+}
+func (m *GCSBucket) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GCSBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GCSBucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GCSBucket.Merge(m, src)
+}
+func (m *GCSBucket) XXX_Size() int {
+	return m.Size()
+}
+func (m *GCSBucket) XXX_DiscardUnknown() {
+	xxx_messageInfo_GCSBucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GCSBucket proto.InternalMessageInfo
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (*Gauge) ProtoMessage() {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{51}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Gauge) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Gauge.Merge(m, src)
+}
+func (m *Gauge) XXX_Size() int {
+	return m.Size()
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+	xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
+
+func (m *GitArtifact) Reset() { *m = GitArtifact{} }
+func (*GitArtifact) ProtoMessage() {}
+func (*GitArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{52}
+}
+func (m *GitArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GitArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GitArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GitArtifact.Merge(m, src)
+}
+func (m *GitArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *GitArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_GitArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitArtifact proto.InternalMessageInfo
+
+func (m *HDFSArtifact) Reset() { *m = HDFSArtifact{} }
+func (*HDFSArtifact) ProtoMessage() {}
+func (*HDFSArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{53}
+}
+func (m *HDFSArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HDFSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HDFSArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HDFSArtifact.Merge(m, src)
+}
+func (m *HDFSArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *HDFSArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_HDFSArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HDFSArtifact proto.InternalMessageInfo
+
+func (m *HDFSArtifactRepository) Reset() { *m = HDFSArtifactRepository{} }
+func (*HDFSArtifactRepository) ProtoMessage() {}
+func (*HDFSArtifactRepository) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{54}
+}
+func (m *HDFSArtifactRepository) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HDFSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HDFSArtifactRepository) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HDFSArtifactRepository.Merge(m, src)
+}
+func (m *HDFSArtifactRepository) XXX_Size() int {
+	return m.Size()
+}
+func (m *HDFSArtifactRepository) XXX_DiscardUnknown() {
+	xxx_messageInfo_HDFSArtifactRepository.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HDFSArtifactRepository proto.InternalMessageInfo
+
+func (m *HDFSConfig) Reset() { *m = HDFSConfig{} }
+func (*HDFSConfig) ProtoMessage() {}
+func (*HDFSConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{55}
+}
+func (m *HDFSConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HDFSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HDFSConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HDFSConfig.Merge(m, src)
+}
+func (m *HDFSConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *HDFSConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_HDFSConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HDFSConfig proto.InternalMessageInfo
+
+func (m *HDFSKrbConfig) Reset() { *m = HDFSKrbConfig{} }
+func (*HDFSKrbConfig) ProtoMessage() {}
+func (*HDFSKrbConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{56}
+}
+func (m *HDFSKrbConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HDFSKrbConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HDFSKrbConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HDFSKrbConfig.Merge(m, src)
+}
+func (m *HDFSKrbConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *HDFSKrbConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_HDFSKrbConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HDFSKrbConfig proto.InternalMessageInfo
+
+func (m *HTTP) Reset() { *m = HTTP{} }
+func (*HTTP) ProtoMessage() {}
+func (*HTTP) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{57}
+}
+func (m *HTTP) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTP) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTP.Merge(m, src)
+}
+func (m *HTTP) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTP) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTP proto.InternalMessageInfo
+
+func (m *HTTPArtifact) Reset() { *m = HTTPArtifact{} }
+func (*HTTPArtifact) ProtoMessage() {}
+func (*HTTPArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{58}
+}
+func (m *HTTPArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPArtifact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPArtifact.Merge(m, src)
+}
+func (m *HTTPArtifact) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPArtifact) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPArtifact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPArtifact proto.InternalMessageInfo
+
+func (m *HTTPAuth) Reset() { *m = HTTPAuth{} }
+func (*HTTPAuth) ProtoMessage() {}
+func (*HTTPAuth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{59}
+}
+func (m *HTTPAuth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPAuth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPAuth.Merge(m, src)
+}
+func (m *HTTPAuth) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPAuth) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPAuth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPAuth proto.InternalMessageInfo
+
+func (m *HTTPBodySource) Reset() { *m = HTTPBodySource{} }
+func (*HTTPBodySource) ProtoMessage() {}
+func (*HTTPBodySource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{60}
+}
+func (m *HTTPBodySource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPBodySource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPBodySource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPBodySource.Merge(m, src)
+}
+func (m *HTTPBodySource) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPBodySource) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPBodySource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPBodySource proto.InternalMessageInfo
+
+func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
+func (*HTTPHeader) ProtoMessage() {}
+func (*HTTPHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{61}
+}
+func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPHeader.Merge(m, src)
+}
+func (m *HTTPHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
+
+func (m *HTTPHeaderSource) Reset() { *m = HTTPHeaderSource{} }
+func (*HTTPHeaderSource) ProtoMessage() {}
+func (*HTTPHeaderSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{62}
+}
+func (m *HTTPHeaderSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPHeaderSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPHeaderSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPHeaderSource.Merge(m, src)
+}
+func (m *HTTPHeaderSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPHeaderSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPHeaderSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPHeaderSource proto.InternalMessageInfo
+
+func (m *Header) Reset() { *m = Header{} }
+func (*Header) ProtoMessage() {}
+func (*Header) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{63}
+}
+func (m *Header) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Header) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Header.Merge(m, src)
+}
+func (m *Header) XXX_Size() int {
+	return m.Size()
+}
+func (m *Header) XXX_DiscardUnknown() {
+	xxx_messageInfo_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Header proto.InternalMessageInfo
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{64}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(m, src)
+}
+func (m *Histogram) XXX_Size() int {
+	return m.Size()
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+func (m *Inputs) Reset() { *m = Inputs{} }
+func (*Inputs) ProtoMessage() {}
+func (*Inputs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{65}
+}
+func (m *Inputs) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Inputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Inputs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Inputs.Merge(m, src)
+}
+func (m *Inputs) XXX_Size() int {
+	return m.Size()
+}
+func (m *Inputs) XXX_DiscardUnknown() {
+	xxx_messageInfo_Inputs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Inputs proto.InternalMessageInfo
+
+func (m *Item) Reset() { *m = Item{} }
+func (*Item) ProtoMessage() {}
+func (*Item) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{66}
+}
+func (m *Item) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Item) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Item.Merge(m, src)
+}
+func (m *Item) XXX_Size() int {
+	return m.Size()
+}
+func (m *Item) XXX_DiscardUnknown() {
+	xxx_messageInfo_Item.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Item proto.InternalMessageInfo
+
+func (m *LabelKeys) Reset() { *m = LabelKeys{} }
+func (*LabelKeys) ProtoMessage() {}
+func (*LabelKeys) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{67}
+}
+func (m *LabelKeys) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LabelKeys) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LabelKeys) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelKeys.Merge(m, src)
+}
+func (m *LabelKeys) XXX_Size() int {
+	return m.Size()
+}
+func (m *LabelKeys) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelKeys.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelKeys proto.InternalMessageInfo
+
+func (m *LabelValueFrom) Reset() { *m = LabelValueFrom{} }
+func (*LabelValueFrom) ProtoMessage() {}
+func (*LabelValueFrom) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{68}
+}
+func (m *LabelValueFrom) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LabelValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LabelValueFrom) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelValueFrom.Merge(m, src)
+}
+func (m *LabelValueFrom) XXX_Size() int {
+	return m.Size()
+}
+func (m *LabelValueFrom) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelValueFrom.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelValueFrom proto.InternalMessageInfo
+
+func (m *LabelValues) Reset() { *m = LabelValues{} }
+func (*LabelValues) ProtoMessage() {}
+func (*LabelValues) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{69}
+}
+func (m *LabelValues) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LabelValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LabelValues) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelValues.Merge(m, src)
+}
+func (m *LabelValues) XXX_Size() int {
+	return m.Size()
+}
+func (m *LabelValues) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelValues.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelValues proto.InternalMessageInfo
+
+func (m *LifecycleHook) Reset() { *m = LifecycleHook{} }
+func (*LifecycleHook) ProtoMessage() {}
+func (*LifecycleHook) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{70}
+}
+func (m *LifecycleHook) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LifecycleHook) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LifecycleHook.Merge(m, src)
+}
+func (m *LifecycleHook) XXX_Size() int {
+	return m.Size()
+}
+func (m *LifecycleHook) XXX_DiscardUnknown() {
+	xxx_messageInfo_LifecycleHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo
+
+func (m *Link) Reset() { *m = Link{} }
+func (*Link) ProtoMessage() {}
+func (*Link) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{71}
+}
+func (m *Link) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Link) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Link.Merge(m, src)
+}
+func (m *Link) XXX_Size() int {
+	return m.Size()
+}
+func (m *Link) XXX_DiscardUnknown() {
+	xxx_messageInfo_Link.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Link proto.InternalMessageInfo
+
+func (m *ManifestFrom) Reset() { *m = ManifestFrom{} }
+func (*ManifestFrom) ProtoMessage() {}
+func (*ManifestFrom) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{72}
+}
+func (m *ManifestFrom) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ManifestFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ManifestFrom) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ManifestFrom.Merge(m, src)
+}
+func (m *ManifestFrom) XXX_Size() int {
+	return m.Size()
+}
+func (m *ManifestFrom) XXX_DiscardUnknown() {
+	xxx_messageInfo_ManifestFrom.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManifestFrom proto.InternalMessageInfo
+
+func (m *MemoizationStatus) Reset() { *m = MemoizationStatus{} }
+func (*MemoizationStatus) ProtoMessage() {}
+func (*MemoizationStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{73}
+}
+func (m *MemoizationStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemoizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MemoizationStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemoizationStatus.Merge(m, src)
+}
+func (m *MemoizationStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemoizationStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemoizationStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemoizationStatus proto.InternalMessageInfo
+
+func (m *Memoize) Reset() { *m = Memoize{} }
+func (*Memoize) ProtoMessage() {}
+func (*Memoize) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{74}
+}
+func (m *Memoize) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Memoize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Memoize) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Memoize.Merge(m, src)
+}
+func (m *Memoize) XXX_Size() int {
+	return m.Size()
+}
+func (m *Memoize) XXX_DiscardUnknown() {
+	xxx_messageInfo_Memoize.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Memoize proto.InternalMessageInfo
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{75}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *MetricLabel) Reset() { *m = MetricLabel{} }
+func (*MetricLabel) ProtoMessage() {}
+func (*MetricLabel) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{76}
+}
+func (m *MetricLabel) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MetricLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MetricLabel) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricLabel.Merge(m, src)
+}
+func (m *MetricLabel) XXX_Size() int {
+	return m.Size()
+}
+func (m *MetricLabel) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricLabel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricLabel proto.InternalMessageInfo
+
+func (m *Metrics) Reset() { *m = Metrics{} }
+func (*Metrics) ProtoMessage() {}
+func (*Metrics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{77}
+}
+func (m *Metrics) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Metrics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metrics.Merge(m, src)
+}
+func (m *Metrics) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metrics) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metrics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metrics proto.InternalMessageInfo
+
+func (m *Mutex) Reset() { *m = Mutex{} }
+func (*Mutex) ProtoMessage() {}
+func (*Mutex) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{78}
+}
+func (m *Mutex) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Mutex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Mutex) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Mutex.Merge(m, src)
+}
+func (m *Mutex) XXX_Size() int {
+	return m.Size()
+}
+func (m *Mutex) XXX_DiscardUnknown() {
+	xxx_messageInfo_Mutex.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mutex proto.InternalMessageInfo
+
+func (m *MutexHolding) Reset() { *m = MutexHolding{} }
+func (*MutexHolding) ProtoMessage() {}
+func (*MutexHolding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{79}
+}
+func (m *MutexHolding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutexHolding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutexHolding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutexHolding.Merge(m, src)
+}
+func (m *MutexHolding) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutexHolding) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutexHolding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutexHolding proto.InternalMessageInfo
+
+func (m *MutexStatus) Reset() { *m = MutexStatus{} }
+func (*MutexStatus) ProtoMessage() {}
+func (*MutexStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{80}
+}
+func (m *MutexStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutexStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutexStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutexStatus.Merge(m, src)
+}
+func (m *MutexStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutexStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutexStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutexStatus proto.InternalMessageInfo
+
+func (m *NodeFlag) Reset() { *m = NodeFlag{} }
+func (*NodeFlag) ProtoMessage() {}
+func (*NodeFlag) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{81}
+}
+func (m *NodeFlag) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeFlag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeFlag) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeFlag.Merge(m, src)
+}
+func (m *NodeFlag) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeFlag) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeFlag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeFlag proto.InternalMessageInfo
+
+func (m *NodeResult) Reset() { *m = NodeResult{} }
+func (*NodeResult) ProtoMessage() {}
+func (*NodeResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{82}
+}
+func (m *NodeResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeResult.Merge(m, src)
+}
+func (m *NodeResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeResult proto.InternalMessageInfo
+
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
+func (*NodeStatus) ProtoMessage() {}
+func (*NodeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{83}
+}
+func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStatus.Merge(m, src)
+}
+func (m *NodeStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
+
+func (m *NodeSynchronizationStatus) Reset() { *m = NodeSynchronizationStatus{} }
+func (*NodeSynchronizationStatus) ProtoMessage() {}
+func (*NodeSynchronizationStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{84}
+}
+func (m *NodeSynchronizationStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSynchronizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSynchronizationStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSynchronizationStatus.Merge(m, src)
+}
+func (m *NodeSynchronizationStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSynchronizationStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSynchronizationStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSynchronizationStatus proto.InternalMessageInfo
+
+func (m *NoneStrategy) Reset() { *m = NoneStrategy{} }
+func (*NoneStrategy) ProtoMessage() {}
+func (*NoneStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{85}
+}
+func (m *NoneStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NoneStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NoneStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NoneStrategy.Merge(m, src)
+}
+func (m *NoneStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *NoneStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_NoneStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NoneStrategy proto.InternalMessageInfo
+
+func (m *OAuth2Auth) Reset() { *m = OAuth2Auth{} }
+func (*OAuth2Auth) ProtoMessage() {}
+func (*OAuth2Auth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{86}
+}
+func (m *OAuth2Auth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OAuth2Auth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OAuth2Auth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OAuth2Auth.Merge(m, src)
+}
+func (m *OAuth2Auth) XXX_Size() int {
+	return m.Size()
+}
+func (m *OAuth2Auth) XXX_DiscardUnknown() {
+	xxx_messageInfo_OAuth2Auth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuth2Auth proto.InternalMessageInfo
+
+func (m *OAuth2EndpointParam) Reset() { *m = OAuth2EndpointParam{} }
+func (*OAuth2EndpointParam) ProtoMessage() {}
+func (*OAuth2EndpointParam) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{87}
+}
+func (m *OAuth2EndpointParam) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OAuth2EndpointParam) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OAuth2EndpointParam) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OAuth2EndpointParam.Merge(m, src)
+}
+func (m *OAuth2EndpointParam) XXX_Size() int {
+	return m.Size()
+}
+func (m *OAuth2EndpointParam) XXX_DiscardUnknown() {
+	xxx_messageInfo_OAuth2EndpointParam.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuth2EndpointParam proto.InternalMessageInfo
+
+func (m *OSSArtifact) Reset() { *m = OSSArtifact{} }
+func (*OSSArtifact) ProtoMessage() {}
+func (*OSSArtifact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{88}
+}
+func (m *OSSArtifact) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OSSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OSSArtifact) XXX_Merge(src proto.Message) {
+
xxx_messageInfo_OSSArtifact.Merge(m, src) +} +func (m *OSSArtifact) XXX_Size() int { + return m.Size() +} +func (m *OSSArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_OSSArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_OSSArtifact proto.InternalMessageInfo + +func (m *OSSArtifactRepository) Reset() { *m = OSSArtifactRepository{} } +func (*OSSArtifactRepository) ProtoMessage() {} +func (*OSSArtifactRepository) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{89} +} +func (m *OSSArtifactRepository) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OSSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OSSArtifactRepository) XXX_Merge(src proto.Message) { + xxx_messageInfo_OSSArtifactRepository.Merge(m, src) +} +func (m *OSSArtifactRepository) XXX_Size() int { + return m.Size() +} +func (m *OSSArtifactRepository) XXX_DiscardUnknown() { + xxx_messageInfo_OSSArtifactRepository.DiscardUnknown(m) +} + +var xxx_messageInfo_OSSArtifactRepository proto.InternalMessageInfo + +func (m *OSSBucket) Reset() { *m = OSSBucket{} } +func (*OSSBucket) ProtoMessage() {} +func (*OSSBucket) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{90} +} +func (m *OSSBucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OSSBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OSSBucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_OSSBucket.Merge(m, src) +} +func (m *OSSBucket) XXX_Size() int { + return m.Size() +} +func (m *OSSBucket) XXX_DiscardUnknown() { + xxx_messageInfo_OSSBucket.DiscardUnknown(m) +} + +var xxx_messageInfo_OSSBucket proto.InternalMessageInfo + +func (m *OSSLifecycleRule) Reset() { *m = OSSLifecycleRule{} } +func (*OSSLifecycleRule) ProtoMessage() {} +func (*OSSLifecycleRule) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{91} +} +func (m *OSSLifecycleRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OSSLifecycleRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OSSLifecycleRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_OSSLifecycleRule.Merge(m, src) +} +func (m *OSSLifecycleRule) XXX_Size() int { + return m.Size() +} +func (m *OSSLifecycleRule) XXX_DiscardUnknown() { + xxx_messageInfo_OSSLifecycleRule.DiscardUnknown(m) +} + +var xxx_messageInfo_OSSLifecycleRule proto.InternalMessageInfo + +func (m *Object) Reset() { *m = Object{} } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{92} +} +func (m *Object) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Object) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object.Merge(m, src) +} +func (m *Object) XXX_Size() int { + return m.Size() +} +func (m *Object) XXX_DiscardUnknown() { + xxx_messageInfo_Object.DiscardUnknown(m) +} + +var xxx_messageInfo_Object 
proto.InternalMessageInfo + +func (m *Outputs) Reset() { *m = Outputs{} } +func (*Outputs) ProtoMessage() {} +func (*Outputs) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{93} +} +func (m *Outputs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Outputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Outputs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Outputs.Merge(m, src) +} +func (m *Outputs) XXX_Size() int { + return m.Size() +} +func (m *Outputs) XXX_DiscardUnknown() { + xxx_messageInfo_Outputs.DiscardUnknown(m) +} + +var xxx_messageInfo_Outputs proto.InternalMessageInfo + +func (m *ParallelSteps) Reset() { *m = ParallelSteps{} } +func (*ParallelSteps) ProtoMessage() {} +func (*ParallelSteps) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{94} +} +func (m *ParallelSteps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParallelSteps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParallelSteps) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParallelSteps.Merge(m, src) +} +func (m *ParallelSteps) XXX_Size() int { + return m.Size() +} +func (m *ParallelSteps) XXX_DiscardUnknown() { + xxx_messageInfo_ParallelSteps.DiscardUnknown(m) +} + +var xxx_messageInfo_ParallelSteps proto.InternalMessageInfo + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{95} +} +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return m.Size() +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo + +func (m *Plugin) Reset() { *m = Plugin{} } +func (*Plugin) ProtoMessage() {} +func (*Plugin) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{96} +} +func (m *Plugin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Plugin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plugin.Merge(m, src) +} +func (m *Plugin) XXX_Size() int { + return m.Size() +} +func (m *Plugin) XXX_DiscardUnknown() { + xxx_messageInfo_Plugin.DiscardUnknown(m) +} + +var xxx_messageInfo_Plugin proto.InternalMessageInfo + +func (m *PodGC) Reset() { *m = PodGC{} } +func (*PodGC) ProtoMessage() {} +func (*PodGC) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{97} +} +func (m *PodGC) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return 
nil, err + } + return b[:n], nil +} +func (m *PodGC) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodGC.Merge(m, src) +} +func (m *PodGC) XXX_Size() int { + return m.Size() +} +func (m *PodGC) XXX_DiscardUnknown() { + xxx_messageInfo_PodGC.DiscardUnknown(m) +} + +var xxx_messageInfo_PodGC proto.InternalMessageInfo + +func (m *Prometheus) Reset() { *m = Prometheus{} } +func (*Prometheus) ProtoMessage() {} +func (*Prometheus) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{98} +} +func (m *Prometheus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Prometheus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Prometheus) XXX_Merge(src proto.Message) { + xxx_messageInfo_Prometheus.Merge(m, src) +} +func (m *Prometheus) XXX_Size() int { + return m.Size() +} +func (m *Prometheus) XXX_DiscardUnknown() { + xxx_messageInfo_Prometheus.DiscardUnknown(m) +} + +var xxx_messageInfo_Prometheus proto.InternalMessageInfo + +func (m *RawArtifact) Reset() { *m = RawArtifact{} } +func (*RawArtifact) ProtoMessage() {} +func (*RawArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{99} +} +func (m *RawArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawArtifact.Merge(m, src) +} +func (m *RawArtifact) XXX_Size() int { + return m.Size() +} +func (m *RawArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_RawArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_RawArtifact proto.InternalMessageInfo + +func (m *ResourceTemplate) Reset() { *m = ResourceTemplate{} } +func (*ResourceTemplate) ProtoMessage() {} +func (*ResourceTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{100} +} +func (m *ResourceTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceTemplate.Merge(m, src) +} +func (m *ResourceTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceTemplate proto.InternalMessageInfo + +func (m *RetryAffinity) Reset() { *m = RetryAffinity{} } +func (*RetryAffinity) ProtoMessage() {} +func (*RetryAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{101} +} +func (m *RetryAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RetryAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryAffinity.Merge(m, src) +} +func (m *RetryAffinity) XXX_Size() int { + return m.Size() +} +func (m *RetryAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_RetryAffinity.DiscardUnknown(m) +} + 
+var xxx_messageInfo_RetryAffinity proto.InternalMessageInfo + +func (m *RetryNodeAntiAffinity) Reset() { *m = RetryNodeAntiAffinity{} } +func (*RetryNodeAntiAffinity) ProtoMessage() {} +func (*RetryNodeAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{102} +} +func (m *RetryNodeAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryNodeAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RetryNodeAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryNodeAntiAffinity.Merge(m, src) +} +func (m *RetryNodeAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *RetryNodeAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_RetryNodeAntiAffinity.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryNodeAntiAffinity proto.InternalMessageInfo + +func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } +func (*RetryStrategy) ProtoMessage() {} +func (*RetryStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{103} +} +func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RetryStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryStrategy.Merge(m, src) +} +func (m *RetryStrategy) XXX_Size() int { + return m.Size() +} +func (m *RetryStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RetryStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo + +func (m *S3Artifact) Reset() { *m = S3Artifact{} } +func (*S3Artifact) ProtoMessage() {} +func (*S3Artifact) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{104} +} +func (m *S3Artifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3Artifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3Artifact.Merge(m, src) +} +func (m *S3Artifact) XXX_Size() int { + return m.Size() +} +func (m *S3Artifact) XXX_DiscardUnknown() { + xxx_messageInfo_S3Artifact.DiscardUnknown(m) +} + +var xxx_messageInfo_S3Artifact proto.InternalMessageInfo + +func (m *S3ArtifactRepository) Reset() { *m = S3ArtifactRepository{} } +func (*S3ArtifactRepository) ProtoMessage() {} +func (*S3ArtifactRepository) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{105} +} +func (m *S3ArtifactRepository) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3ArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3ArtifactRepository) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3ArtifactRepository.Merge(m, src) +} +func (m *S3ArtifactRepository) XXX_Size() int { + return m.Size() +} +func (m *S3ArtifactRepository) XXX_DiscardUnknown() { + xxx_messageInfo_S3ArtifactRepository.DiscardUnknown(m) +} + +var xxx_messageInfo_S3ArtifactRepository proto.InternalMessageInfo + +func (m *S3Bucket) Reset() 
{ *m = S3Bucket{} } +func (*S3Bucket) ProtoMessage() {} +func (*S3Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{106} +} +func (m *S3Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3Bucket.Merge(m, src) +} +func (m *S3Bucket) XXX_Size() int { + return m.Size() +} +func (m *S3Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_S3Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_S3Bucket proto.InternalMessageInfo + +func (m *S3EncryptionOptions) Reset() { *m = S3EncryptionOptions{} } +func (*S3EncryptionOptions) ProtoMessage() {} +func (*S3EncryptionOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{107} +} +func (m *S3EncryptionOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3EncryptionOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3EncryptionOptions.Merge(m, src) +} +func (m *S3EncryptionOptions) XXX_Size() int { + return m.Size() +} +func (m *S3EncryptionOptions) XXX_DiscardUnknown() { + xxx_messageInfo_S3EncryptionOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_S3EncryptionOptions proto.InternalMessageInfo + +func (m *ScriptTemplate) Reset() { *m = ScriptTemplate{} } +func (*ScriptTemplate) ProtoMessage() {} +func (*ScriptTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{108} +} +func (m *ScriptTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScriptTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScriptTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScriptTemplate.Merge(m, src) +} +func (m *ScriptTemplate) XXX_Size() int { + return m.Size() +} +func (m *ScriptTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ScriptTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ScriptTemplate proto.InternalMessageInfo + +func (m *SemaphoreHolding) Reset() { *m = SemaphoreHolding{} } +func (*SemaphoreHolding) ProtoMessage() {} +func (*SemaphoreHolding) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{109} +} +func (m *SemaphoreHolding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SemaphoreHolding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SemaphoreHolding) XXX_Merge(src proto.Message) { + xxx_messageInfo_SemaphoreHolding.Merge(m, src) +} +func (m *SemaphoreHolding) XXX_Size() int { + return m.Size() +} +func (m *SemaphoreHolding) XXX_DiscardUnknown() { + xxx_messageInfo_SemaphoreHolding.DiscardUnknown(m) +} + +var xxx_messageInfo_SemaphoreHolding proto.InternalMessageInfo + +func (m *SemaphoreRef) Reset() { *m = SemaphoreRef{} } +func (*SemaphoreRef) ProtoMessage() {} +func (*SemaphoreRef) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{110} 
+} +func (m *SemaphoreRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SemaphoreRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SemaphoreRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_SemaphoreRef.Merge(m, src) +} +func (m *SemaphoreRef) XXX_Size() int { + return m.Size() +} +func (m *SemaphoreRef) XXX_DiscardUnknown() { + xxx_messageInfo_SemaphoreRef.DiscardUnknown(m) +} + +var xxx_messageInfo_SemaphoreRef proto.InternalMessageInfo + +func (m *SemaphoreStatus) Reset() { *m = SemaphoreStatus{} } +func (*SemaphoreStatus) ProtoMessage() {} +func (*SemaphoreStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{111} +} +func (m *SemaphoreStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SemaphoreStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SemaphoreStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SemaphoreStatus.Merge(m, src) +} +func (m *SemaphoreStatus) XXX_Size() int { + return m.Size() +} +func (m *SemaphoreStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SemaphoreStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SemaphoreStatus proto.InternalMessageInfo + +func (m *Sequence) Reset() { *m = Sequence{} } +func (*Sequence) ProtoMessage() {} +func (*Sequence) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{112} +} +func (m *Sequence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Sequence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sequence.Merge(m, src) +} +func (m *Sequence) XXX_Size() int { + return m.Size() +} +func (m *Sequence) XXX_DiscardUnknown() { + xxx_messageInfo_Sequence.DiscardUnknown(m) +} + +var xxx_messageInfo_Sequence proto.InternalMessageInfo + +func (m *StopStrategy) Reset() { *m = StopStrategy{} } +func (*StopStrategy) ProtoMessage() {} +func (*StopStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{113} +} +func (m *StopStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StopStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StopStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopStrategy.Merge(m, src) +} +func (m *StopStrategy) XXX_Size() int { + return m.Size() +} +func (m *StopStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StopStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StopStrategy proto.InternalMessageInfo + +func (m *Submit) Reset() { *m = Submit{} } +func (*Submit) ProtoMessage() {} +func (*Submit) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{114} +} +func (m *Submit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Submit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Submit) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Submit.Merge(m, src) +} +func (m *Submit) XXX_Size() int { + return m.Size() +} +func (m *Submit) XXX_DiscardUnknown() { + xxx_messageInfo_Submit.DiscardUnknown(m) +} + +var xxx_messageInfo_Submit proto.InternalMessageInfo + +func (m *SubmitOpts) Reset() { *m = SubmitOpts{} } +func (*SubmitOpts) ProtoMessage() {} +func (*SubmitOpts) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{115} +} +func (m *SubmitOpts) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubmitOpts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubmitOpts) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubmitOpts.Merge(m, src) +} +func (m *SubmitOpts) XXX_Size() int { + return m.Size() +} +func (m *SubmitOpts) XXX_DiscardUnknown() { + xxx_messageInfo_SubmitOpts.DiscardUnknown(m) +} + +var xxx_messageInfo_SubmitOpts proto.InternalMessageInfo + +func (m *SuppliedValueFrom) Reset() { *m = SuppliedValueFrom{} } +func (*SuppliedValueFrom) ProtoMessage() {} +func (*SuppliedValueFrom) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{116} +} +func (m *SuppliedValueFrom) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuppliedValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuppliedValueFrom) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuppliedValueFrom.Merge(m, src) +} +func (m *SuppliedValueFrom) XXX_Size() int { + return m.Size() +} +func (m *SuppliedValueFrom) XXX_DiscardUnknown() { + xxx_messageInfo_SuppliedValueFrom.DiscardUnknown(m) +} + +var xxx_messageInfo_SuppliedValueFrom proto.InternalMessageInfo + +func (m *SuspendTemplate) Reset() { *m = SuspendTemplate{} } +func (*SuspendTemplate) ProtoMessage() {} +func (*SuspendTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{117} +} +func (m *SuspendTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuspendTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuspendTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuspendTemplate.Merge(m, src) +} +func (m *SuspendTemplate) XXX_Size() int { + return m.Size() +} +func (m *SuspendTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_SuspendTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_SuspendTemplate proto.InternalMessageInfo + +func (m *Synchronization) Reset() { *m = Synchronization{} } +func (*Synchronization) ProtoMessage() {} +func (*Synchronization) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{118} +} +func (m *Synchronization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Synchronization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Synchronization) XXX_Merge(src proto.Message) { + xxx_messageInfo_Synchronization.Merge(m, src) +} +func (m *Synchronization) XXX_Size() int { + return m.Size() +} +func (m *Synchronization) XXX_DiscardUnknown() { + 
xxx_messageInfo_Synchronization.DiscardUnknown(m) +} + +var xxx_messageInfo_Synchronization proto.InternalMessageInfo + +func (m *SynchronizationStatus) Reset() { *m = SynchronizationStatus{} } +func (*SynchronizationStatus) ProtoMessage() {} +func (*SynchronizationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{119} +} +func (m *SynchronizationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SynchronizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SynchronizationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SynchronizationStatus.Merge(m, src) +} +func (m *SynchronizationStatus) XXX_Size() int { + return m.Size() +} +func (m *SynchronizationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SynchronizationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SynchronizationStatus proto.InternalMessageInfo + +func (m *TTLStrategy) Reset() { *m = TTLStrategy{} } +func (*TTLStrategy) ProtoMessage() {} +func (*TTLStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{120} +} +func (m *TTLStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TTLStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TTLStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TTLStrategy.Merge(m, src) +} +func (m *TTLStrategy) XXX_Size() int { + return m.Size() +} +func (m *TTLStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_TTLStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_TTLStrategy proto.InternalMessageInfo + +func (m *TarStrategy) Reset() { *m = TarStrategy{} } +func (*TarStrategy) ProtoMessage() {} +func (*TarStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{121} +} +func (m *TarStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TarStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TarStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TarStrategy.Merge(m, src) +} +func (m *TarStrategy) XXX_Size() int { + return m.Size() +} +func (m *TarStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_TarStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_TarStrategy proto.InternalMessageInfo + +func (m *Template) Reset() { *m = Template{} } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{122} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(m, src) +} +func (m *Template) XXX_Size() int { + return m.Size() +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) +} + +var xxx_messageInfo_Template proto.InternalMessageInfo + +func (m *TemplateRef) Reset() { *m = TemplateRef{} } +func (*TemplateRef) ProtoMessage() {} +func (*TemplateRef) Descriptor() 
([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{123} +} +func (m *TemplateRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateRef.Merge(m, src) +} +func (m *TemplateRef) XXX_Size() int { + return m.Size() +} +func (m *TemplateRef) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateRef.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateRef proto.InternalMessageInfo + +func (m *TransformationStep) Reset() { *m = TransformationStep{} } +func (*TransformationStep) ProtoMessage() {} +func (*TransformationStep) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{124} +} +func (m *TransformationStep) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransformationStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TransformationStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransformationStep.Merge(m, src) +} +func (m *TransformationStep) XXX_Size() int { + return m.Size() +} +func (m *TransformationStep) XXX_DiscardUnknown() { + xxx_messageInfo_TransformationStep.DiscardUnknown(m) +} + +var xxx_messageInfo_TransformationStep proto.InternalMessageInfo + +func (m *UserContainer) Reset() { *m = UserContainer{} } +func (*UserContainer) ProtoMessage() {} +func (*UserContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{125} +} +func (m *UserContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserContainer.Merge(m, src) +} +func (m *UserContainer) XXX_Size() int { + return m.Size() +} +func (m *UserContainer) XXX_DiscardUnknown() { + xxx_messageInfo_UserContainer.DiscardUnknown(m) +} + +var xxx_messageInfo_UserContainer proto.InternalMessageInfo + +func (m *ValueFrom) Reset() { *m = ValueFrom{} } +func (*ValueFrom) ProtoMessage() {} +func (*ValueFrom) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{126} +} +func (m *ValueFrom) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValueFrom) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueFrom.Merge(m, src) +} +func (m *ValueFrom) XXX_Size() int { + return m.Size() +} +func (m *ValueFrom) XXX_DiscardUnknown() { + xxx_messageInfo_ValueFrom.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueFrom proto.InternalMessageInfo + +func (m *Version) Reset() { *m = Version{} } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{127} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *VolumeClaimGC) Reset() { *m = VolumeClaimGC{} } +func (*VolumeClaimGC) ProtoMessage() {} +func (*VolumeClaimGC) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{128} +} +func (m *VolumeClaimGC) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeClaimGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeClaimGC) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeClaimGC.Merge(m, src) +} +func (m *VolumeClaimGC) XXX_Size() int { + return m.Size() +} +func (m *VolumeClaimGC) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeClaimGC.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeClaimGC proto.InternalMessageInfo + +func (m *Workflow) Reset() { *m = Workflow{} } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{129} +} +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(m, src) +} +func (m *Workflow) XXX_Size() int { + return m.Size() +} +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) +} + +var xxx_messageInfo_Workflow proto.InternalMessageInfo + +func (m *WorkflowArtifactGCTask) Reset() { *m = WorkflowArtifactGCTask{} } +func (*WorkflowArtifactGCTask) ProtoMessage() {} +func (*WorkflowArtifactGCTask) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{130} +} +func (m *WorkflowArtifactGCTask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowArtifactGCTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowArtifactGCTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowArtifactGCTask.Merge(m, src) +} +func (m *WorkflowArtifactGCTask) XXX_Size() int { + return m.Size() +} +func (m *WorkflowArtifactGCTask) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowArtifactGCTask.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowArtifactGCTask proto.InternalMessageInfo + +func (m *WorkflowArtifactGCTaskList) Reset() { *m = WorkflowArtifactGCTaskList{} } +func (*WorkflowArtifactGCTaskList) ProtoMessage() {} +func (*WorkflowArtifactGCTaskList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{131} +} +func (m *WorkflowArtifactGCTaskList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowArtifactGCTaskList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowArtifactGCTaskList) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_WorkflowArtifactGCTaskList.Merge(m, src) +} +func (m *WorkflowArtifactGCTaskList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowArtifactGCTaskList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowArtifactGCTaskList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowArtifactGCTaskList proto.InternalMessageInfo + +func (m *WorkflowEventBinding) Reset() { *m = WorkflowEventBinding{} } +func (*WorkflowEventBinding) ProtoMessage() {} +func (*WorkflowEventBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{132} +} +func (m *WorkflowEventBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowEventBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowEventBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowEventBinding.Merge(m, src) +} +func (m *WorkflowEventBinding) XXX_Size() int { + return m.Size() +} +func (m *WorkflowEventBinding) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowEventBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowEventBinding proto.InternalMessageInfo + +func (m *WorkflowEventBindingList) Reset() { *m = WorkflowEventBindingList{} } +func (*WorkflowEventBindingList) ProtoMessage() {} +func (*WorkflowEventBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{133} +} +func (m *WorkflowEventBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowEventBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowEventBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowEventBindingList.Merge(m, src) +} +func (m *WorkflowEventBindingList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowEventBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowEventBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowEventBindingList proto.InternalMessageInfo + +func (m *WorkflowEventBindingSpec) Reset() { *m = WorkflowEventBindingSpec{} } +func (*WorkflowEventBindingSpec) ProtoMessage() {} +func (*WorkflowEventBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{134} +} +func (m *WorkflowEventBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowEventBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowEventBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowEventBindingSpec.Merge(m, src) +} +func (m *WorkflowEventBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *WorkflowEventBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowEventBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowEventBindingSpec proto.InternalMessageInfo + +func (m *WorkflowLevelArtifactGC) Reset() { *m = WorkflowLevelArtifactGC{} } +func (*WorkflowLevelArtifactGC) ProtoMessage() {} +func (*WorkflowLevelArtifactGC) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{135} +} +func (m *WorkflowLevelArtifactGC) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*WorkflowLevelArtifactGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowLevelArtifactGC) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowLevelArtifactGC.Merge(m, src) +} +func (m *WorkflowLevelArtifactGC) XXX_Size() int { + return m.Size() +} +func (m *WorkflowLevelArtifactGC) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowLevelArtifactGC.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowLevelArtifactGC proto.InternalMessageInfo + +func (m *WorkflowList) Reset() { *m = WorkflowList{} } +func (*WorkflowList) ProtoMessage() {} +func (*WorkflowList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{136} +} +func (m *WorkflowList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowList.Merge(m, src) +} +func (m *WorkflowList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowList proto.InternalMessageInfo + +func (m *WorkflowMetadata) Reset() { *m = WorkflowMetadata{} } +func (*WorkflowMetadata) ProtoMessage() {} +func (*WorkflowMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{137} +} +func (m *WorkflowMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowMetadata.Merge(m, src) +} +func (m *WorkflowMetadata) XXX_Size() int { + return m.Size() +} +func (m *WorkflowMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowMetadata proto.InternalMessageInfo + +func (m *WorkflowSpec) Reset() { *m = WorkflowSpec{} } +func (*WorkflowSpec) ProtoMessage() {} +func (*WorkflowSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{138} +} +func (m *WorkflowSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowSpec.Merge(m, src) +} +func (m *WorkflowSpec) XXX_Size() int { + return m.Size() +} +func (m *WorkflowSpec) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowSpec proto.InternalMessageInfo + +func (m *WorkflowStatus) Reset() { *m = WorkflowStatus{} } +func (*WorkflowStatus) ProtoMessage() {} +func (*WorkflowStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{139} +} +func (m *WorkflowStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil 
{ + return nil, err + } + return b[:n], nil +} +func (m *WorkflowStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowStatus.Merge(m, src) +} +func (m *WorkflowStatus) XXX_Size() int { + return m.Size() +} +func (m *WorkflowStatus) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowStatus proto.InternalMessageInfo + +func (m *WorkflowStep) Reset() { *m = WorkflowStep{} } +func (*WorkflowStep) ProtoMessage() {} +func (*WorkflowStep) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{140} +} +func (m *WorkflowStep) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowStep.Merge(m, src) +} +func (m *WorkflowStep) XXX_Size() int { + return m.Size() +} +func (m *WorkflowStep) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowStep.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowStep proto.InternalMessageInfo + +func (m *WorkflowTaskResult) Reset() { *m = WorkflowTaskResult{} } +func (*WorkflowTaskResult) ProtoMessage() {} +func (*WorkflowTaskResult) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{141} +} +func (m *WorkflowTaskResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTaskResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskResult.Merge(m, src) +} +func (m *WorkflowTaskResult) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskResult) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskResult.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskResult proto.InternalMessageInfo + +func (m *WorkflowTaskResultList) Reset() { *m = WorkflowTaskResultList{} } +func (*WorkflowTaskResultList) ProtoMessage() {} +func (*WorkflowTaskResultList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{142} +} +func (m *WorkflowTaskResultList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskResultList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTaskResultList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskResultList.Merge(m, src) +} +func (m *WorkflowTaskResultList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskResultList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskResultList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskResultList proto.InternalMessageInfo + +func (m *WorkflowTaskSet) Reset() { *m = WorkflowTaskSet{} } +func (*WorkflowTaskSet) ProtoMessage() {} +func (*WorkflowTaskSet) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{143} +} +func (m *WorkflowTaskSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*WorkflowTaskSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskSet.Merge(m, src) +} +func (m *WorkflowTaskSet) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskSet) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskSet.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskSet proto.InternalMessageInfo + +func (m *WorkflowTaskSetList) Reset() { *m = WorkflowTaskSetList{} } +func (*WorkflowTaskSetList) ProtoMessage() {} +func (*WorkflowTaskSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{144} +} +func (m *WorkflowTaskSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTaskSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskSetList.Merge(m, src) +} +func (m *WorkflowTaskSetList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskSetList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskSetList proto.InternalMessageInfo + +func (m *WorkflowTaskSetSpec) Reset() { *m = WorkflowTaskSetSpec{} } +func (*WorkflowTaskSetSpec) ProtoMessage() {} +func (*WorkflowTaskSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{145} +} +func (m *WorkflowTaskSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTaskSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskSetSpec.Merge(m, src) +} +func (m *WorkflowTaskSetSpec) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskSetSpec proto.InternalMessageInfo + +func (m *WorkflowTaskSetStatus) Reset() { *m = WorkflowTaskSetStatus{} } +func (*WorkflowTaskSetStatus) ProtoMessage() {} +func (*WorkflowTaskSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{146} +} +func (m *WorkflowTaskSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTaskSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTaskSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTaskSetStatus.Merge(m, src) +} +func (m *WorkflowTaskSetStatus) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTaskSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTaskSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTaskSetStatus proto.InternalMessageInfo + +func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } +func (*WorkflowTemplate) ProtoMessage() {} +func (*WorkflowTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{147} +} +func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err 
+ } + return b[:n], nil +} +func (m *WorkflowTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplate.Merge(m, src) +} +func (m *WorkflowTemplate) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplate proto.InternalMessageInfo + +func (m *WorkflowTemplateList) Reset() { *m = WorkflowTemplateList{} } +func (*WorkflowTemplateList) ProtoMessage() {} +func (*WorkflowTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{148} +} +func (m *WorkflowTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateList.Merge(m, src) +} +func (m *WorkflowTemplateList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateList proto.InternalMessageInfo + +func (m *WorkflowTemplateRef) Reset() { *m = WorkflowTemplateRef{} } +func (*WorkflowTemplateRef) ProtoMessage() {} +func (*WorkflowTemplateRef) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{149} +} +func (m *WorkflowTemplateRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTemplateRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateRef.Merge(m, src) +} +func (m *WorkflowTemplateRef) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateRef) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateRef.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateRef proto.InternalMessageInfo + +func (m *ZipStrategy) Reset() { *m = ZipStrategy{} } +func (*ZipStrategy) ProtoMessage() {} +func (*ZipStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{150} +} +func (m *ZipStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ZipStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ZipStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ZipStrategy.Merge(m, src) +} +func (m *ZipStrategy) XXX_Size() int { + return m.Size() +} +func (m *ZipStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_ZipStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_ZipStrategy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Amount)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Amount") + proto.RegisterType((*ArchiveStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArchiveStrategy") + proto.RegisterType((*Arguments)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Arguments") + proto.RegisterType((*ArtGCStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus") + proto.RegisterMapType((map[string]bool)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus.PodsRecoupedEntry") + proto.RegisterMapType((map[ArtifactGCStrategy]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus.StrategiesProcessedEntry") + proto.RegisterType((*Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Artifact") + proto.RegisterType((*ArtifactGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGC") + proto.RegisterType((*ArtifactGCSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCSpec") + proto.RegisterMapType((map[string]ArtifactNodeSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCSpec.ArtifactsByNodeEntry") + proto.RegisterType((*ArtifactGCStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCStatus") + proto.RegisterMapType((map[string]ArtifactResultNodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCStatus.ArtifactResultsByNodeEntry") + proto.RegisterType((*ArtifactLocation)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactLocation") + proto.RegisterType((*ArtifactNodeSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactNodeSpec") + proto.RegisterMapType((map[string]Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactNodeSpec.ArtifactsEntry") + proto.RegisterType((*ArtifactPaths)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactPaths") + proto.RegisterType((*ArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepository") + proto.RegisterType((*ArtifactRepositoryRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRef") + proto.RegisterType((*ArtifactRepositoryRefStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRefStatus") + proto.RegisterType((*ArtifactResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResult") + proto.RegisterType((*ArtifactResultNodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResultNodeStatus") + proto.RegisterMapType((map[string]ArtifactResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResultNodeStatus.ArtifactResultsEntry") + proto.RegisterType((*ArtifactSearchQuery)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery") + proto.RegisterMapType((map[ArtifactGCStrategy]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery.ArtifactGCStrategiesEntry") + proto.RegisterMapType((map[NodeType]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery.NodeTypesEntry") + proto.RegisterType((*ArtifactSearchResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchResult") + proto.RegisterType((*ArtifactoryArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact") + proto.RegisterType((*ArtifactoryArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryArtifactRepository") + proto.RegisterType((*ArtifactoryAuth)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryAuth") + proto.RegisterType((*AzureArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureArtifact") + proto.RegisterType((*AzureArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureArtifactRepository") + proto.RegisterType((*AzureBlobContainer)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureBlobContainer") + proto.RegisterType((*Backoff)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Backoff") + proto.RegisterType((*BasicAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.BasicAuth") + proto.RegisterType((*Cache)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Cache") + proto.RegisterType((*ClientCertAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClientCertAuth") + proto.RegisterType((*ClusterWorkflowTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate") + proto.RegisterType((*ClusterWorkflowTemplateList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList") + proto.RegisterType((*Column)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Column") + proto.RegisterType((*Condition)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Condition") + proto.RegisterType((*ContainerNode)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerNode") + proto.RegisterType((*ContainerSetRetryStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerSetRetryStrategy") + proto.RegisterType((*ContainerSetTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerSetTemplate") + proto.RegisterType((*ContinueOn)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContinueOn") + proto.RegisterType((*Counter)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Counter") + proto.RegisterType((*CreateS3BucketOptions)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CreateS3BucketOptions") + proto.RegisterType((*CronWorkflow)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflow") + proto.RegisterType((*CronWorkflowList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowList") + proto.RegisterType((*CronWorkflowSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowSpec") + proto.RegisterType((*CronWorkflowStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowStatus") + proto.RegisterType((*DAGTask)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTask") + proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTask.HooksEntry") + proto.RegisterType((*DAGTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTemplate") + proto.RegisterType((*Data)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Data") + proto.RegisterType((*DataSource)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DataSource") + proto.RegisterType((*Event)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Event") + 
proto.RegisterType((*ExecutorConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ExecutorConfig") + proto.RegisterType((*GCSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSArtifact") + proto.RegisterType((*GCSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSArtifactRepository") + proto.RegisterType((*GCSBucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSBucket") + proto.RegisterType((*Gauge)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Gauge") + proto.RegisterType((*GitArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GitArtifact") + proto.RegisterType((*HDFSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSArtifact") + proto.RegisterType((*HDFSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSArtifactRepository") + proto.RegisterType((*HDFSConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSConfig") + proto.RegisterType((*HDFSKrbConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSKrbConfig") + proto.RegisterType((*HTTP)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTP") + proto.RegisterType((*HTTPArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPArtifact") + proto.RegisterType((*HTTPAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPAuth") + proto.RegisterType((*HTTPBodySource)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPBodySource") + proto.RegisterType((*HTTPHeader)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPHeader") + proto.RegisterType((*HTTPHeaderSource)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPHeaderSource") + proto.RegisterType((*Header)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Header") + proto.RegisterType((*Histogram)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Histogram") + proto.RegisterType((*Inputs)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Inputs") + proto.RegisterType((*Item)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Item") + proto.RegisterType((*LabelKeys)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelKeys") + proto.RegisterType((*LabelValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelValueFrom") + proto.RegisterType((*LabelValues)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelValues") + proto.RegisterType((*LifecycleHook)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LifecycleHook") + proto.RegisterType((*Link)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Link") + proto.RegisterType((*ManifestFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ManifestFrom") + proto.RegisterType((*MemoizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MemoizationStatus") + proto.RegisterType((*Memoize)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Memoize") + proto.RegisterType((*Metadata)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata") + 
proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata.LabelsEntry") + proto.RegisterType((*MetricLabel)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MetricLabel") + proto.RegisterType((*Metrics)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metrics") + proto.RegisterType((*Mutex)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Mutex") + proto.RegisterType((*MutexHolding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexHolding") + proto.RegisterType((*MutexStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexStatus") + proto.RegisterType((*NodeFlag)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeFlag") + proto.RegisterType((*NodeResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeResult") + proto.RegisterType((*NodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus") + proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus.ResourcesDurationEntry") + proto.RegisterType((*NodeSynchronizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeSynchronizationStatus") + proto.RegisterType((*NoneStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NoneStrategy") + proto.RegisterType((*OAuth2Auth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OAuth2Auth") + proto.RegisterType((*OAuth2EndpointParam)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OAuth2EndpointParam") + proto.RegisterType((*OSSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSArtifact") + proto.RegisterType((*OSSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSArtifactRepository") + proto.RegisterType((*OSSBucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSBucket") + proto.RegisterType((*OSSLifecycleRule)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSLifecycleRule") + proto.RegisterType((*Object)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Object") + proto.RegisterType((*Outputs)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Outputs") + proto.RegisterType((*ParallelSteps)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ParallelSteps") + proto.RegisterType((*Parameter)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Parameter") + proto.RegisterType((*Plugin)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Plugin") + proto.RegisterType((*PodGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.PodGC") + proto.RegisterType((*Prometheus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Prometheus") + proto.RegisterType((*RawArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RawArtifact") + proto.RegisterType((*ResourceTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ResourceTemplate") + proto.RegisterType((*RetryAffinity)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryAffinity") + proto.RegisterType((*RetryNodeAntiAffinity)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryNodeAntiAffinity") + proto.RegisterType((*RetryStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryStrategy") + proto.RegisterType((*S3Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3Artifact") + proto.RegisterType((*S3ArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3ArtifactRepository") + proto.RegisterType((*S3Bucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3Bucket") + proto.RegisterType((*S3EncryptionOptions)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3EncryptionOptions") + proto.RegisterType((*ScriptTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ScriptTemplate") + proto.RegisterType((*SemaphoreHolding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreHolding") + proto.RegisterType((*SemaphoreRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreRef") + proto.RegisterType((*SemaphoreStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreStatus") + proto.RegisterType((*Sequence)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Sequence") + proto.RegisterType((*StopStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.StopStrategy") + proto.RegisterType((*Submit)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Submit") + proto.RegisterType((*SubmitOpts)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SubmitOpts") + proto.RegisterType((*SuppliedValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SuppliedValueFrom") + proto.RegisterType((*SuspendTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SuspendTemplate") + proto.RegisterType((*Synchronization)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Synchronization") + proto.RegisterType((*SynchronizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SynchronizationStatus") + proto.RegisterType((*TTLStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TTLStrategy") + proto.RegisterType((*TarStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TarStrategy") + proto.RegisterType((*Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Template") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Template.NodeSelectorEntry") + proto.RegisterType((*TemplateRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TemplateRef") + proto.RegisterType((*TransformationStep)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TransformationStep") + proto.RegisterType((*UserContainer)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.UserContainer") + proto.RegisterType((*ValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ValueFrom") + proto.RegisterType((*Version)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Version") + proto.RegisterType((*VolumeClaimGC)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.VolumeClaimGC") + proto.RegisterType((*Workflow)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Workflow") + proto.RegisterType((*WorkflowArtifactGCTask)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowArtifactGCTask") + proto.RegisterType((*WorkflowArtifactGCTaskList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowArtifactGCTaskList") + proto.RegisterType((*WorkflowEventBinding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBinding") + proto.RegisterType((*WorkflowEventBindingList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingList") + proto.RegisterType((*WorkflowEventBindingSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingSpec") + proto.RegisterType((*WorkflowLevelArtifactGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowLevelArtifactGC") + proto.RegisterType((*WorkflowList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowList") + proto.RegisterType((*WorkflowMetadata)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.LabelsEntry") + proto.RegisterMapType((map[string]LabelValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.LabelsFromEntry") + proto.RegisterType((*WorkflowSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec") + proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec.HooksEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec.NodeSelectorEntry") + proto.RegisterType((*WorkflowStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus") + proto.RegisterMapType((Nodes)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.NodesEntry") + proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.ResourcesDurationEntry") + proto.RegisterMapType((map[string]Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.StoredTemplatesEntry") + proto.RegisterMapType((map[string]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.TaskResultsCompletionStatusEntry") + proto.RegisterType((*WorkflowStep)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep") + proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep.HooksEntry") + proto.RegisterType((*WorkflowTaskResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskResult") + proto.RegisterType((*WorkflowTaskResultList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskResultList") + proto.RegisterType((*WorkflowTaskSet)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSet") + proto.RegisterType((*WorkflowTaskSetList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetList") + proto.RegisterType((*WorkflowTaskSetSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetSpec") + proto.RegisterMapType((map[string]Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetSpec.TasksEntry") + proto.RegisterType((*WorkflowTaskSetStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetStatus") + proto.RegisterMapType((map[string]NodeResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetStatus.NodesEntry") + proto.RegisterType((*WorkflowTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplate") + proto.RegisterType((*WorkflowTemplateList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplateList") + proto.RegisterType((*WorkflowTemplateRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplateRef") + proto.RegisterType((*ZipStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ZipStrategy") +} + +func init() { + proto.RegisterFile("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto", fileDescriptor_724696e352c3df5f) +} + +var fileDescriptor_724696e352c3df5f = []byte{ + // 11163 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0xbd, 0x6b, 0x70, 0x64, 0xc7, + 0x75, 0x18, 0xcc, 0x3b, 0xc0, 0xe0, 0x71, 0xf0, 0x58, 0x6c, 0xef, 0x6b, 0x88, 0x25, 0x17, 0xf4, + 0xa5, 0xc8, 0x8f, 0xb4, 0x28, 0xac, 0xb9, 0x94, 0xbe, 0x30, 0x52, 0x22, 0x09, 0x8f, 0x05, 0x16, + 0x04, 0xb0, 0x00, 0x7b, 0xb0, 0xbb, 0x26, 0x45, 0x4b, 0xba, 0x98, 0x69, 0xcc, 0x5c, 0x62, 0xe6, + 0xde, 0xe1, 0xbd, 0x77, 0xb0, 0x0b, 0x3e, 0x24, 0x85, 0x7a, 0xc7, 0xb2, 0x15, 0xcb, 0x92, 0x2c, + 0x29, 0x49, 0x95, 0xa2, 0x48, 0x09, 0x4b, 0x76, 0x25, 0x65, 0xff, 0x4a, 0xd9, 0xff, 0x52, 0x29, + 0x97, 0x52, 0x4e, 0x25, 0x72, 0x45, 0x29, 0xeb, 0x87, 0x0d, 0x46, 0x9b, 0x44, 0x95, 0x4a, 0xa2, + 0xaa, 0x58, 0x15, 0x27, 0xf1, 0xe6, 0x51, 0xa9, 0x7e, 0xde, 0xee, 0x3b, 0x77, 0xb0, 0x03, 0x6c, + 0x03, 0xab, 0xb2, 0x7f, 0x01, 0x73, 0xfa, 0xf4, 0x39, 0xdd, 0x7d, 0xbb, 0x4f, 0x9f, 0x3e, 0xe7, + 0xf4, 0x69, 0x58, 0xaf, 0xf9, 0x49, 0xbd, 0xbd, 0x39, 0x5d, 0x09, 0x9b, 0x17, 0xbd, 0xa8, 0x16, + 0xb6, 0xa2, 0xf0, 0x65, 0xf6, 0xcf, 0xbb, 0x6e, 0x86, 0xd1, 0xf6, 0x56, 0x23, 0xbc, 0x19, 0x5f, + 0xdc, 0x79, 0xe6, 0x62, 0x6b, 0xbb, 0x76, 0xd1, 0x6b, 0xf9, 0xf1, 0x45, 0x09, 0xbd, 0xb8, 0xf3, + 0xb4, 0xd7, 0x68, 0xd5, 0xbd, 0xa7, 0x2f, 0xd6, 0x48, 0x40, 0x22, 0x2f, 0x21, 0xd5, 0xe9, 0x56, + 0x14, 0x26, 0x21, 0xfa, 0x60, 0x4a, 0x71, 0x5a, 0x52, 0x64, 0xff, 0x7c, 0x44, 0x51, 0x9c, 0xde, + 0x79, 0x66, 0xba, 0xb5, 0x5d, 0x9b, 0xa6, 0x14, 0xa7, 0x25, 0x74, 0x5a, 0x52, 0x9c, 0x7c, 0x97, + 0xd6, 0xa6, 0x5a, 0x58, 0x0b, 0x2f, 0x32, 0xc2, 0x9b, 0xed, 0x2d, 0xf6, 0x8b, 0xfd, 0x60, 0xff, + 0x71, 0x86, 0x93, 0xee, 0xf6, 0xb3, 0xf1, 0xb4, 0x1f, 0xd2, 0xf6, 0x5d, 0xac, 0x84, 0x11, 0xb9, + 0xb8, 0xd3, 0xd1, 0xa8, 0xc9, 0x77, 0x68, 0x38, 0xad, 0xb0, 0xe1, 0x57, 0x76, 0xf3, 0xb0, 0xde, + 0x9d, 0x62, 0x35, 0xbd, 0x4a, 0xdd, 0x0f, 0x48, 0xb4, 0x9b, 0x76, 0xbd, 0x49, 0x12, 0x2f, 0xaf, + 0xd6, 0xc5, 0x6e, 0xb5, 0xa2, 0x76, 0x90, 0xf8, 0x4d, 0xd2, 0x51, 0xe1, 0xff, 0xbf, 0x5b, 
0x85, + 0xb8, 0x52, 0x27, 0x4d, 0xaf, 0xa3, 0xde, 0x33, 0xdd, 0xea, 0xb5, 0x13, 0xbf, 0x71, 0xd1, 0x0f, + 0x92, 0x38, 0x89, 0xb2, 0x95, 0xdc, 0xcb, 0x30, 0x30, 0xd3, 0x0c, 0xdb, 0x41, 0x82, 0xde, 0x07, + 0xc5, 0x1d, 0xaf, 0xd1, 0x26, 0x25, 0xe7, 0x11, 0xe7, 0x89, 0xe1, 0xd9, 0xc7, 0xbe, 0xb7, 0x37, + 0xf5, 0xc0, 0xed, 0xbd, 0xa9, 0xe2, 0x75, 0x0a, 0xbc, 0xb3, 0x37, 0x75, 0x9a, 0x04, 0x95, 0xb0, + 0xea, 0x07, 0xb5, 0x8b, 0x2f, 0xc7, 0x61, 0x30, 0x7d, 0xb5, 0xdd, 0xdc, 0x24, 0x11, 0xe6, 0x75, + 0xdc, 0x7f, 0x5d, 0x80, 0x13, 0x33, 0x51, 0xa5, 0xee, 0xef, 0x90, 0x72, 0x42, 0xe9, 0xd7, 0x76, + 0x51, 0x1d, 0xfa, 0x12, 0x2f, 0x62, 0xe4, 0x46, 0x2e, 0xad, 0x4e, 0xdf, 0xeb, 0x77, 0x9f, 0xde, + 0xf0, 0x22, 0x49, 0x7b, 0x76, 0xf0, 0xf6, 0xde, 0x54, 0xdf, 0x86, 0x17, 0x61, 0xca, 0x02, 0x35, + 0xa0, 0x3f, 0x08, 0x03, 0x52, 0x2a, 0x30, 0x56, 0x57, 0xef, 0x9d, 0xd5, 0xd5, 0x30, 0x50, 0xfd, + 0x98, 0x1d, 0xba, 0xbd, 0x37, 0xd5, 0x4f, 0x21, 0x98, 0x71, 0xa1, 0xfd, 0x7a, 0xd5, 0x6f, 0x95, + 0xfa, 0x6c, 0xf5, 0xeb, 0x45, 0xbf, 0x65, 0xf6, 0xeb, 0x45, 0xbf, 0x85, 0x29, 0x0b, 0xf7, 0xf3, + 0x05, 0x18, 0x9e, 0x89, 0x6a, 0xed, 0x26, 0x09, 0x92, 0x18, 0x7d, 0x1c, 0xa0, 0xe5, 0x45, 0x5e, + 0x93, 0x24, 0x24, 0x8a, 0x4b, 0xce, 0x23, 0x7d, 0x4f, 0x8c, 0x5c, 0x5a, 0xbe, 0x77, 0xf6, 0xeb, + 0x92, 0xe6, 0x2c, 0x12, 0x9f, 0x1c, 0x14, 0x28, 0xc6, 0x1a, 0x4b, 0xf4, 0x1a, 0x0c, 0x7b, 0x51, + 0xe2, 0x6f, 0x79, 0x95, 0x24, 0x2e, 0x15, 0x18, 0xff, 0xe7, 0xee, 0x9d, 0xff, 0x8c, 0x20, 0x39, + 0x7b, 0x52, 0xb0, 0x1f, 0x96, 0x90, 0x18, 0xa7, 0xfc, 0xdc, 0xdf, 0xed, 0x87, 0x91, 0x99, 0x28, + 0x59, 0x9c, 0x2b, 0x27, 0x5e, 0xd2, 0x8e, 0xd1, 0x1f, 0x38, 0x70, 0x2a, 0xe6, 0xc3, 0xe6, 0x93, + 0x78, 0x3d, 0x0a, 0x2b, 0x24, 0x8e, 0x49, 0x55, 0x8c, 0xcb, 0x96, 0x95, 0x76, 0x49, 0x66, 0xd3, + 0xe5, 0x4e, 0x46, 0x97, 0x83, 0x24, 0xda, 0x9d, 0x7d, 0x5a, 0xb4, 0xf9, 0x54, 0x0e, 0xc6, 0x9b, + 0x6f, 0x4f, 0x21, 0xd9, 0x15, 0x4a, 0x89, 0x7f, 0x62, 0x9c, 0xd7, 0x6a, 0xf4, 0x75, 0x07, 0x46, + 0x5b, 0x61, 0x35, 0xc6, 0xa4, 0x12, 0xb6, 0x5b, 0xa4, 0x2a, 0x86, 0xf7, 0x23, 0x76, 0xbb, 0xb1, + 0xae, 0x71, 0xe0, 0xed, 0x3f, 0x2d, 0xda, 0x3f, 0xaa, 0x17, 0x61, 0xa3, 0x29, 0xe8, 0x59, 0x18, + 0x0d, 0xc2, 0xa4, 0xdc, 0x22, 0x15, 0x7f, 0xcb, 0x27, 0x55, 0x36, 0xf1, 0x87, 0xd2, 0x9a, 0x57, + 0xb5, 0x32, 0x6c, 0x60, 0x4e, 0x2e, 0x40, 0xa9, 0xdb, 0xc8, 0xa1, 0x09, 0xe8, 0xdb, 0x26, 0xbb, + 0x5c, 0xd8, 0x60, 0xfa, 0x2f, 0x3a, 0x2d, 0x05, 0x10, 0x5d, 0xc6, 0x43, 0x42, 0xb2, 0xbc, 0xb7, + 0xf0, 0xac, 0x33, 0xf9, 0x01, 0x38, 0xd9, 0xd1, 0xf4, 0x83, 0x10, 0x70, 0xbf, 0x3f, 0x00, 0x43, + 0xf2, 0x53, 0xa0, 0x47, 0xa0, 0x3f, 0xf0, 0x9a, 0x52, 0xce, 0x8d, 0x8a, 0x7e, 0xf4, 0x5f, 0xf5, + 0x9a, 0x74, 0x85, 0x7b, 0x4d, 0x42, 0x31, 0x5a, 0x5e, 0x52, 0x67, 0x74, 0x34, 0x8c, 0x75, 0x2f, + 0xa9, 0x63, 0x56, 0x82, 0x1e, 0x82, 0xfe, 0x66, 0x58, 0x25, 0x6c, 0x2c, 0x8a, 0x5c, 0x42, 0xac, + 0x86, 0x55, 0x82, 0x19, 0x94, 0xd6, 0xdf, 0x8a, 0xc2, 0x66, 0xa9, 0xdf, 0xac, 0xbf, 0x10, 0x85, + 0x4d, 0xcc, 0x4a, 0xd0, 0xd7, 0x1c, 0x98, 0x90, 0x73, 0x7b, 0x25, 0xac, 0x78, 0x89, 0x1f, 0x06, + 0xa5, 0x22, 0x93, 0x28, 0xd8, 0xde, 0x92, 0x92, 0x94, 0x67, 0x4b, 0xa2, 0x09, 0x13, 0xd9, 0x12, + 0xdc, 0xd1, 0x0a, 0x74, 0x09, 0xa0, 0xd6, 0x08, 0x37, 0xbd, 0x06, 0x1d, 0x90, 0xd2, 0x00, 0xeb, + 0x82, 0x92, 0x0c, 0x8b, 0xaa, 0x04, 0x6b, 0x58, 0xe8, 0x16, 0x0c, 0x7a, 0x5c, 0xfa, 0x97, 0x06, + 0x59, 0x27, 0x9e, 0xb7, 0xd1, 0x09, 0x63, 0x3b, 0x99, 0x1d, 0xb9, 0xbd, 0x37, 0x35, 0x28, 0x80, + 0x58, 0xb2, 0x43, 0x4f, 0xc1, 0x50, 0xd8, 0xa2, 0xed, 0xf6, 0x1a, 0xa5, 0x21, 0x36, 0x31, 0x27, + 0x44, 0x5b, 0x87, 
0xd6, 0x04, 0x1c, 0x2b, 0x0c, 0xf4, 0x24, 0x0c, 0xc6, 0xed, 0x4d, 0xfa, 0x1d, + 0x4b, 0xc3, 0xac, 0x63, 0x27, 0x04, 0xf2, 0x60, 0x99, 0x83, 0xb1, 0x2c, 0x47, 0xef, 0x81, 0x91, + 0x88, 0x54, 0xda, 0x51, 0x4c, 0xe8, 0x87, 0x2d, 0x01, 0xa3, 0x7d, 0x4a, 0xa0, 0x8f, 0xe0, 0xb4, + 0x08, 0xeb, 0x78, 0xe8, 0xfd, 0x30, 0x4e, 0x3f, 0xf0, 0xe5, 0x5b, 0xad, 0x88, 0xc4, 0x31, 0xfd, + 0xaa, 0x23, 0x8c, 0xd1, 0x59, 0x51, 0x73, 0x7c, 0xc1, 0x28, 0xc5, 0x19, 0x6c, 0xf4, 0x3a, 0x80, + 0xa7, 0x64, 0x46, 0x69, 0x94, 0x0d, 0xe6, 0x8a, 0xbd, 0x19, 0xb1, 0x38, 0x37, 0x3b, 0x4e, 0xbf, + 0x63, 0xfa, 0x1b, 0x6b, 0xfc, 0xe8, 0xf8, 0x54, 0x49, 0x83, 0x24, 0xa4, 0x5a, 0x1a, 0x63, 0x1d, + 0x56, 0xe3, 0x33, 0xcf, 0xc1, 0x58, 0x96, 0xbb, 0x7f, 0xbb, 0x00, 0x1a, 0x15, 0x34, 0x0b, 0x43, + 0x42, 0xae, 0x89, 0x25, 0x39, 0xfb, 0xb8, 0xfc, 0x0e, 0xf2, 0x0b, 0xde, 0xd9, 0xcb, 0x95, 0x87, + 0xaa, 0x1e, 0x7a, 0x03, 0x46, 0x5a, 0x61, 0x75, 0x95, 0x24, 0x5e, 0xd5, 0x4b, 0x3c, 0xb1, 0x9b, + 0x5b, 0xd8, 0x61, 0x24, 0xc5, 0xd9, 0x13, 0xf4, 0xd3, 0xad, 0xa7, 0x2c, 0xb0, 0xce, 0x0f, 0x3d, + 0x07, 0x28, 0x26, 0xd1, 0x8e, 0x5f, 0x21, 0x33, 0x95, 0x0a, 0x55, 0x89, 0xd8, 0x02, 0xe8, 0x63, + 0x9d, 0x99, 0x14, 0x9d, 0x41, 0xe5, 0x0e, 0x0c, 0x9c, 0x53, 0xcb, 0xfd, 0x41, 0x01, 0xc6, 0xb5, + 0xbe, 0xb6, 0x48, 0x05, 0xbd, 0xe5, 0xc0, 0x09, 0xb5, 0x9d, 0xcd, 0xee, 0x5e, 0xa5, 0xb3, 0x8a, + 0x6f, 0x56, 0xc4, 0xe6, 0xf7, 0xa5, 0xbc, 0xd4, 0x4f, 0xc1, 0x87, 0xcb, 0xfa, 0x73, 0xa2, 0x0f, + 0x27, 0x32, 0xa5, 0x38, 0xdb, 0xac, 0xc9, 0xaf, 0x3a, 0x70, 0x3a, 0x8f, 0x44, 0x8e, 0xcc, 0xad, + 0xeb, 0x32, 0xd7, 0xaa, 0xf0, 0xa2, 0x5c, 0x69, 0x67, 0x74, 0x39, 0xfe, 0x7f, 0x0b, 0x30, 0xa1, + 0x4f, 0x21, 0xa6, 0x09, 0xfc, 0x53, 0x07, 0xce, 0xc8, 0x1e, 0x60, 0x12, 0xb7, 0x1b, 0x99, 0xe1, + 0x6d, 0x5a, 0x1d, 0x5e, 0xbe, 0x93, 0xce, 0xe4, 0xf1, 0xe3, 0xc3, 0xfc, 0xb0, 0x18, 0xe6, 0x33, + 0xb9, 0x38, 0x38, 0xbf, 0xa9, 0x93, 0xdf, 0x76, 0x60, 0xb2, 0x3b, 0xd1, 0x9c, 0x81, 0x6f, 0x99, + 0x03, 0xff, 0xa2, 0xbd, 0x4e, 0x72, 0xf6, 0x6c, 0xf8, 0x59, 0x67, 0xf5, 0x0f, 0xf0, 0x5b, 0x43, + 0xd0, 0xb1, 0x87, 0xa0, 0xa7, 0x61, 0x44, 0x88, 0xe3, 0x95, 0xb0, 0x16, 0xb3, 0x46, 0x0e, 0xf1, + 0xb5, 0x36, 0x93, 0x82, 0xb1, 0x8e, 0x83, 0xaa, 0x50, 0x88, 0x9f, 0x11, 0x4d, 0xb7, 0x20, 0xde, + 0xca, 0xcf, 0x28, 0x2d, 0x72, 0xe0, 0xf6, 0xde, 0x54, 0xa1, 0xfc, 0x0c, 0x2e, 0xc4, 0xcf, 0x50, + 0x4d, 0xbd, 0xe6, 0x27, 0xf6, 0x34, 0xf5, 0x45, 0x3f, 0x51, 0x7c, 0x98, 0xa6, 0xbe, 0xe8, 0x27, + 0x98, 0xb2, 0xa0, 0x27, 0x90, 0x7a, 0x92, 0xb4, 0xd8, 0x8e, 0x6f, 0xe5, 0x04, 0x72, 0x65, 0x63, + 0x63, 0x5d, 0xf1, 0x62, 0xfa, 0x05, 0x85, 0x60, 0xc6, 0x05, 0x7d, 0xce, 0xa1, 0x23, 0xce, 0x0b, + 0xc3, 0x68, 0x57, 0x28, 0x0e, 0xd7, 0xec, 0x4d, 0x81, 0x30, 0xda, 0x55, 0xcc, 0xc5, 0x87, 0x54, + 0x05, 0x58, 0x67, 0xcd, 0x3a, 0x5e, 0xdd, 0x8a, 0x99, 0x9e, 0x60, 0xa7, 0xe3, 0xf3, 0x0b, 0xe5, + 0x4c, 0xc7, 0xe7, 0x17, 0xca, 0x98, 0x71, 0xa1, 0x1f, 0x34, 0xf2, 0x6e, 0x0a, 0x1d, 0xc3, 0xc2, + 0x07, 0xc5, 0xde, 0x4d, 0xf3, 0x83, 0x62, 0xef, 0x26, 0xa6, 0x2c, 0x28, 0xa7, 0x30, 0x8e, 0x99, + 0x4a, 0x61, 0x85, 0xd3, 0x5a, 0xb9, 0x6c, 0x72, 0x5a, 0x2b, 0x97, 0x31, 0x65, 0xc1, 0x26, 0x69, + 0x25, 0x66, 0xfa, 0x88, 0x9d, 0x49, 0x3a, 0x97, 0xe1, 0xb4, 0x38, 0x57, 0xc6, 0x94, 0x05, 0x15, + 0x19, 0xde, 0xab, 0xed, 0x88, 0x2b, 0x33, 0x23, 0x97, 0xd6, 0x2c, 0xcc, 0x17, 0x4a, 0x4e, 0x71, + 0x1b, 0xbe, 0xbd, 0x37, 0x55, 0x64, 0x20, 0xcc, 0x19, 0xb9, 0xbf, 0xdf, 0x97, 0x8a, 0x0b, 0x29, + 0xcf, 0xd1, 0xaf, 0xb1, 0x8d, 0x50, 0xc8, 0x02, 0xa1, 0xfa, 0x3a, 0x47, 0xa6, 0xfa, 0x9e, 0xe2, + 0x3b, 0x9e, 0xc1, 0x0e, 0x67, 0xf9, 0xa3, 
0x2f, 0x39, 0x9d, 0x67, 0x5b, 0xcf, 0xfe, 0x5e, 0x96, + 0x6e, 0xcc, 0x7c, 0xaf, 0xd8, 0xf7, 0xc8, 0x3b, 0xf9, 0x39, 0x27, 0x55, 0x22, 0xe2, 0x6e, 0xfb, + 0xc0, 0x47, 0xcd, 0x7d, 0xc0, 0xe2, 0x81, 0x5c, 0x97, 0xfb, 0x9f, 0x77, 0x60, 0x4c, 0xc2, 0xa9, + 0x7a, 0x1c, 0xa3, 0x5b, 0x30, 0x24, 0x5b, 0x2a, 0xbe, 0x9e, 0x4d, 0x5b, 0x80, 0x52, 0xe2, 0x55, + 0x63, 0x14, 0x37, 0xf7, 0xad, 0x01, 0x40, 0xe9, 0x5e, 0xd5, 0x0a, 0x63, 0x9f, 0x49, 0xa2, 0x43, + 0xec, 0x42, 0x81, 0xb6, 0x0b, 0x5d, 0xb7, 0xb9, 0x0b, 0xa5, 0xcd, 0x32, 0xf6, 0xa3, 0x2f, 0x65, + 0xe4, 0x36, 0xdf, 0x98, 0x3e, 0x72, 0x24, 0x72, 0x5b, 0x6b, 0xc2, 0xfe, 0x12, 0x7c, 0x47, 0x48, + 0x70, 0xbe, 0x75, 0xfd, 0xa2, 0x5d, 0x09, 0xae, 0xb5, 0x22, 0x2b, 0xcb, 0x23, 0x2e, 0x61, 0xf9, + 0xde, 0x75, 0xc3, 0xaa, 0x84, 0xd5, 0xb8, 0x9a, 0xb2, 0x36, 0xe2, 0xb2, 0x76, 0xc0, 0x16, 0x4f, + 0x4d, 0xd6, 0x66, 0x79, 0x2a, 0xa9, 0xfb, 0xaa, 0x94, 0xba, 0x7c, 0xd7, 0x7a, 0xc1, 0xb2, 0xd4, + 0xd5, 0xf8, 0x76, 0xca, 0xdf, 0x57, 0xe0, 0x4c, 0x27, 0x1e, 0x26, 0x5b, 0xe8, 0x22, 0x0c, 0x57, + 0xc2, 0x60, 0xcb, 0xaf, 0xad, 0x7a, 0x2d, 0x71, 0x5e, 0x53, 0xb2, 0x68, 0x4e, 0x16, 0xe0, 0x14, + 0x07, 0x3d, 0xcc, 0x05, 0x0f, 0xb7, 0x88, 0x8c, 0x08, 0xd4, 0xbe, 0x65, 0xb2, 0xcb, 0xa4, 0xd0, + 0x7b, 0x87, 0xbe, 0xf6, 0xcd, 0xa9, 0x07, 0x3e, 0xf1, 0xc7, 0x8f, 0x3c, 0xe0, 0xfe, 0x61, 0x1f, + 0x9c, 0xcf, 0xe5, 0x29, 0xb4, 0xf5, 0xdf, 0x32, 0xb4, 0x75, 0xad, 0x5c, 0x48, 0x91, 0x1b, 0x36, + 0x15, 0x59, 0x8d, 0x7c, 0x9e, 0x5e, 0xae, 0x15, 0xe3, 0xfc, 0x46, 0xd1, 0x81, 0x0a, 0xbc, 0x26, + 0x89, 0x5b, 0x5e, 0x85, 0x88, 0xde, 0xab, 0x81, 0xba, 0x2a, 0x0b, 0x70, 0x8a, 0xc3, 0x8f, 0xd0, + 0x5b, 0x5e, 0xbb, 0x91, 0x08, 0x43, 0x99, 0x76, 0x84, 0x66, 0x60, 0x2c, 0xcb, 0xd1, 0xdf, 0x71, + 0x00, 0x75, 0x72, 0x15, 0x0b, 0x71, 0xe3, 0x28, 0xc6, 0x61, 0xf6, 0xec, 0x6d, 0xed, 0x10, 0xae, + 0xf5, 0x34, 0xa7, 0x1d, 0xda, 0x37, 0xfd, 0x58, 0xba, 0x0f, 0xf1, 0xc3, 0x41, 0x0f, 0x36, 0x34, + 0x66, 0x6a, 0xa9, 0x54, 0x48, 0x1c, 0x73, 0x73, 0x9c, 0x6e, 0x6a, 0x61, 0x60, 0x2c, 0xcb, 0xd1, + 0x14, 0x14, 0x49, 0x14, 0x85, 0x91, 0x38, 0x6b, 0xb3, 0x69, 0x7c, 0x99, 0x02, 0x30, 0x87, 0xbb, + 0x3f, 0x2e, 0x40, 0xa9, 0xdb, 0xe9, 0x04, 0xfd, 0x8e, 0x76, 0xae, 0x16, 0x27, 0x27, 0x71, 0xf0, + 0x0b, 0x8f, 0xee, 0x4c, 0x94, 0x3d, 0x00, 0x76, 0x39, 0x61, 0x8b, 0x52, 0x9c, 0x6d, 0xe0, 0xe4, + 0x97, 0xb5, 0x13, 0xb6, 0x4e, 0x22, 0x67, 0x83, 0xdf, 0x32, 0x37, 0xf8, 0x75, 0xdb, 0x9d, 0xd2, + 0xb7, 0xf9, 0x3f, 0x29, 0xc2, 0x29, 0x59, 0x5a, 0x26, 0x74, 0xab, 0x7c, 0xbe, 0x4d, 0xa2, 0x5d, + 0xf4, 0x47, 0x0e, 0x9c, 0xf6, 0xb2, 0xa6, 0x1b, 0x9f, 0x1c, 0xc1, 0x40, 0x6b, 0x5c, 0xa7, 0x67, + 0x72, 0x38, 0xf2, 0x81, 0xbe, 0x24, 0x06, 0xfa, 0x74, 0x1e, 0x4a, 0x17, 0xbb, 0x7b, 0x6e, 0x07, + 0xd0, 0xb3, 0x30, 0x2a, 0xe1, 0xcc, 0xdc, 0xc3, 0x97, 0xb8, 0x32, 0x6e, 0xcf, 0x68, 0x65, 0xd8, + 0xc0, 0xa4, 0x35, 0x13, 0xd2, 0x6c, 0x35, 0xbc, 0x84, 0x68, 0x86, 0x22, 0x55, 0x73, 0x43, 0x2b, + 0xc3, 0x06, 0x26, 0x7a, 0x1c, 0x06, 0x82, 0xb0, 0x4a, 0x96, 0xaa, 0xc2, 0x40, 0x3c, 0x2e, 0xea, + 0x0c, 0x5c, 0x65, 0x50, 0x2c, 0x4a, 0xd1, 0x63, 0xa9, 0x35, 0xae, 0xc8, 0x96, 0xd0, 0x48, 0x9e, + 0x25, 0x0e, 0xfd, 0x3d, 0x07, 0x86, 0x69, 0x8d, 0x8d, 0xdd, 0x16, 0xa1, 0x7b, 0x1b, 0xfd, 0x22, + 0xd5, 0xa3, 0xf9, 0x22, 0x57, 0x25, 0x1b, 0xd3, 0xd4, 0x31, 0xac, 0xe0, 0x6f, 0xbe, 0x3d, 0x35, + 0x24, 0x7f, 0xe0, 0xb4, 0x55, 0x93, 0x8b, 0xf0, 0x60, 0xd7, 0xaf, 0x79, 0x20, 0x57, 0xc0, 0x5f, + 0x83, 0x71, 0xb3, 0x11, 0x07, 0xf2, 0x03, 0xfc, 0x13, 0x6d, 0xd9, 0xf1, 0x7e, 0x09, 0x79, 0x76, + 0xdf, 0xb4, 0x59, 0x35, 0x19, 0xe6, 0xc5, 0xd4, 0x33, 0x27, 0xc3, 
0xbc, 0x98, 0x0c, 0xf3, 0xee, + 0x1f, 0x38, 0xe9, 0xd2, 0xd4, 0xd4, 0x3c, 0xba, 0x31, 0xb7, 0xa3, 0x86, 0x10, 0xc4, 0x6a, 0x63, + 0xbe, 0x86, 0x57, 0x30, 0x85, 0xa3, 0x2f, 0x6b, 0xd2, 0x91, 0x56, 0x6b, 0x0b, 0xb7, 0x86, 0x25, + 0x13, 0xbd, 0x41, 0xb8, 0x53, 0xfe, 0x89, 0x02, 0x9c, 0x6d, 0x82, 0xfb, 0xa5, 0x02, 0x3c, 0xbc, + 0xaf, 0xd2, 0x9a, 0xdb, 0x70, 0xe7, 0xbe, 0x37, 0x9c, 0x6e, 0x6b, 0x11, 0x69, 0x85, 0xd7, 0xf0, + 0x8a, 0xf8, 0x5e, 0x6a, 0x5b, 0xc3, 0x1c, 0x8c, 0x65, 0x39, 0x55, 0x1d, 0xb6, 0xc9, 0xee, 0x42, + 0x18, 0x35, 0xbd, 0x44, 0x48, 0x07, 0xa5, 0x3a, 0x2c, 0xcb, 0x02, 0x9c, 0xe2, 0xb8, 0x7f, 0xe4, + 0x40, 0xb6, 0x01, 0xc8, 0x83, 0xf1, 0x76, 0x4c, 0x22, 0xba, 0xa5, 0x96, 0x49, 0x25, 0x22, 0x72, + 0x7a, 0x3e, 0x36, 0xcd, 0xbd, 0xfd, 0xb4, 0x87, 0xd3, 0x95, 0x30, 0x22, 0xd3, 0x3b, 0x4f, 0x4f, + 0x73, 0x8c, 0x65, 0xb2, 0x5b, 0x26, 0x0d, 0x42, 0x69, 0xcc, 0xa2, 0xdb, 0x7b, 0x53, 0xe3, 0xd7, + 0x0c, 0x02, 0x38, 0x43, 0x90, 0xb2, 0x68, 0x79, 0x71, 0x7c, 0x33, 0x8c, 0xaa, 0x82, 0x45, 0xe1, + 0xc0, 0x2c, 0xd6, 0x0d, 0x02, 0x38, 0x43, 0xd0, 0xfd, 0x01, 0x3d, 0x3e, 0xea, 0x5a, 0x2b, 0xfa, + 0x26, 0xd5, 0x7d, 0x28, 0x64, 0xb6, 0x11, 0x6e, 0xce, 0x85, 0x41, 0xe2, 0xf9, 0x01, 0x91, 0xc1, + 0x02, 0x1b, 0x96, 0x74, 0x64, 0x83, 0x76, 0x6a, 0xc3, 0xef, 0x2c, 0xc3, 0x39, 0x6d, 0xa1, 0x3a, + 0xce, 0x66, 0x23, 0xdc, 0xcc, 0x7a, 0x01, 0x29, 0x12, 0x66, 0x25, 0xee, 0x4f, 0x1d, 0x38, 0xd7, + 0x45, 0x19, 0x47, 0x5f, 0x75, 0x60, 0x6c, 0xf3, 0x67, 0xa2, 0x6f, 0x66, 0x33, 0xd0, 0xfb, 0x61, + 0x9c, 0x02, 0xe8, 0x4e, 0x24, 0xe6, 0x66, 0xc1, 0xf4, 0x50, 0xcd, 0x1a, 0xa5, 0x38, 0x83, 0xed, + 0xfe, 0x7a, 0x01, 0x72, 0xb8, 0xa0, 0xa7, 0x60, 0x88, 0x04, 0xd5, 0x56, 0xe8, 0x07, 0x89, 0x10, + 0x46, 0x4a, 0xea, 0x5d, 0x16, 0x70, 0xac, 0x30, 0xc4, 0xf9, 0x43, 0x0c, 0x4c, 0xa1, 0xe3, 0xfc, + 0x21, 0x5a, 0x9e, 0xe2, 0xa0, 0x1a, 0x4c, 0x78, 0xdc, 0xbf, 0xc2, 0xe6, 0x1e, 0x9b, 0xa6, 0x7d, + 0x07, 0x99, 0xa6, 0xa7, 0x99, 0xfb, 0x33, 0x43, 0x02, 0x77, 0x10, 0x45, 0xef, 0x81, 0x91, 0x76, + 0x4c, 0xca, 0xf3, 0xcb, 0x73, 0x11, 0xa9, 0xf2, 0x53, 0xb1, 0xe6, 0xf7, 0xbb, 0x96, 0x16, 0x61, + 0x1d, 0xcf, 0xfd, 0x67, 0x0e, 0x0c, 0xce, 0x7a, 0x95, 0xed, 0x70, 0x6b, 0x8b, 0x0e, 0x45, 0xb5, + 0x1d, 0xa5, 0x86, 0x2d, 0x6d, 0x28, 0xe6, 0x05, 0x1c, 0x2b, 0x0c, 0xb4, 0x01, 0x03, 0x7c, 0xc1, + 0x8b, 0x65, 0xf7, 0x0b, 0x5a, 0x7f, 0x54, 0x1c, 0x0f, 0x9b, 0x0e, 0xed, 0xc4, 0x6f, 0x4c, 0xf3, + 0x38, 0x9e, 0xe9, 0xa5, 0x20, 0x59, 0x8b, 0xca, 0x49, 0xe4, 0x07, 0xb5, 0x59, 0xa0, 0xdb, 0xc5, + 0x02, 0xa3, 0x81, 0x05, 0x2d, 0xda, 0x8d, 0xa6, 0x77, 0x4b, 0xb2, 0x13, 0xe2, 0x47, 0x75, 0x63, + 0x35, 0x2d, 0xc2, 0x3a, 0x9e, 0xfb, 0x87, 0x0e, 0x0c, 0xcf, 0x7a, 0xb1, 0x5f, 0xf9, 0x0b, 0x24, + 0x7c, 0x3e, 0x0c, 0xc5, 0x39, 0xaf, 0x52, 0x27, 0xe8, 0x5a, 0xf6, 0xd0, 0x3b, 0x72, 0xe9, 0x89, + 0x3c, 0x36, 0xea, 0x00, 0xac, 0x73, 0x1a, 0xeb, 0x76, 0x34, 0x76, 0xdf, 0x76, 0x60, 0x7c, 0xae, + 0xe1, 0x93, 0x20, 0x99, 0x23, 0x51, 0xc2, 0x06, 0xae, 0x06, 0x13, 0x15, 0x05, 0x39, 0xcc, 0xd0, + 0xb1, 0xd9, 0x3a, 0x97, 0x21, 0x81, 0x3b, 0x88, 0xa2, 0x2a, 0x9c, 0xe0, 0xb0, 0x74, 0x55, 0x1c, + 0x68, 0xfc, 0x98, 0x75, 0x74, 0xce, 0xa4, 0x80, 0xb3, 0x24, 0xdd, 0x9f, 0x38, 0x70, 0x6e, 0xae, + 0xd1, 0x8e, 0x13, 0x12, 0xdd, 0x10, 0xd2, 0x48, 0xaa, 0xb7, 0xe8, 0xa3, 0x30, 0xd4, 0x94, 0x1e, + 0x5b, 0xe7, 0x2e, 0x13, 0x98, 0xc9, 0x33, 0x8a, 0x4d, 0x1b, 0xb3, 0xb6, 0xf9, 0x32, 0xa9, 0x24, + 0xab, 0x24, 0xf1, 0xd2, 0xf0, 0x82, 0x14, 0x86, 0x15, 0x55, 0xd4, 0x82, 0xfe, 0xb8, 0x45, 0x2a, + 0xf6, 0xa2, 0xbb, 0x64, 0x1f, 0xca, 0x2d, 0x52, 0x49, 0xe5, 0x3a, 0xf3, 0x35, 0x32, 0x4e, 
0xee, + 0xff, 0x72, 0xe0, 0x7c, 0x97, 0xfe, 0xae, 0xf8, 0x71, 0x82, 0x5e, 0xea, 0xe8, 0xf3, 0x74, 0x6f, + 0x7d, 0xa6, 0xb5, 0x59, 0x8f, 0x95, 0x40, 0x90, 0x10, 0xad, 0xbf, 0x1f, 0x83, 0xa2, 0x9f, 0x90, + 0xa6, 0x34, 0x43, 0x5b, 0x30, 0x18, 0x75, 0xe9, 0xcb, 0xec, 0x98, 0x8c, 0xf1, 0x5b, 0xa2, 0xfc, + 0x30, 0x67, 0xeb, 0x6e, 0xc3, 0xc0, 0x5c, 0xd8, 0x68, 0x37, 0x83, 0xde, 0x22, 0x65, 0x92, 0xdd, + 0x16, 0xc9, 0xee, 0x91, 0x4c, 0xfd, 0x67, 0x25, 0xd2, 0x70, 0xd4, 0x97, 0x6f, 0x38, 0x72, 0xff, + 0xb9, 0x03, 0x74, 0x55, 0x55, 0x7d, 0xe1, 0x49, 0xe4, 0xe4, 0x38, 0xc3, 0x87, 0x75, 0x72, 0x77, + 0xf6, 0xa6, 0xc6, 0x14, 0xa2, 0x46, 0xff, 0xc3, 0x30, 0x10, 0xb3, 0x23, 0xb9, 0x68, 0xc3, 0x82, + 0xd4, 0x9f, 0xf9, 0x41, 0xfd, 0xce, 0xde, 0x54, 0x4f, 0x61, 0x9b, 0xd3, 0x8a, 0xb6, 0x70, 0x7a, + 0x0a, 0xaa, 0x54, 0xe1, 0x6b, 0x92, 0x38, 0xf6, 0x6a, 0xf2, 0x84, 0xa7, 0x14, 0xbe, 0x55, 0x0e, + 0xc6, 0xb2, 0xdc, 0xfd, 0x8a, 0x03, 0x63, 0x6a, 0xf3, 0xa2, 0xea, 0x3b, 0xba, 0xaa, 0x6f, 0x73, + 0x7c, 0xa6, 0x3c, 0xdc, 0x45, 0xe2, 0x88, 0x8d, 0x7c, 0xff, 0x5d, 0xf0, 0xdd, 0x30, 0x5a, 0x25, + 0x2d, 0x12, 0x54, 0x49, 0x50, 0xa1, 0xc7, 0x6f, 0x3a, 0x43, 0x86, 0x67, 0x27, 0xe8, 0x79, 0x73, + 0x5e, 0x83, 0x63, 0x03, 0xcb, 0xfd, 0x96, 0x03, 0x0f, 0x2a, 0x72, 0x65, 0x92, 0x60, 0x92, 0x44, + 0xbb, 0x2a, 0x4c, 0xf3, 0x60, 0xbb, 0xd5, 0x0d, 0xaa, 0xff, 0x26, 0x11, 0x67, 0x7e, 0xb8, 0xed, + 0x6a, 0x84, 0x6b, 0xcb, 0x8c, 0x08, 0x96, 0xd4, 0xdc, 0x5f, 0xed, 0x83, 0xd3, 0x7a, 0x23, 0x95, + 0x80, 0xf9, 0xa4, 0x03, 0xa0, 0x46, 0x80, 0x6e, 0xc8, 0x7d, 0x76, 0x7c, 0x57, 0xc6, 0x97, 0x4a, + 0x45, 0x90, 0x02, 0xc7, 0x58, 0x63, 0x8b, 0x5e, 0x80, 0xd1, 0x1d, 0xba, 0x28, 0xc8, 0x2a, 0x55, + 0x17, 0xe2, 0x52, 0x1f, 0x6b, 0xc6, 0x54, 0xde, 0xc7, 0xbc, 0x9e, 0xe2, 0xa5, 0xe6, 0x00, 0x0d, + 0x18, 0x63, 0x83, 0x14, 0x3d, 0xe9, 0x8c, 0x45, 0xfa, 0x27, 0x11, 0x36, 0xf1, 0x0f, 0x59, 0xec, + 0x63, 0xf6, 0xab, 0xcf, 0x9e, 0xbc, 0xbd, 0x37, 0x35, 0x66, 0x80, 0xb0, 0xd9, 0x08, 0xf7, 0x05, + 0x60, 0x63, 0xe1, 0x07, 0x6d, 0xb2, 0x16, 0xa0, 0x47, 0xa5, 0x8d, 0x8e, 0xfb, 0x55, 0x94, 0xe4, + 0xd0, 0xed, 0x74, 0xf4, 0x2c, 0xbb, 0xe5, 0xf9, 0x0d, 0x16, 0xbe, 0x48, 0xb1, 0xd4, 0x59, 0x76, + 0x81, 0x41, 0xb1, 0x28, 0x75, 0xa7, 0x61, 0x70, 0x8e, 0xf6, 0x9d, 0x44, 0x94, 0xae, 0x1e, 0x75, + 0x3c, 0x66, 0x44, 0x1d, 0xcb, 0xe8, 0xe2, 0x0d, 0x38, 0x33, 0x17, 0x11, 0x2f, 0x21, 0xe5, 0x67, + 0x66, 0xdb, 0x95, 0x6d, 0x92, 0xf0, 0xd0, 0xae, 0x18, 0xbd, 0x0f, 0xc6, 0x42, 0xb6, 0x65, 0xac, + 0x84, 0x95, 0x6d, 0x3f, 0xa8, 0x09, 0x93, 0xeb, 0x19, 0x41, 0x65, 0x6c, 0x4d, 0x2f, 0xc4, 0x26, + 0xae, 0xfb, 0xef, 0x0b, 0x30, 0x3a, 0x17, 0x85, 0x81, 0x14, 0x8b, 0xc7, 0xb0, 0x95, 0x25, 0xc6, + 0x56, 0x66, 0xc1, 0xdd, 0xa9, 0xb7, 0xbf, 0xdb, 0x76, 0x86, 0x5e, 0x57, 0x22, 0xb2, 0xcf, 0xd6, + 0x11, 0xc4, 0xe0, 0xcb, 0x68, 0xa7, 0x1f, 0xdb, 0x14, 0xa0, 0xee, 0x7f, 0x70, 0x60, 0x42, 0x47, + 0x3f, 0x86, 0x1d, 0x34, 0x36, 0x77, 0xd0, 0xab, 0x76, 0xfb, 0xdb, 0x65, 0xdb, 0x7c, 0x7b, 0xd0, + 0xec, 0x27, 0xf3, 0x75, 0x7f, 0xcd, 0x81, 0xd1, 0x9b, 0x1a, 0x40, 0x74, 0xd6, 0xb6, 0x12, 0xf3, + 0x0e, 0x29, 0x66, 0x74, 0xe8, 0x9d, 0xcc, 0x6f, 0x6c, 0xb4, 0x84, 0xca, 0xfd, 0xb8, 0x52, 0x27, + 0xd5, 0x76, 0x43, 0x6e, 0xdf, 0x6a, 0x48, 0xcb, 0x02, 0x8e, 0x15, 0x06, 0x7a, 0x09, 0x4e, 0x56, + 0xc2, 0xa0, 0xd2, 0x8e, 0x22, 0x12, 0x54, 0x76, 0xd7, 0xd9, 0x1d, 0x09, 0xb1, 0x21, 0x4e, 0x8b, + 0x6a, 0x27, 0xe7, 0xb2, 0x08, 0x77, 0xf2, 0x80, 0xb8, 0x93, 0x10, 0x77, 0x16, 0xc4, 0x74, 0xcb, + 0x12, 0x07, 0x2e, 0xcd, 0x59, 0xc0, 0xc0, 0x58, 0x96, 0xa3, 0x6b, 0x70, 0x2e, 0x4e, 0xbc, 0x28, + 0xf1, 0x83, 0xda, 
0x3c, 0xf1, 0xaa, 0x0d, 0x3f, 0xa0, 0x47, 0x89, 0x30, 0xa8, 0x72, 0x57, 0x62, + 0xdf, 0xec, 0xf9, 0xdb, 0x7b, 0x53, 0xe7, 0xca, 0xf9, 0x28, 0xb8, 0x5b, 0x5d, 0xf4, 0x61, 0x98, + 0x14, 0xee, 0x88, 0xad, 0x76, 0xe3, 0xb9, 0x70, 0x33, 0xbe, 0xe2, 0xc7, 0xf4, 0x1c, 0xbf, 0xe2, + 0x37, 0xfd, 0x84, 0x39, 0x0c, 0x8b, 0xb3, 0x17, 0x6e, 0xef, 0x4d, 0x4d, 0x96, 0xbb, 0x62, 0xe1, + 0x7d, 0x28, 0x20, 0x0c, 0x67, 0xb9, 0xf0, 0xeb, 0xa0, 0x3d, 0xc8, 0x68, 0x4f, 0xde, 0xde, 0x9b, + 0x3a, 0xbb, 0x90, 0x8b, 0x81, 0xbb, 0xd4, 0xa4, 0x5f, 0x30, 0xf1, 0x9b, 0xe4, 0xd5, 0x30, 0x20, + 0x2c, 0x50, 0x45, 0xfb, 0x82, 0x1b, 0x02, 0x8e, 0x15, 0x06, 0x7a, 0x39, 0x9d, 0x89, 0x74, 0xb9, + 0x88, 0x80, 0x93, 0x83, 0x4b, 0x38, 0x76, 0x34, 0xb9, 0xa1, 0x51, 0x62, 0x91, 0x94, 0x06, 0x6d, + 0xf4, 0x29, 0x07, 0x46, 0xe3, 0x24, 0x54, 0xf7, 0x1a, 0x44, 0xc4, 0x89, 0x85, 0x69, 0x5f, 0xd6, + 0xa8, 0x72, 0xc5, 0x47, 0x87, 0x60, 0x83, 0x2b, 0x7a, 0x27, 0x0c, 0xcb, 0x09, 0x1c, 0x97, 0x46, + 0x98, 0xae, 0xc4, 0x8e, 0x71, 0x72, 0x7e, 0xc7, 0x38, 0x2d, 0xa7, 0xaa, 0xec, 0xcd, 0x3a, 0x09, + 0x58, 0xcc, 0xad, 0xa6, 0xca, 0xde, 0xa8, 0x93, 0x00, 0xb3, 0x12, 0xf7, 0xc7, 0x7d, 0x80, 0x3a, + 0x05, 0x1f, 0x5a, 0x86, 0x01, 0xaf, 0x92, 0xf8, 0x3b, 0x32, 0xde, 0xf0, 0xd1, 0x3c, 0xa5, 0x80, + 0x0f, 0x20, 0x26, 0x5b, 0x84, 0xce, 0x7b, 0x92, 0x4a, 0xcb, 0x19, 0x56, 0x15, 0x0b, 0x12, 0x28, + 0x84, 0x93, 0x0d, 0x2f, 0x4e, 0x64, 0x0b, 0xab, 0xf4, 0x43, 0x8a, 0xed, 0xe2, 0xe7, 0x7b, 0xfb, + 0x54, 0xb4, 0xc6, 0xec, 0x19, 0xba, 0x1e, 0x57, 0xb2, 0x84, 0x70, 0x27, 0x6d, 0xf4, 0x71, 0xa6, + 0x5d, 0x71, 0xd5, 0x57, 0xaa, 0x35, 0xcb, 0x56, 0x34, 0x0f, 0x4e, 0xd3, 0xd0, 0xac, 0x04, 0x1b, + 0xac, 0xb1, 0x44, 0x17, 0x61, 0x98, 0xad, 0x1b, 0x52, 0x25, 0x7c, 0xf5, 0xf7, 0xa5, 0x4a, 0x70, + 0x59, 0x16, 0xe0, 0x14, 0x47, 0xd3, 0x32, 0xf8, 0x82, 0xef, 0xa2, 0x65, 0xa0, 0x67, 0xa1, 0xd8, + 0xaa, 0x7b, 0xb1, 0x8c, 0x61, 0x77, 0xa5, 0xd4, 0x5e, 0xa7, 0x40, 0x26, 0x9a, 0xb4, 0x6f, 0xc9, + 0x80, 0x98, 0x57, 0x70, 0xff, 0x05, 0xc0, 0xe0, 0xfc, 0xcc, 0xe2, 0x86, 0x17, 0x6f, 0xf7, 0x70, + 0x06, 0xa2, 0xcb, 0x50, 0x28, 0xab, 0x59, 0x41, 0x2a, 0x95, 0x58, 0xac, 0x30, 0x50, 0x00, 0x03, + 0x7e, 0x40, 0x25, 0x4f, 0x69, 0xdc, 0x96, 0x9f, 0x41, 0x9d, 0xe7, 0x98, 0x21, 0x68, 0x89, 0x51, + 0xc7, 0x82, 0x0b, 0x7a, 0x1d, 0x86, 0x3d, 0x79, 0x85, 0x48, 0xec, 0xff, 0xcb, 0x36, 0x0c, 0xe8, + 0x82, 0xa4, 0x1e, 0xc2, 0x24, 0x40, 0x38, 0x65, 0x88, 0x3e, 0xe1, 0xc0, 0x88, 0xec, 0x3a, 0x26, + 0x5b, 0xc2, 0xb7, 0xbd, 0x6a, 0xaf, 0xcf, 0x98, 0x6c, 0xf1, 0xf8, 0x16, 0x0d, 0x80, 0x75, 0x96, + 0x1d, 0x67, 0xa6, 0x62, 0x2f, 0x67, 0x26, 0x74, 0x13, 0x86, 0x6f, 0xfa, 0x49, 0x9d, 0xed, 0xf0, + 0xc2, 0xa7, 0xb6, 0x70, 0xef, 0xad, 0xa6, 0xe4, 0xd2, 0x11, 0xbb, 0x21, 0x19, 0xe0, 0x94, 0x17, + 0x5d, 0x0e, 0xf4, 0x07, 0xbb, 0x82, 0xc5, 0xf6, 0x86, 0x61, 0xb3, 0x02, 0x2b, 0xc0, 0x29, 0x0e, + 0x1d, 0xe2, 0x51, 0xfa, 0xab, 0x4c, 0x5e, 0x69, 0x53, 0xd1, 0x22, 0x62, 0x16, 0x2d, 0xcc, 0x2b, + 0x49, 0x91, 0x0f, 0xd6, 0x0d, 0x8d, 0x07, 0x36, 0x38, 0x2a, 0xd1, 0x39, 0xdc, 0x4d, 0x74, 0xa2, + 0xd7, 0xf9, 0x19, 0x8e, 0x1f, 0x26, 0xc4, 0x6e, 0xb0, 0x62, 0xe7, 0x7c, 0xc3, 0x69, 0xf2, 0x6b, + 0x0d, 0xe9, 0x6f, 0xac, 0xf1, 0xa3, 0x12, 0x23, 0x0c, 0x2e, 0xdf, 0xf2, 0x13, 0x71, 0x19, 0x43, + 0x49, 0x8c, 0x35, 0x06, 0xc5, 0xa2, 0x94, 0xc7, 0x6e, 0xd0, 0x49, 0x10, 0x8b, 0x5d, 0x40, 0x8b, + 0xdd, 0x60, 0x60, 0x2c, 0xcb, 0xd1, 0xdf, 0x75, 0xa0, 0x58, 0x0f, 0xc3, 0xed, 0xb8, 0x34, 0xc6, + 0x26, 0x87, 0x05, 0x9d, 0x5a, 0x48, 0x9c, 0xe9, 0x2b, 0x94, 0xac, 0x79, 0xbd, 0xac, 0xc8, 0x60, + 0x77, 0xf6, 0xa6, 0xc6, 0x57, 0xfc, 0x2d, 
0x52, 0xd9, 0xad, 0x34, 0x08, 0x83, 0xbc, 0xf9, 0xb6, + 0x06, 0xb9, 0xbc, 0x43, 0x82, 0x04, 0xf3, 0x56, 0x4d, 0x7e, 0xde, 0x01, 0x48, 0x09, 0xe5, 0x38, + 0x49, 0x89, 0x19, 0x56, 0x60, 0xe1, 0x40, 0x6d, 0x34, 0x4d, 0xf7, 0xba, 0xfe, 0x2b, 0x07, 0x46, + 0x68, 0xe7, 0xa4, 0x08, 0x7c, 0x1c, 0x06, 0x12, 0x2f, 0xaa, 0x11, 0xe9, 0x28, 0x50, 0x9f, 0x63, + 0x83, 0x41, 0xb1, 0x28, 0x45, 0x01, 0x14, 0x13, 0x2f, 0xde, 0x96, 0x6a, 0xfc, 0x92, 0xb5, 0x21, + 0x4e, 0x35, 0x78, 0xfa, 0x2b, 0xc6, 0x9c, 0x0d, 0x7a, 0x02, 0x86, 0xe8, 0xd6, 0xb1, 0xe0, 0xc5, + 0x32, 0x76, 0x67, 0x94, 0x0a, 0xf1, 0x05, 0x01, 0xc3, 0xaa, 0xd4, 0xfd, 0xf5, 0x02, 0xf4, 0xcf, + 0xf3, 0x03, 0xdd, 0x40, 0x1c, 0xb6, 0xa3, 0x0a, 0x11, 0x8a, 0xbd, 0x85, 0x39, 0x4d, 0xe9, 0x96, + 0x19, 0x4d, 0xed, 0x48, 0xc5, 0x7e, 0x63, 0xc1, 0x0b, 0x7d, 0xd9, 0x81, 0xf1, 0x24, 0xf2, 0x82, + 0x78, 0x8b, 0xb9, 0x64, 0xfc, 0x30, 0x10, 0x43, 0x64, 0x61, 0x16, 0x6e, 0x18, 0x74, 0xcb, 0x09, + 0x69, 0xa5, 0x9e, 0x21, 0xb3, 0x0c, 0x67, 0xda, 0xe0, 0xfe, 0x86, 0x03, 0x90, 0xb6, 0x1e, 0x7d, + 0xce, 0x81, 0x31, 0x4f, 0x8f, 0x19, 0x15, 0x63, 0xb4, 0x66, 0xcf, 0x7f, 0xcb, 0xc8, 0x72, 0x5b, + 0x86, 0x01, 0xc2, 0x26, 0x63, 0xf7, 0x3d, 0x50, 0x64, 0xab, 0x83, 0x1d, 0x7a, 0x84, 0xed, 0x3b, + 0x6b, 0xec, 0x92, 0x36, 0x71, 0xac, 0x30, 0xdc, 0x97, 0x60, 0xfc, 0xf2, 0x2d, 0x52, 0x69, 0x27, + 0x61, 0xc4, 0x2d, 0xff, 0x5d, 0xee, 0x08, 0x39, 0x87, 0xba, 0x23, 0xf4, 0x5d, 0x07, 0x46, 0xb4, + 0x00, 0x42, 0xba, 0x53, 0xd7, 0xe6, 0xca, 0xdc, 0xc0, 0x21, 0x86, 0x6a, 0xd9, 0x4a, 0x88, 0x22, + 0x27, 0x99, 0x6e, 0x23, 0x0a, 0x84, 0x53, 0x86, 0x77, 0x09, 0xf0, 0x73, 0x7f, 0xdf, 0x81, 0x33, + 0xb9, 0xd1, 0x8e, 0xf7, 0xb9, 0xd9, 0x86, 0x93, 0xbd, 0xd0, 0x83, 0x93, 0xfd, 0xb7, 0x1d, 0x48, + 0x29, 0x51, 0x51, 0xb4, 0x99, 0xb6, 0x5c, 0x13, 0x45, 0x82, 0x93, 0x28, 0x45, 0xaf, 0xc3, 0x39, + 0xf3, 0x0b, 0x1e, 0xd2, 0xdf, 0xc2, 0x0f, 0xa7, 0xf9, 0x94, 0x70, 0x37, 0x16, 0xee, 0xd7, 0x1d, + 0x28, 0x2e, 0x7a, 0xed, 0x1a, 0xe9, 0xc9, 0x5c, 0x46, 0xe5, 0x58, 0x44, 0xbc, 0x46, 0x22, 0x8f, + 0x0e, 0x42, 0x8e, 0x61, 0x01, 0xc3, 0xaa, 0x14, 0xcd, 0xc0, 0x70, 0xd8, 0x22, 0x86, 0x8f, 0xf0, + 0x51, 0x39, 0x7a, 0x6b, 0xb2, 0x80, 0x6e, 0x3b, 0x8c, 0xbb, 0x82, 0xe0, 0xb4, 0x96, 0xfb, 0x8d, + 0x01, 0x18, 0xd1, 0xee, 0xc5, 0x50, 0x5d, 0x20, 0x22, 0xad, 0x30, 0xab, 0x2f, 0xd3, 0x09, 0x83, + 0x59, 0x09, 0x5d, 0x83, 0x11, 0xd9, 0xf1, 0x63, 0x2e, 0xb6, 0x8c, 0x35, 0x88, 0x05, 0x1c, 0x2b, + 0x0c, 0x34, 0x05, 0xc5, 0x2a, 0x69, 0x25, 0x75, 0xd6, 0xbc, 0x7e, 0x1e, 0x1c, 0x38, 0x4f, 0x01, + 0x98, 0xc3, 0x29, 0xc2, 0x16, 0x49, 0x2a, 0x75, 0x66, 0x19, 0x16, 0xd1, 0x83, 0x0b, 0x14, 0x80, + 0x39, 0x3c, 0xc7, 0x8b, 0x59, 0x3c, 0x7a, 0x2f, 0xe6, 0x80, 0x65, 0x2f, 0x26, 0x6a, 0xc1, 0xa9, + 0x38, 0xae, 0xaf, 0x47, 0xfe, 0x8e, 0x97, 0x90, 0x74, 0xf6, 0x0d, 0x1e, 0x84, 0xcf, 0x39, 0x76, + 0x53, 0xbd, 0x7c, 0x25, 0x4b, 0x05, 0xe7, 0x91, 0x46, 0x65, 0x38, 0xe3, 0x07, 0x31, 0xa9, 0xb4, + 0x23, 0xb2, 0x54, 0x0b, 0xc2, 0x88, 0x5c, 0x09, 0x63, 0x4a, 0x4e, 0xdc, 0xb3, 0x55, 0xf1, 0xb4, + 0x4b, 0x79, 0x48, 0x38, 0xbf, 0x2e, 0x5a, 0x84, 0x93, 0x55, 0x3f, 0xf6, 0x36, 0x1b, 0xa4, 0xdc, + 0xde, 0x6c, 0x86, 0xfc, 0x68, 0x3e, 0xcc, 0x08, 0x3e, 0x28, 0xed, 0x48, 0xf3, 0x59, 0x04, 0xdc, + 0x59, 0x07, 0x3d, 0x0b, 0xa3, 0xb1, 0x1f, 0xd4, 0x1a, 0x64, 0x36, 0xf2, 0x82, 0x4a, 0x5d, 0x5c, + 0xd0, 0x55, 0xf6, 0xf6, 0xb2, 0x56, 0x86, 0x0d, 0x4c, 0xb6, 0xe6, 0x79, 0x9d, 0x8c, 0x36, 0x28, + 0xb0, 0x45, 0x29, 0x9a, 0x81, 0x13, 0xb2, 0x0f, 0xe5, 0x6d, 0xbf, 0xb5, 0xb1, 0x52, 0x66, 0x5a, + 0xe1, 0x50, 0x1a, 0x2d, 0xb4, 0x64, 0x16, 0xe3, 0x2c, 0xbe, 0xfb, 
0x43, 0x07, 0x46, 0xf5, 0x70, + 0x78, 0xaa, 0xac, 0x43, 0x7d, 0x7e, 0xa1, 0xcc, 0xb7, 0x13, 0x7b, 0x4a, 0xc3, 0x15, 0x45, 0x33, + 0x3d, 0x6f, 0xa7, 0x30, 0xac, 0xf1, 0xec, 0xe1, 0x72, 0xfb, 0xa3, 0x50, 0xdc, 0x0a, 0xa9, 0x4e, + 0xd3, 0x67, 0xda, 0xfa, 0x17, 0x28, 0x10, 0xf3, 0x32, 0xf7, 0xbf, 0x39, 0x70, 0x36, 0x3f, 0xd2, + 0xff, 0x67, 0xa1, 0x93, 0x97, 0x00, 0x68, 0x57, 0x8c, 0x7d, 0x41, 0x4b, 0x6f, 0x21, 0x4b, 0xb0, + 0x86, 0xd5, 0x5b, 0xb7, 0xff, 0x65, 0x01, 0x34, 0x9e, 0xe8, 0x0b, 0x0e, 0x8c, 0x51, 0xb6, 0xcb, + 0xd1, 0xa6, 0xd1, 0xdb, 0x35, 0x3b, 0xbd, 0x55, 0x64, 0x53, 0x97, 0x86, 0x01, 0xc6, 0x26, 0x73, + 0xf4, 0x4e, 0x18, 0xf6, 0xaa, 0xd5, 0x88, 0xc4, 0xb1, 0x72, 0x0e, 0x32, 0x83, 0xd7, 0x8c, 0x04, + 0xe2, 0xb4, 0x9c, 0xca, 0xe1, 0x7a, 0x75, 0x2b, 0xa6, 0xa2, 0x4d, 0xc8, 0x7e, 0x25, 0x87, 0x29, + 0x13, 0x0a, 0xc7, 0x0a, 0x03, 0x5d, 0x87, 0xb3, 0x55, 0x2f, 0xf1, 0xb8, 0x0a, 0x48, 0xa2, 0xf5, + 0x28, 0x4c, 0x48, 0x85, 0xed, 0x1b, 0x3c, 0x88, 0xf5, 0x82, 0xa8, 0x7b, 0x76, 0x3e, 0x17, 0x0b, + 0x77, 0xa9, 0xed, 0xfe, 0x4a, 0x3f, 0x98, 0x7d, 0x42, 0x55, 0x38, 0xb1, 0x1d, 0x6d, 0xce, 0xb1, + 0x98, 0x8d, 0xc3, 0xc4, 0x4e, 0xb0, 0x98, 0x86, 0x65, 0x93, 0x02, 0xce, 0x92, 0x14, 0x5c, 0x96, + 0xc9, 0x6e, 0xe2, 0x6d, 0x1e, 0x3a, 0x72, 0x62, 0xd9, 0xa4, 0x80, 0xb3, 0x24, 0xd1, 0x7b, 0x60, + 0x64, 0x3b, 0xda, 0x94, 0xbb, 0x47, 0x36, 0x0c, 0x67, 0x39, 0x2d, 0xc2, 0x3a, 0x1e, 0xfd, 0x34, + 0xdb, 0xd1, 0x26, 0xdd, 0xb0, 0x65, 0x12, 0x09, 0xf5, 0x69, 0x96, 0x05, 0x1c, 0x2b, 0x0c, 0xd4, + 0x02, 0xb4, 0x2d, 0x47, 0x4f, 0x45, 0xa8, 0x88, 0x4d, 0xae, 0xf7, 0x00, 0x17, 0x76, 0x35, 0x60, + 0xb9, 0x83, 0x0e, 0xce, 0xa1, 0x8d, 0x5e, 0x80, 0x73, 0xdb, 0xd1, 0xa6, 0xd0, 0x63, 0xd6, 0x23, + 0x3f, 0xa8, 0xf8, 0x2d, 0x23, 0x61, 0xc4, 0x94, 0x68, 0xee, 0xb9, 0xe5, 0x7c, 0x34, 0xdc, 0xad, + 0xbe, 0xfb, 0x3b, 0xfd, 0xc0, 0xae, 0xba, 0x52, 0x31, 0xdd, 0x24, 0x49, 0x3d, 0xac, 0x66, 0x55, + 0xb3, 0x55, 0x06, 0xc5, 0xa2, 0x54, 0x06, 0xc0, 0x16, 0xba, 0x04, 0xc0, 0xde, 0x84, 0xc1, 0x3a, + 0xf1, 0xaa, 0x24, 0x92, 0xc6, 0xcd, 0x15, 0x3b, 0x97, 0x73, 0xaf, 0x30, 0xa2, 0xa9, 0x85, 0x80, + 0xff, 0x8e, 0xb1, 0xe4, 0x86, 0xde, 0x0b, 0xe3, 0x54, 0xc7, 0x0a, 0xdb, 0x89, 0xf4, 0x4f, 0x70, + 0xe3, 0x26, 0xdb, 0xec, 0x37, 0x8c, 0x12, 0x9c, 0xc1, 0x44, 0xf3, 0x30, 0x21, 0x7c, 0x09, 0xca, + 0x68, 0x2a, 0x06, 0x56, 0x65, 0xf2, 0x28, 0x67, 0xca, 0x71, 0x47, 0x0d, 0x16, 0xc0, 0x18, 0x56, + 0xb9, 0x3b, 0x59, 0x0f, 0x60, 0x0c, 0xab, 0xbb, 0x98, 0x95, 0xa0, 0x57, 0x61, 0x88, 0xfe, 0x5d, + 0x88, 0xc2, 0xa6, 0x30, 0x1b, 0xad, 0xdb, 0x19, 0x1d, 0xca, 0x43, 0x1c, 0x62, 0x99, 0xee, 0x39, + 0x2b, 0xb8, 0x60, 0xc5, 0x8f, 0x1e, 0xa5, 0xf4, 0xed, 0xf2, 0x3a, 0x89, 0xfc, 0xad, 0x5d, 0xa6, + 0xcf, 0x0c, 0xa5, 0x47, 0xa9, 0xa5, 0x0e, 0x0c, 0x9c, 0x53, 0xcb, 0xfd, 0x42, 0x01, 0x46, 0xf5, + 0x1b, 0xd3, 0x77, 0x8b, 0x8a, 0x8e, 0xd3, 0x49, 0xc1, 0x0f, 0xce, 0x57, 0x2c, 0x74, 0xfb, 0x6e, + 0x13, 0xa2, 0x0e, 0xfd, 0x5e, 0x5b, 0x28, 0xb2, 0x56, 0xec, 0x73, 0xac, 0xc7, 0xed, 0xa4, 0xce, + 0xaf, 0xd6, 0xb1, 0x78, 0x65, 0xc6, 0xc1, 0xfd, 0x74, 0x1f, 0x0c, 0xc9, 0x42, 0xf4, 0x29, 0x07, + 0x20, 0x8d, 0x1b, 0x13, 0xa2, 0x74, 0xdd, 0x46, 0x50, 0x91, 0x1e, 0xf2, 0xa6, 0x99, 0xf9, 0x15, + 0x1c, 0x6b, 0x7c, 0x51, 0x02, 0x03, 0x21, 0x6d, 0xdc, 0x25, 0x7b, 0xb7, 0xfe, 0xd7, 0x28, 0xe3, + 0x4b, 0x8c, 0x7b, 0x6a, 0xd1, 0x63, 0x30, 0x2c, 0x78, 0xd1, 0xc3, 0xe9, 0xa6, 0x0c, 0x67, 0xb4, + 0x67, 0xfd, 0x56, 0x11, 0x92, 0xe9, 0x59, 0x53, 0x81, 0x70, 0xca, 0xd0, 0x7d, 0x1a, 0xc6, 0xcd, + 0xc5, 0x40, 0x0f, 0x2b, 0x9b, 0xbb, 0x09, 0xe1, 0xa6, 0x90, 0x51, 0x7e, 0x58, 0x99, 0xa5, 
0x00, + 0xcc, 0xe1, 0xee, 0x0f, 0x1c, 0x80, 0x54, 0xbc, 0xf4, 0xe0, 0x7d, 0x78, 0x54, 0xb7, 0xe3, 0x75, + 0x3b, 0x11, 0x7e, 0x1c, 0x86, 0xd9, 0x3f, 0x6c, 0xa1, 0xf7, 0xd9, 0x0a, 0x3e, 0x48, 0xdb, 0x29, + 0x96, 0x3a, 0xd3, 0x35, 0xae, 0x4b, 0x46, 0x38, 0xe5, 0xe9, 0x86, 0x30, 0x91, 0xc5, 0x46, 0x1f, + 0x82, 0xd1, 0x58, 0x6e, 0xab, 0xe9, 0xfd, 0xbf, 0x1e, 0xb7, 0x5f, 0xee, 0xfa, 0xd3, 0xaa, 0x63, + 0x83, 0x98, 0xbb, 0x06, 0x03, 0x56, 0x87, 0xd0, 0xfd, 0x8e, 0x03, 0xc3, 0xcc, 0xfb, 0x5a, 0x8b, + 0xbc, 0x66, 0x5a, 0xa5, 0x6f, 0x9f, 0x51, 0x8f, 0x61, 0x90, 0x9b, 0x0f, 0x64, 0xd4, 0x92, 0x05, + 0x29, 0xc3, 0x93, 0xf5, 0xa5, 0x52, 0x86, 0xdb, 0x29, 0x62, 0x2c, 0x39, 0xb9, 0x9f, 0x29, 0xc0, + 0xc0, 0x52, 0xd0, 0x6a, 0xff, 0xa5, 0x4f, 0x18, 0xb7, 0x0a, 0xfd, 0x4b, 0x09, 0x69, 0x9a, 0x79, + 0x0d, 0x47, 0x67, 0x1f, 0xd3, 0x73, 0x1a, 0x96, 0xcc, 0x9c, 0x86, 0xd8, 0xbb, 0x29, 0x83, 0xfa, + 0x84, 0xf9, 0x3a, 0xbd, 0x03, 0xf9, 0x14, 0x0c, 0xaf, 0x78, 0x9b, 0xa4, 0xb1, 0x4c, 0x76, 0xd9, + 0x8d, 0x45, 0x1e, 0x60, 0xe2, 0xa4, 0x36, 0x07, 0x23, 0x18, 0x64, 0x1e, 0xc6, 0x19, 0xb6, 0x5a, + 0x0c, 0xf4, 0x44, 0x42, 0xd2, 0xa4, 0x50, 0x8e, 0x79, 0x22, 0xd1, 0x12, 0x42, 0x69, 0x58, 0xee, + 0x34, 0x8c, 0xa4, 0x54, 0x7a, 0xe0, 0xfa, 0xd3, 0x02, 0x8c, 0x19, 0x56, 0x78, 0xc3, 0x37, 0xe9, + 0xdc, 0xd5, 0x37, 0x69, 0xf8, 0x0a, 0x0b, 0xf7, 0xdb, 0x57, 0xd8, 0x77, 0xfc, 0xbe, 0x42, 0xf3, + 0x23, 0xf5, 0xf7, 0xf4, 0x91, 0x1a, 0xd0, 0xbf, 0xe2, 0x07, 0xdb, 0xbd, 0xc9, 0x99, 0xb8, 0x12, + 0xb6, 0x3a, 0xe4, 0x4c, 0x99, 0x02, 0x31, 0x2f, 0x93, 0x9a, 0x4b, 0x5f, 0xbe, 0xe6, 0xe2, 0x7e, + 0xca, 0x81, 0xd1, 0x55, 0x2f, 0xf0, 0xb7, 0x48, 0x9c, 0xb0, 0x79, 0x95, 0x1c, 0xe9, 0xcd, 0xb5, + 0xd1, 0x2e, 0x39, 0x18, 0xde, 0x74, 0xe0, 0xe4, 0x2a, 0x69, 0x86, 0xfe, 0xab, 0x5e, 0x1a, 0x33, + 0x4b, 0xdb, 0x5e, 0xf7, 0x13, 0x11, 0x22, 0xa8, 0xda, 0x7e, 0xc5, 0x4f, 0x30, 0x85, 0xdf, 0xc5, + 0xc4, 0xcc, 0xee, 0x84, 0xd0, 0x03, 0x9a, 0x76, 0x9b, 0x32, 0x8d, 0x86, 0x95, 0x05, 0x38, 0xc5, + 0x71, 0x7f, 0xd7, 0x81, 0x41, 0xde, 0x08, 0x15, 0x66, 0xec, 0x74, 0xa1, 0x5d, 0x87, 0x22, 0xab, + 0x27, 0x66, 0xf5, 0xa2, 0x05, 0xf5, 0x87, 0x92, 0xe3, 0x6b, 0x90, 0xfd, 0x8b, 0x39, 0x03, 0x76, + 0x6c, 0xf1, 0x6e, 0xcd, 0xa8, 0x70, 0xe1, 0xf4, 0xd8, 0xc2, 0xa0, 0x58, 0x94, 0xba, 0xdf, 0xe8, + 0x83, 0x21, 0x95, 0x7a, 0x8c, 0x25, 0x86, 0x08, 0x82, 0x30, 0xf1, 0x78, 0x18, 0x06, 0x97, 0xd5, + 0x1f, 0xb2, 0x97, 0xfa, 0x6c, 0x7a, 0x26, 0xa5, 0xce, 0x5d, 0x8b, 0xea, 0x10, 0xaa, 0x95, 0x60, + 0xbd, 0x11, 0xe8, 0x63, 0x30, 0xd0, 0xa0, 0xd2, 0x47, 0x8a, 0xee, 0xeb, 0x16, 0x9b, 0xc3, 0xc4, + 0x9a, 0x68, 0x89, 0x1a, 0x21, 0x0e, 0xc4, 0x82, 0xeb, 0xe4, 0xfb, 0x61, 0x22, 0xdb, 0xea, 0xbb, + 0x5d, 0xf6, 0x1c, 0xd6, 0xaf, 0x8a, 0xfe, 0x55, 0x21, 0x3d, 0x0f, 0x5e, 0xd5, 0x7d, 0x1e, 0x46, + 0x56, 0x49, 0x12, 0xf9, 0x15, 0x46, 0xe0, 0x6e, 0x93, 0xab, 0x27, 0xfd, 0xe1, 0xb3, 0x6c, 0xb2, + 0x52, 0x9a, 0x31, 0x7a, 0x1d, 0xa0, 0x15, 0x85, 0xf4, 0xfc, 0x4a, 0xda, 0xf2, 0x63, 0x5b, 0xd0, + 0x87, 0xd7, 0x15, 0x4d, 0xee, 0x0d, 0x4f, 0x7f, 0x63, 0x8d, 0x9f, 0xfb, 0x22, 0x14, 0x57, 0xdb, + 0x09, 0xb9, 0xd5, 0x83, 0xc4, 0x3a, 0x68, 0xf6, 0x03, 0xf7, 0x43, 0x30, 0xca, 0x68, 0x5f, 0x09, + 0x1b, 0x74, 0x5b, 0xa5, 0x43, 0xd3, 0xa4, 0xbf, 0xb3, 0xfe, 0x0a, 0x86, 0x84, 0x79, 0x19, 0x5d, + 0x32, 0xf5, 0xb0, 0x51, 0x55, 0x37, 0xc1, 0xd4, 0x84, 0xb8, 0xc2, 0xa0, 0x58, 0x94, 0xba, 0x9f, + 0x2c, 0xc0, 0x08, 0xab, 0x28, 0xc4, 0xcd, 0x2e, 0x0c, 0xd6, 0x39, 0x1f, 0x31, 0x86, 0x16, 0xe2, + 0xcb, 0xf4, 0xd6, 0x6b, 0x67, 0x39, 0x0e, 0xc0, 0x92, 0x1f, 0x65, 0x7d, 0xd3, 0xf3, 0x13, 0xca, + 0xba, 0x70, 0xb4, 
0xac, 0x6f, 0x70, 0x36, 0x58, 0xf2, 0x73, 0x7f, 0x09, 0xd8, 0x0d, 0xeb, 0x85, + 0x86, 0x57, 0xe3, 0x23, 0x17, 0x6e, 0x93, 0xaa, 0x90, 0xb9, 0xda, 0xc8, 0x51, 0x28, 0x16, 0xa5, + 0xfc, 0xd6, 0x6a, 0x12, 0xf9, 0x2a, 0x32, 0x5b, 0xbb, 0xb5, 0xca, 0xc0, 0x32, 0x0e, 0xbf, 0xea, + 0x7e, 0xa5, 0x00, 0xc0, 0x12, 0xd5, 0xf1, 0x8b, 0xd1, 0xbf, 0x20, 0x83, 0xa8, 0x4c, 0x1f, 0xa7, + 0x0a, 0xa2, 0x62, 0x57, 0xbf, 0xf5, 0xe0, 0x29, 0xfd, 0xc2, 0x44, 0x61, 0xff, 0x0b, 0x13, 0xa8, + 0x05, 0x83, 0x61, 0x3b, 0xa1, 0xba, 0xaa, 0xd8, 0xec, 0x2d, 0xb8, 0xf8, 0xd7, 0x38, 0x41, 0x7e, + 0xcb, 0x40, 0xfc, 0xc0, 0x92, 0x0d, 0x7a, 0x16, 0x86, 0x5a, 0x51, 0x58, 0xa3, 0x7b, 0xb7, 0xd8, + 0xde, 0x1f, 0x92, 0xfa, 0xd0, 0xba, 0x80, 0xdf, 0xd1, 0xfe, 0xc7, 0x0a, 0xdb, 0xfd, 0xe3, 0x09, + 0x3e, 0x2e, 0x62, 0xee, 0x4d, 0x42, 0xc1, 0x97, 0x96, 0x29, 0x10, 0x24, 0x0a, 0x4b, 0xf3, 0xb8, + 0xe0, 0x57, 0xd5, 0xba, 0x2a, 0x74, 0x5d, 0x57, 0xef, 0x81, 0x91, 0xaa, 0x1f, 0xb7, 0x1a, 0xde, + 0xee, 0xd5, 0x1c, 0xb3, 0xe0, 0x7c, 0x5a, 0x84, 0x75, 0x3c, 0xf4, 0x94, 0xb8, 0x1e, 0xd3, 0x6f, + 0x98, 0x82, 0xe4, 0xf5, 0x98, 0xf4, 0xe2, 0x3d, 0xbf, 0x19, 0x93, 0x4d, 0x50, 0x50, 0xec, 0x39, + 0x41, 0x41, 0x56, 0x13, 0x1b, 0x38, 0x7e, 0x4d, 0xec, 0x7d, 0x30, 0x26, 0x7f, 0x32, 0xf5, 0xa8, + 0x74, 0x9a, 0xb5, 0x5e, 0x99, 0xc1, 0x37, 0xf4, 0x42, 0x6c, 0xe2, 0xa6, 0x93, 0x76, 0xb0, 0xd7, + 0x49, 0x7b, 0x09, 0x60, 0x33, 0x6c, 0x07, 0x55, 0x2f, 0xda, 0x5d, 0x9a, 0x17, 0xc1, 0xb4, 0x4a, + 0xf1, 0x9b, 0x55, 0x25, 0x58, 0xc3, 0xd2, 0x27, 0xfa, 0xf0, 0x5d, 0x26, 0xfa, 0x87, 0x60, 0x98, + 0x05, 0x1e, 0x93, 0xea, 0x4c, 0x22, 0xa2, 0x9f, 0x0e, 0x12, 0xcd, 0x99, 0xc6, 0x43, 0x4a, 0x22, + 0x38, 0xa5, 0x87, 0x3e, 0x0c, 0xb0, 0xe5, 0x07, 0x7e, 0x5c, 0x67, 0xd4, 0x47, 0x0e, 0x4c, 0x5d, + 0xf5, 0x73, 0x41, 0x51, 0xc1, 0x1a, 0x45, 0xf4, 0x12, 0x9c, 0x24, 0x71, 0xe2, 0x37, 0xbd, 0x84, + 0x54, 0xd5, 0x85, 0xd2, 0x12, 0xb3, 0x65, 0xaa, 0xd0, 0xef, 0xcb, 0x59, 0x84, 0x3b, 0x79, 0x40, + 0xdc, 0x49, 0xc8, 0x58, 0x91, 0x93, 0x07, 0x59, 0x91, 0xe8, 0x7f, 0x3a, 0x70, 0x32, 0x22, 0x3c, + 0x24, 0x26, 0x56, 0x0d, 0x3b, 0xc3, 0xc4, 0x71, 0xc5, 0x46, 0x0e, 0x78, 0x95, 0xec, 0x05, 0x67, + 0xb9, 0x70, 0xc5, 0x85, 0xc8, 0xde, 0x77, 0x94, 0xdf, 0xc9, 0x03, 0xbe, 0xf9, 0xf6, 0xd4, 0x54, + 0xe7, 0x5b, 0x04, 0x8a, 0x38, 0x5d, 0x79, 0x7f, 0xf3, 0xed, 0xa9, 0x09, 0xf9, 0x3b, 0x1d, 0xb4, + 0x8e, 0x4e, 0xd2, 0x6d, 0xb5, 0x15, 0x56, 0x97, 0xd6, 0x45, 0x98, 0x9a, 0xda, 0x56, 0xd7, 0x29, + 0x10, 0xf3, 0x32, 0xf4, 0x04, 0x0c, 0x55, 0x3d, 0xd2, 0x0c, 0x03, 0x95, 0xcd, 0x97, 0x69, 0xf3, + 0xf3, 0x02, 0x86, 0x55, 0x29, 0x3d, 0x43, 0x04, 0x62, 0x4b, 0x29, 0x9d, 0xb7, 0x75, 0x86, 0x90, + 0x9b, 0x14, 0xe7, 0x2a, 0x7f, 0x61, 0xc5, 0x09, 0x35, 0x60, 0xc0, 0x67, 0x86, 0x0a, 0x11, 0x09, + 0x6b, 0xc1, 0x3a, 0xc2, 0x0d, 0x1f, 0x32, 0x0e, 0x96, 0x89, 0x7e, 0xc1, 0x43, 0xdf, 0x6b, 0x4e, + 0x1c, 0xcf, 0x5e, 0xf3, 0x04, 0x0c, 0x55, 0xea, 0x7e, 0xa3, 0x1a, 0x91, 0xa0, 0x34, 0xc1, 0x4e, + 0xec, 0x6c, 0x24, 0xe6, 0x04, 0x0c, 0xab, 0x52, 0xf4, 0x57, 0x60, 0x2c, 0x6c, 0x27, 0x4c, 0xb4, + 0xd0, 0x71, 0x8a, 0x4b, 0x27, 0x19, 0x3a, 0x8b, 0x6b, 0x5a, 0xd3, 0x0b, 0xb0, 0x89, 0x47, 0x45, + 0x7c, 0x3d, 0x8c, 0x59, 0x5e, 0x22, 0x26, 0xe2, 0xcf, 0x9a, 0x22, 0xfe, 0x8a, 0x56, 0x86, 0x0d, + 0x4c, 0xf4, 0x35, 0x07, 0x4e, 0x36, 0xb3, 0x07, 0xb8, 0xd2, 0x39, 0x36, 0x32, 0x65, 0x1b, 0x8a, + 0x7e, 0x86, 0x34, 0x8f, 0x48, 0xef, 0x00, 0xe3, 0xce, 0x46, 0xb0, 0x0c, 0x61, 0xf1, 0x6e, 0x50, + 0xa9, 0x47, 0x61, 0x60, 0x36, 0xef, 0x41, 0x5b, 0xf7, 0xe2, 0xd8, 0xda, 0xce, 0x63, 0x31, 0xfb, + 0xe0, 0xed, 0xbd, 0xa9, 0x33, 0xb9, 0x45, 
0x38, 0xbf, 0x51, 0x93, 0xf3, 0x70, 0x36, 0x5f, 0x3e, + 0xdc, 0xed, 0xc4, 0xd1, 0xa7, 0x9f, 0x38, 0x16, 0xe0, 0xc1, 0xae, 0x8d, 0xa2, 0x3b, 0x8d, 0xd4, + 0x36, 0x1d, 0x73, 0xa7, 0xe9, 0xd0, 0x0e, 0xc7, 0x61, 0x54, 0x7f, 0xbc, 0xc2, 0xfd, 0x3f, 0x7d, + 0x00, 0xa9, 0x9d, 0x1c, 0x79, 0x30, 0xce, 0x6d, 0xf2, 0x4b, 0xf3, 0x87, 0xbe, 0xd1, 0x3f, 0x67, + 0x10, 0xc0, 0x19, 0x82, 0xa8, 0x09, 0x88, 0x43, 0xf8, 0xef, 0xc3, 0xf8, 0x56, 0x99, 0x2b, 0x72, + 0xae, 0x83, 0x08, 0xce, 0x21, 0x4c, 0x7b, 0x94, 0x84, 0xdb, 0x24, 0xb8, 0x86, 0x57, 0x0e, 0x93, + 0x16, 0x82, 0x7b, 0xe3, 0x0c, 0x02, 0x38, 0x43, 0x10, 0xb9, 0x30, 0xc0, 0x6c, 0x33, 0x32, 0x76, + 0x9c, 0x89, 0x17, 0xa6, 0x69, 0xc4, 0x58, 0x94, 0xa0, 0xaf, 0x38, 0x30, 0x2e, 0xb3, 0x5b, 0x30, + 0x6b, 0xa8, 0x8c, 0x1a, 0xbf, 0x66, 0xcb, 0xcf, 0x71, 0x59, 0xa7, 0x9e, 0xc6, 0x64, 0x1a, 0xe0, + 0x18, 0x67, 0x1a, 0xe1, 0xbe, 0x00, 0xa7, 0x72, 0xaa, 0x5b, 0x39, 0xd1, 0x7e, 0xd7, 0x81, 0x11, + 0x2d, 0xe9, 0x22, 0x7a, 0x1d, 0x86, 0xc3, 0xb2, 0xf5, 0x40, 0xc0, 0xb5, 0x72, 0x47, 0x20, 0xa0, + 0x02, 0xe1, 0x94, 0x61, 0x2f, 0xf1, 0x8b, 0xb9, 0x19, 0x22, 0xef, 0x73, 0xb3, 0x0f, 0x1c, 0xbf, + 0xf8, 0x2b, 0x45, 0x48, 0x29, 0x1d, 0x30, 0xeb, 0x4a, 0x1a, 0xed, 0x58, 0xd8, 0x37, 0xda, 0xb1, + 0x0a, 0x27, 0x3c, 0xe6, 0x4b, 0x3e, 0x64, 0xae, 0x15, 0x9e, 0x73, 0xd7, 0xa4, 0x80, 0xb3, 0x24, + 0x29, 0x97, 0x38, 0xad, 0xca, 0xb8, 0xf4, 0x1f, 0x98, 0x4b, 0xd9, 0xa4, 0x80, 0xb3, 0x24, 0xd1, + 0x4b, 0x50, 0xaa, 0xb0, 0xbb, 0xc3, 0xbc, 0x8f, 0x4b, 0x5b, 0x57, 0xc3, 0x64, 0x3d, 0x22, 0x31, + 0x09, 0x12, 0x91, 0x55, 0xed, 0x11, 0x31, 0x0a, 0xa5, 0xb9, 0x2e, 0x78, 0xb8, 0x2b, 0x05, 0x7a, + 0x4c, 0x61, 0xce, 0x68, 0x3f, 0xd9, 0x65, 0x42, 0x44, 0x78, 0xe9, 0xd5, 0x31, 0xa5, 0xac, 0x17, + 0x62, 0x13, 0x17, 0xfd, 0xb2, 0x03, 0x63, 0x0d, 0x69, 0xae, 0xc7, 0xed, 0x86, 0x4c, 0x11, 0x8a, + 0xad, 0x4c, 0xbf, 0x15, 0x9d, 0x32, 0xd7, 0x25, 0x0c, 0x10, 0x36, 0x79, 0x67, 0x13, 0xdf, 0x0c, + 0xf5, 0x98, 0xf8, 0xe6, 0x07, 0x0e, 0x4c, 0x64, 0xb9, 0xa1, 0x6d, 0x78, 0xb8, 0xe9, 0x45, 0xdb, + 0x4b, 0xc1, 0x56, 0xc4, 0xee, 0x88, 0x24, 0x7c, 0x32, 0xcc, 0x6c, 0x25, 0x24, 0x9a, 0xf7, 0x76, + 0xb9, 0xfb, 0xb3, 0xa8, 0xde, 0x98, 0x7a, 0x78, 0x75, 0x3f, 0x64, 0xbc, 0x3f, 0x2d, 0x54, 0x86, + 0x33, 0x14, 0x81, 0xe5, 0xc5, 0xf3, 0xc3, 0x20, 0x65, 0x52, 0x60, 0x4c, 0x54, 0x9c, 0xe2, 0x6a, + 0x1e, 0x12, 0xce, 0xaf, 0xeb, 0x5e, 0x86, 0x01, 0x7e, 0x65, 0xef, 0x9e, 0xfc, 0x47, 0xee, 0xbf, + 0x29, 0x80, 0x54, 0x0c, 0xff, 0x72, 0xbb, 0xe3, 0xe8, 0x26, 0x1a, 0x31, 0x93, 0x92, 0xb0, 0x76, + 0xb0, 0x4d, 0x54, 0x64, 0xa0, 0x14, 0x25, 0x54, 0x63, 0x26, 0xb7, 0xfc, 0x64, 0x2e, 0xac, 0x4a, + 0x1b, 0x07, 0xd3, 0x98, 0x2f, 0x0b, 0x18, 0x56, 0xa5, 0xee, 0xa7, 0x1c, 0x18, 0xa3, 0xbd, 0x6c, + 0x34, 0x48, 0xa3, 0x9c, 0x90, 0x56, 0x8c, 0x62, 0x28, 0xc6, 0xf4, 0x1f, 0x7b, 0xa6, 0xc0, 0xf4, + 0x9a, 0x27, 0x69, 0x69, 0xce, 0x1a, 0xca, 0x04, 0x73, 0x5e, 0xee, 0x5b, 0x7d, 0x30, 0xac, 0x06, + 0xbb, 0x07, 0x7b, 0xea, 0xa5, 0x34, 0x39, 0x2c, 0x97, 0xc0, 0x25, 0x2d, 0x31, 0xec, 0x1d, 0x3a, + 0x74, 0xc1, 0x2e, 0xcf, 0x92, 0x91, 0x66, 0x89, 0x7d, 0xca, 0x74, 0x35, 0x9f, 0xd5, 0xe7, 0x9f, + 0x86, 0x2f, 0x7c, 0xce, 0xb7, 0x74, 0x4f, 0x7f, 0xbf, 0xad, 0xdd, 0x4c, 0xb9, 0x31, 0xbb, 0xbb, + 0xf8, 0x33, 0xef, 0x06, 0x15, 0x7b, 0x7a, 0x37, 0xe8, 0x49, 0xe8, 0x27, 0x41, 0xbb, 0xc9, 0x54, + 0xa5, 0x61, 0x76, 0x44, 0xe8, 0xbf, 0x1c, 0xb4, 0x9b, 0x66, 0xcf, 0x18, 0x0a, 0x7a, 0x3f, 0x8c, + 0x54, 0x49, 0x5c, 0x89, 0x7c, 0x96, 0xfa, 0x41, 0x58, 0x76, 0x1e, 0x62, 0xe6, 0xb2, 0x14, 0x6c, + 0x56, 0xd4, 0x2b, 0xb8, 0xaf, 0xc2, 0xc0, 0x7a, 0xa3, 0x5d, 0xf3, 
0x03, 0xd4, 0x82, 0x01, 0x9e, + 0x08, 0x42, 0xec, 0xf6, 0x16, 0xce, 0x9d, 0x5c, 0x54, 0x68, 0x51, 0x28, 0xfc, 0xb6, 0xaf, 0xe0, + 0xe3, 0x7e, 0xb2, 0x00, 0xf4, 0x68, 0xbe, 0x38, 0x87, 0xfe, 0x7a, 0xc7, 0x33, 0x39, 0x3f, 0x97, + 0xf3, 0x4c, 0xce, 0x18, 0x43, 0xce, 0x79, 0x21, 0xa7, 0x01, 0x63, 0xcc, 0x39, 0x22, 0xf7, 0x40, + 0xa1, 0x56, 0x3f, 0xd3, 0x63, 0xee, 0x04, 0xbd, 0xaa, 0xd8, 0x11, 0x74, 0x10, 0x36, 0x89, 0xa3, + 0x55, 0x38, 0xc5, 0x73, 0x8c, 0xce, 0x93, 0x86, 0xb7, 0x9b, 0xc9, 0x25, 0x76, 0x5e, 0xbe, 0x7c, + 0x36, 0xdf, 0x89, 0x82, 0xf3, 0xea, 0xb9, 0xbf, 0xd7, 0x0f, 0x9a, 0x4b, 0xa2, 0x87, 0xd5, 0xf2, + 0x4a, 0xc6, 0x01, 0xb5, 0x6a, 0xc5, 0x01, 0x25, 0xbd, 0x3a, 0x5c, 0x02, 0x99, 0x3e, 0x27, 0xda, + 0xa8, 0x3a, 0x69, 0xb4, 0x44, 0x1f, 0x55, 0xa3, 0xae, 0x90, 0x46, 0x0b, 0xb3, 0x12, 0x75, 0xd7, + 0xb1, 0xbf, 0xeb, 0x5d, 0xc7, 0x3a, 0x14, 0x6b, 0x5e, 0xbb, 0x46, 0x44, 0x04, 0xa6, 0x05, 0x5f, + 0x23, 0xbb, 0x7d, 0xc1, 0x7d, 0x8d, 0xec, 0x5f, 0xcc, 0x19, 0xd0, 0xc5, 0x5e, 0x97, 0x21, 0x29, + 0xc2, 0x48, 0x6b, 0x61, 0xb1, 0xab, 0x28, 0x17, 0xbe, 0xd8, 0xd5, 0x4f, 0x9c, 0x32, 0x43, 0x2d, + 0x18, 0xac, 0xf0, 0x0c, 0x2e, 0x42, 0x67, 0x59, 0xb2, 0x71, 0x99, 0x93, 0x11, 0xe4, 0xd6, 0x14, + 0xf1, 0x03, 0x4b, 0x36, 0xee, 0x45, 0x18, 0xd1, 0x5e, 0xeb, 0xa0, 0x9f, 0x41, 0x25, 0x0f, 0xd1, + 0x3e, 0xc3, 0xbc, 0x97, 0x78, 0x98, 0x95, 0xb8, 0xdf, 0xea, 0x07, 0x65, 0x4b, 0xd3, 0xaf, 0x1e, + 0x7a, 0x15, 0x2d, 0xd5, 0x91, 0x71, 0x0d, 0x3f, 0x0c, 0xb0, 0x28, 0xa5, 0x7a, 0x5d, 0x93, 0x44, + 0x35, 0x75, 0x8e, 0x16, 0xe2, 0x5a, 0xe9, 0x75, 0xab, 0x7a, 0x21, 0x36, 0x71, 0xa9, 0x52, 0xde, + 0x14, 0x2e, 0xfa, 0x6c, 0x60, 0xb5, 0x74, 0xdd, 0x63, 0x85, 0xc1, 0x72, 0x25, 0x34, 0x35, 0x8f, + 0xbe, 0x08, 0xc4, 0xb4, 0xe1, 0x50, 0xd2, 0xa8, 0xf2, 0x80, 0x29, 0x1d, 0x82, 0x0d, 0xae, 0x68, + 0x11, 0x4e, 0xc6, 0x24, 0x59, 0xbb, 0x19, 0x90, 0x48, 0x65, 0x29, 0x10, 0xc9, 0x38, 0xd4, 0xc5, + 0x8c, 0x72, 0x16, 0x01, 0x77, 0xd6, 0xc9, 0x8d, 0x5d, 0x2d, 0x1e, 0x38, 0x76, 0x75, 0x1e, 0x26, + 0xb6, 0x3c, 0xbf, 0xd1, 0x8e, 0x48, 0xd7, 0x08, 0xd8, 0x85, 0x4c, 0x39, 0xee, 0xa8, 0xc1, 0xee, + 0x06, 0x35, 0xbc, 0x5a, 0x5c, 0x1a, 0xd4, 0xee, 0x06, 0x51, 0x00, 0xe6, 0x70, 0xf7, 0x37, 0x1d, + 0xe0, 0x59, 0x90, 0x66, 0xb6, 0xb6, 0xfc, 0xc0, 0x4f, 0x76, 0xd1, 0xd7, 0x1d, 0x98, 0x08, 0xc2, + 0x2a, 0x99, 0x09, 0x12, 0x5f, 0x02, 0xed, 0xa5, 0xa6, 0x67, 0xbc, 0xae, 0x66, 0xc8, 0xf3, 0x94, + 0x1a, 0x59, 0x28, 0xee, 0x68, 0x86, 0x7b, 0x0e, 0xce, 0xe4, 0x12, 0x70, 0x7f, 0xd0, 0x07, 0x66, + 0x32, 0x27, 0xf4, 0x3c, 0x14, 0x1b, 0x2c, 0xbd, 0x88, 0x73, 0xc8, 0x2c, 0x5d, 0x6c, 0xac, 0x78, + 0xfe, 0x11, 0x4e, 0x09, 0xcd, 0xc3, 0x08, 0xcb, 0x10, 0x25, 0x92, 0xbf, 0x14, 0x8c, 0xac, 0x0a, + 0x23, 0x38, 0x2d, 0xba, 0x63, 0xfe, 0xc4, 0x7a, 0x35, 0xf4, 0x1a, 0x0c, 0x6e, 0xf2, 0x3c, 0x99, + 0xf6, 0x7c, 0x7e, 0x22, 0xf1, 0x26, 0xd3, 0x8d, 0x64, 0x16, 0xce, 0x3b, 0xe9, 0xbf, 0x58, 0x72, + 0x44, 0xbb, 0x30, 0xe4, 0xc9, 0x6f, 0xda, 0x6f, 0xeb, 0xa2, 0x86, 0x31, 0x7f, 0x44, 0xc4, 0x8c, + 0xfc, 0x86, 0x8a, 0x5d, 0x26, 0xb4, 0xa8, 0xd8, 0x53, 0x68, 0xd1, 0x77, 0x1c, 0x80, 0xf4, 0x51, + 0x11, 0x74, 0x0b, 0x86, 0xe2, 0x67, 0x0c, 0x43, 0x85, 0x8d, 0x4b, 0xfe, 0x82, 0xa2, 0x76, 0x11, + 0x56, 0x40, 0xb0, 0xe2, 0x76, 0x37, 0xe3, 0xca, 0x4f, 0x1d, 0x38, 0x9d, 0xf7, 0xf8, 0xc9, 0x7d, + 0x6c, 0xf1, 0x41, 0xed, 0x2a, 0xa2, 0xc2, 0x7a, 0x44, 0xb6, 0xfc, 0x5b, 0x39, 0xd9, 0x9a, 0x79, + 0x01, 0x4e, 0x71, 0xdc, 0x3f, 0x1d, 0x04, 0xc5, 0xf8, 0x88, 0xec, 0x30, 0x8f, 0xd3, 0x33, 0x53, + 0x2d, 0xd5, 0xb9, 0x14, 0x1e, 0x66, 0x50, 0x2c, 0x4a, 0xe9, 0xb9, 0x49, 0x06, 0xc5, 0x0b, 
0x91, + 0xcd, 0x66, 0xa1, 0x0c, 0x9e, 0xc7, 0xaa, 0x34, 0xcf, 0xb2, 0x53, 0x3c, 0x16, 0xcb, 0xce, 0x80, + 0x7d, 0xcb, 0x4e, 0x13, 0x50, 0xcc, 0x17, 0x0a, 0x33, 0xa7, 0x08, 0x46, 0xa3, 0x07, 0x36, 0x34, + 0x97, 0x3b, 0x88, 0xe0, 0x1c, 0xc2, 0x2c, 0x86, 0x22, 0x6c, 0x90, 0x19, 0x7c, 0x55, 0x1c, 0x3e, + 0xd2, 0x18, 0x0a, 0x0e, 0xc6, 0xb2, 0xfc, 0x90, 0xa6, 0x14, 0xf4, 0xdb, 0xce, 0x3e, 0xb6, 0xaa, + 0x61, 0x5b, 0x5b, 0x50, 0x6e, 0x26, 0x3d, 0x76, 0x92, 0x3a, 0x8c, 0x01, 0xec, 0x1b, 0x0e, 0x9c, + 0x24, 0x41, 0x25, 0xda, 0x65, 0x74, 0x04, 0x35, 0xe1, 0xe2, 0xbe, 0x66, 0x63, 0xad, 0x5f, 0xce, + 0x12, 0xe7, 0x9e, 0xa4, 0x0e, 0x30, 0xee, 0x6c, 0x06, 0x5a, 0x83, 0xa1, 0x8a, 0x27, 0xe6, 0xc5, + 0xc8, 0x41, 0xe6, 0x05, 0x77, 0xd4, 0xcd, 0x88, 0xd9, 0xa0, 0x88, 0xb8, 0x3f, 0x2e, 0xc0, 0xa9, + 0x9c, 0x26, 0xb1, 0xfb, 0x5a, 0x4d, 0xba, 0x00, 0x96, 0xaa, 0xd9, 0xe5, 0xbf, 0x2c, 0xe0, 0x58, + 0x61, 0xa0, 0x75, 0x38, 0xbd, 0xdd, 0x8c, 0x53, 0x2a, 0x73, 0x61, 0x90, 0x90, 0x5b, 0x52, 0x18, + 0x48, 0xf7, 0xf7, 0xe9, 0xe5, 0x1c, 0x1c, 0x9c, 0x5b, 0x93, 0x6a, 0x4b, 0x24, 0xf0, 0x36, 0x1b, + 0x24, 0x2d, 0x12, 0xb7, 0x18, 0x95, 0xb6, 0x74, 0x39, 0x53, 0x8e, 0x3b, 0x6a, 0xa0, 0xcf, 0x39, + 0x70, 0x3e, 0x26, 0xd1, 0x0e, 0x89, 0xca, 0x7e, 0x95, 0xcc, 0xb5, 0xe3, 0x24, 0x6c, 0x92, 0xe8, + 0x90, 0xd6, 0xd9, 0xa9, 0xdb, 0x7b, 0x53, 0xe7, 0xcb, 0xdd, 0xa9, 0xe1, 0xfd, 0x58, 0xb9, 0x9f, + 0x73, 0x60, 0xbc, 0xcc, 0xce, 0xee, 0x4a, 0x75, 0xb7, 0x9d, 0x4b, 0xf5, 0x71, 0x95, 0xba, 0x23, + 0x23, 0x84, 0xcd, 0x64, 0x1b, 0xee, 0xcb, 0x30, 0x51, 0x26, 0x4d, 0xaf, 0x55, 0x67, 0xb7, 0x98, + 0x79, 0xf8, 0xd7, 0x45, 0x18, 0x8e, 0x25, 0x2c, 0xfb, 0x7c, 0x92, 0x42, 0xc6, 0x29, 0x0e, 0x7a, + 0x8c, 0x87, 0xaa, 0xc9, 0x0b, 0x47, 0xc3, 0xfc, 0x90, 0xc3, 0xe3, 0xdb, 0x62, 0x2c, 0xcb, 0xdc, + 0xb7, 0x1c, 0x18, 0x4d, 0xeb, 0x93, 0x2d, 0x54, 0x83, 0x13, 0x15, 0xed, 0xb2, 0x5e, 0x7a, 0x4d, + 0xa2, 0xf7, 0x7b, 0x7d, 0x3c, 0xc5, 0xb3, 0x49, 0x04, 0x67, 0xa9, 0x1e, 0x3c, 0xd2, 0xef, 0x8b, + 0x05, 0x38, 0xa1, 0x9a, 0x2a, 0xfc, 0x94, 0x6f, 0x64, 0x03, 0xf2, 0xb0, 0x8d, 0x24, 0x44, 0xe6, + 0xd8, 0xef, 0x13, 0x94, 0xf7, 0x46, 0x36, 0x28, 0xef, 0x48, 0xd9, 0x77, 0xb8, 0x5e, 0xbf, 0x53, + 0x80, 0x21, 0x95, 0x12, 0xe9, 0x79, 0x28, 0xb2, 0x93, 0xeb, 0xbd, 0xe9, 0xdf, 0xec, 0x14, 0x8c, + 0x39, 0x25, 0x4a, 0x92, 0x05, 0xfd, 0x1c, 0x3a, 0xf1, 0xee, 0x30, 0xb7, 0x5f, 0x7a, 0x51, 0x82, + 0x39, 0x25, 0xb4, 0x0c, 0x7d, 0x24, 0xa8, 0x0a, 0x45, 0xfc, 0xe0, 0x04, 0xd9, 0x43, 0x67, 0x97, + 0x83, 0x2a, 0xa6, 0x54, 0x58, 0x5e, 0x36, 0xae, 0x6f, 0x65, 0x9e, 0xb5, 0x11, 0xca, 0x96, 0x28, + 0x75, 0x67, 0xc1, 0xc8, 0xd9, 0x77, 0xa8, 0x9b, 0x11, 0xbf, 0xdc, 0x07, 0x03, 0xe5, 0xf6, 0x26, + 0x3d, 0x96, 0x7c, 0xdb, 0x81, 0x53, 0x37, 0x33, 0x99, 0xad, 0xd3, 0x75, 0x72, 0xcd, 0x9e, 0x1d, + 0x58, 0x0f, 0x5e, 0x53, 0xd6, 0xaf, 0x9c, 0x42, 0x9c, 0xd7, 0x1c, 0x23, 0xb9, 0x6c, 0xdf, 0x91, + 0x24, 0x97, 0xbd, 0x75, 0xc4, 0xb7, 0x37, 0xc6, 0xba, 0xdd, 0xdc, 0x70, 0x7f, 0xaf, 0x08, 0xc0, + 0xbf, 0xc6, 0x5a, 0x2b, 0xe9, 0xc5, 0xb2, 0xf7, 0x2c, 0x8c, 0xd6, 0x48, 0x40, 0x22, 0x19, 0x9a, + 0x98, 0x79, 0x75, 0x69, 0x51, 0x2b, 0xc3, 0x06, 0x26, 0x9b, 0x2c, 0x41, 0x12, 0xed, 0x72, 0x55, + 0x3b, 0x7b, 0x43, 0x43, 0x95, 0x60, 0x0d, 0x0b, 0x4d, 0x1b, 0x8e, 0x17, 0xee, 0xc3, 0x1f, 0xdf, + 0xc7, 0x4f, 0xf2, 0x7e, 0x18, 0x37, 0x33, 0xb1, 0x08, 0x85, 0x4f, 0xf9, 0xdc, 0xcd, 0x04, 0x2e, + 0x38, 0x83, 0x4d, 0x17, 0x42, 0x35, 0xda, 0xc5, 0xed, 0x40, 0x68, 0x7e, 0x6a, 0x21, 0xcc, 0x33, + 0x28, 0x16, 0xa5, 0x2c, 0x85, 0x05, 0xdb, 0x03, 0x39, 0x5c, 0xa4, 0xc1, 0x48, 0x53, 0x58, 0x68, + 0x65, 0xd8, 0xc0, 
0xa4, 0x1c, 0x84, 0x65, 0x14, 0xcc, 0xa5, 0x96, 0x31, 0x67, 0xb6, 0x60, 0x3c, + 0x34, 0x2d, 0x3a, 0x5c, 0x0d, 0x7a, 0x77, 0x8f, 0x53, 0xcf, 0xa8, 0xcb, 0x63, 0x25, 0x32, 0x06, + 0xa0, 0x0c, 0x7d, 0xaa, 0xfa, 0xea, 0x17, 0x19, 0x46, 0xcd, 0xc8, 0xd6, 0xae, 0x77, 0x0d, 0xd6, + 0xe1, 0x74, 0x2b, 0xac, 0xae, 0x47, 0x7e, 0x18, 0xf9, 0xc9, 0xee, 0x5c, 0xc3, 0x8b, 0x63, 0x36, + 0x31, 0xc6, 0x4c, 0x95, 0x68, 0x3d, 0x07, 0x07, 0xe7, 0xd6, 0xa4, 0x67, 0xa2, 0x96, 0x00, 0xb2, + 0xf8, 0xb2, 0x22, 0x57, 0xea, 0x24, 0x22, 0x56, 0xa5, 0xee, 0x29, 0x38, 0x59, 0x6e, 0xb7, 0x5a, + 0x0d, 0x9f, 0x54, 0x95, 0x63, 0xc3, 0xfd, 0x00, 0x9c, 0x10, 0xa9, 0x67, 0x95, 0x02, 0x72, 0xa0, + 0x44, 0xe9, 0xee, 0x7f, 0xec, 0x83, 0x13, 0x99, 0x68, 0x1e, 0xf4, 0x5a, 0x56, 0x6d, 0xb0, 0x93, + 0x12, 0x55, 0x53, 0x18, 0x44, 0x7e, 0xd3, 0x3c, 0x15, 0xa4, 0x2e, 0x43, 0xf1, 0xad, 0x5d, 0x81, + 0x61, 0x01, 0xeb, 0x7c, 0x57, 0x31, 0xe2, 0xf9, 0x3f, 0x06, 0xa0, 0xd8, 0xca, 0x5b, 0xf7, 0xb6, + 0xfb, 0xc9, 0xd6, 0xaf, 0x82, 0xc4, 0x58, 0xe3, 0x88, 0x02, 0x18, 0x64, 0x0d, 0x21, 0xf2, 0xde, + 0xa5, 0xb5, 0xbe, 0x32, 0xad, 0x6d, 0x95, 0xd3, 0xc6, 0x92, 0x89, 0xfb, 0xd9, 0x02, 0xe4, 0x87, + 0x8c, 0xa1, 0x8f, 0x75, 0x7e, 0xf0, 0xe7, 0x2d, 0x0e, 0x84, 0x88, 0x59, 0xeb, 0xfe, 0xcd, 0x03, + 0xf3, 0x9b, 0xaf, 0x5a, 0x1a, 0x07, 0xc1, 0xb7, 0xe3, 0xcb, 0xbb, 0xff, 0xc3, 0x81, 0x91, 0x8d, + 0x8d, 0x15, 0xb5, 0xb5, 0x63, 0x38, 0x1b, 0xf3, 0x94, 0x06, 0xcc, 0xb3, 0x3e, 0x17, 0x36, 0x5b, + 0xdc, 0xd1, 0x2e, 0x02, 0x00, 0x58, 0xd6, 0xe3, 0x72, 0x2e, 0x06, 0xee, 0x52, 0x13, 0x2d, 0xc1, + 0x29, 0xbd, 0xa4, 0xac, 0x3d, 0x32, 0x59, 0x14, 0x19, 0x8e, 0x3a, 0x8b, 0x71, 0x5e, 0x9d, 0x2c, + 0x29, 0x61, 0x50, 0x66, 0xdb, 0x73, 0x0e, 0x29, 0x51, 0x8c, 0xf3, 0xea, 0xb8, 0x6b, 0x30, 0xb2, + 0xe1, 0x45, 0xaa, 0xe3, 0x1f, 0x84, 0x89, 0x4a, 0xd8, 0x94, 0xea, 0xca, 0x0a, 0xd9, 0x21, 0x0d, + 0xd1, 0x65, 0xfe, 0xb2, 0x4b, 0xa6, 0x0c, 0x77, 0x60, 0xbb, 0xff, 0xf5, 0x02, 0xa8, 0x2b, 0x9a, + 0x3d, 0xec, 0xa8, 0x2d, 0x15, 0x4c, 0x5b, 0xb4, 0x1c, 0x4c, 0xab, 0xf6, 0x96, 0x4c, 0x40, 0x6d, + 0x92, 0x06, 0xd4, 0x0e, 0xd8, 0x0e, 0xa8, 0x55, 0x4a, 0x76, 0x47, 0x50, 0xed, 0x57, 0x1d, 0x18, + 0x0d, 0xc2, 0x2a, 0x51, 0x1e, 0xd0, 0x41, 0xb6, 0xc2, 0x5f, 0xb2, 0x77, 0x37, 0x81, 0x07, 0x87, + 0x0a, 0xf2, 0x3c, 0xd0, 0x5b, 0x6d, 0xc9, 0x7a, 0x11, 0x36, 0xda, 0x81, 0x16, 0x34, 0xd3, 0x32, + 0xf7, 0xe0, 0x3c, 0x94, 0x77, 0x44, 0xbb, 0xab, 0x9d, 0xf8, 0x96, 0xa6, 0x27, 0x0e, 0xdb, 0x32, + 0x99, 0xca, 0x7b, 0x77, 0x9a, 0x23, 0x4a, 0x26, 0xee, 0x4e, 0xf5, 0x47, 0x17, 0x06, 0x78, 0x44, + 0xb8, 0xc8, 0xa5, 0xc5, 0xfc, 0xa3, 0x3c, 0x5a, 0x1c, 0x8b, 0x12, 0x94, 0xc8, 0x28, 0x8b, 0x11, + 0x5b, 0xcf, 0x70, 0x18, 0x51, 0x1c, 0xf9, 0x61, 0x16, 0xe8, 0x39, 0xfd, 0xe8, 0x3f, 0xda, 0xcb, + 0xd1, 0x7f, 0xac, 0xeb, 0xb1, 0xff, 0x0b, 0x0e, 0x8c, 0x56, 0xb4, 0x67, 0x31, 0x4a, 0x4f, 0xd8, + 0x7a, 0xfe, 0x3b, 0xef, 0xf5, 0x12, 0xee, 0x76, 0x33, 0x9e, 0xe1, 0x30, 0xb8, 0xb3, 0x04, 0xa2, + 0xcc, 0xce, 0xc1, 0x54, 0x1d, 0x2b, 0x89, 0x39, 0x4c, 0xbb, 0x89, 0x8c, 0x56, 0xa5, 0x30, 0x2c, + 0x78, 0xa1, 0xd7, 0x61, 0x48, 0x5e, 0x2a, 0x10, 0xc1, 0xf7, 0xd8, 0x86, 0x1f, 0xc4, 0x74, 0xb6, + 0xca, 0xac, 0x83, 0x1c, 0x8a, 0x15, 0x47, 0x54, 0x87, 0xbe, 0xaa, 0x57, 0x13, 0x61, 0xf8, 0xab, + 0x76, 0xb2, 0xba, 0x4a, 0x9e, 0xec, 0x48, 0x3a, 0x3f, 0xb3, 0x88, 0x29, 0x0b, 0x74, 0x2b, 0x7d, + 0x57, 0x60, 0xc2, 0xda, 0xee, 0x6b, 0xaa, 0x85, 0x5c, 0x27, 0xe8, 0x78, 0xa6, 0xa0, 0x2a, 0xfc, + 0xd3, 0xff, 0x1f, 0x63, 0xbb, 0x60, 0x27, 0x2d, 0x2c, 0x4f, 0xf4, 0x92, 0xfa, 0xb8, 0x29, 0x97, + 0x7a, 0x92, 0xb4, 0x4a, 0x3f, 0x6f, 0x8b, 
0x0b, 0x4b, 0x57, 0xc2, 0x5f, 0x6a, 0xdf, 0xd8, 0x58, + 0xc7, 0x8c, 0x3a, 0x6a, 0xc0, 0x40, 0x8b, 0x85, 0xce, 0x94, 0xde, 0x69, 0x6b, 0x6f, 0xe1, 0xa1, + 0x38, 0x7c, 0x6e, 0xf2, 0xff, 0xb1, 0xe0, 0x81, 0x2e, 0xc3, 0x20, 0x7f, 0x1e, 0x87, 0x5f, 0x83, + 0x18, 0xb9, 0x34, 0xd9, 0xfd, 0x91, 0x9d, 0x74, 0xa3, 0xe0, 0xbf, 0x63, 0x2c, 0xeb, 0xa2, 0x2f, + 0x3a, 0x30, 0x4e, 0x25, 0x6a, 0xfa, 0x9e, 0x4f, 0x09, 0xd9, 0x92, 0x59, 0xd7, 0x62, 0xaa, 0x91, + 0x48, 0x59, 0xa3, 0x8e, 0x85, 0x4b, 0x06, 0x3b, 0x9c, 0x61, 0x8f, 0xde, 0x80, 0xa1, 0xd8, 0xaf, + 0x92, 0x8a, 0x17, 0xc5, 0xa5, 0x53, 0x47, 0xd3, 0x94, 0xd4, 0x23, 0x26, 0x18, 0x61, 0xc5, 0x12, + 0xfd, 0x1a, 0x7b, 0x50, 0xb5, 0x52, 0xf7, 0x77, 0xc8, 0x4a, 0x58, 0xe1, 0xc7, 0x98, 0xd3, 0xb6, + 0xd6, 0xbe, 0xf4, 0xfd, 0x49, 0xca, 0xc2, 0x51, 0x64, 0xb2, 0xc3, 0x59, 0xfe, 0xe8, 0x6f, 0x38, + 0x70, 0x86, 0x3f, 0x7c, 0x90, 0x7d, 0xcb, 0xe3, 0xcc, 0x21, 0x4d, 0x52, 0xec, 0xfe, 0xc6, 0x4c, + 0x1e, 0x49, 0x9c, 0xcf, 0x89, 0xa5, 0x29, 0x36, 0x9f, 0x5f, 0x3a, 0x6b, 0xd5, 0x33, 0xdc, 0xfb, + 0x93, 0x4b, 0xe8, 0x69, 0x18, 0x69, 0x89, 0xed, 0xd0, 0x8f, 0x9b, 0xec, 0x36, 0x4e, 0x1f, 0xbf, + 0x27, 0xb9, 0x9e, 0x82, 0xb1, 0x8e, 0x63, 0xe4, 0xac, 0x7e, 0x72, 0xbf, 0x9c, 0xd5, 0xe8, 0x1a, + 0x8c, 0x24, 0x61, 0x43, 0xa4, 0x6d, 0x8d, 0x4b, 0x25, 0x36, 0x03, 0x2f, 0xe4, 0xad, 0xad, 0x0d, + 0x85, 0x96, 0x9e, 0xdc, 0x53, 0x58, 0x8c, 0x75, 0x3a, 0x2c, 0x02, 0x5a, 0x3c, 0x28, 0x11, 0xb1, + 0x23, 0xfb, 0x83, 0x99, 0x08, 0x68, 0xbd, 0x10, 0x9b, 0xb8, 0x68, 0x11, 0x4e, 0xb6, 0x3a, 0xce, + 0xfc, 0xfc, 0x16, 0xa0, 0x0a, 0x3a, 0xe9, 0x3c, 0xf0, 0x77, 0xd6, 0x31, 0x4e, 0xfb, 0xe7, 0xf7, + 0x3b, 0xed, 0x77, 0xc9, 0xe0, 0xfc, 0xd0, 0x61, 0x32, 0x38, 0xa3, 0x2a, 0x3c, 0xe4, 0xb5, 0x93, + 0x90, 0xa5, 0xe4, 0x31, 0xab, 0xf0, 0x60, 0xf0, 0x47, 0x78, 0x7c, 0xf9, 0xed, 0xbd, 0xa9, 0x87, + 0x66, 0xf6, 0xc1, 0xc3, 0xfb, 0x52, 0x41, 0xaf, 0xc2, 0x10, 0x11, 0x59, 0xa8, 0x4b, 0x3f, 0x67, + 0x4b, 0x49, 0x30, 0xf3, 0x5a, 0xcb, 0x38, 0x5b, 0x0e, 0xc3, 0x8a, 0x1f, 0xda, 0x80, 0x91, 0x7a, + 0x18, 0x27, 0x33, 0x0d, 0xdf, 0x8b, 0x49, 0x5c, 0x7a, 0x98, 0x4d, 0x9a, 0x5c, 0xdd, 0xeb, 0x8a, + 0x44, 0x4b, 0xe7, 0xcc, 0x95, 0xb4, 0x26, 0xd6, 0xc9, 0x20, 0xc2, 0xfc, 0xc3, 0x2c, 0x12, 0x5e, + 0xfa, 0xbe, 0x2e, 0xb0, 0x8e, 0x3d, 0x9e, 0x47, 0x79, 0x3d, 0xac, 0x96, 0x4d, 0x6c, 0xe5, 0x20, + 0xd6, 0x81, 0x38, 0x4b, 0x13, 0x3d, 0x0b, 0xa3, 0xad, 0xb0, 0x5a, 0x6e, 0x91, 0xca, 0xba, 0x97, + 0x54, 0xea, 0xa5, 0x29, 0xd3, 0xca, 0xb8, 0xae, 0x95, 0x61, 0x03, 0x13, 0xb5, 0x60, 0xb0, 0xc9, + 0x73, 0x35, 0x94, 0x1e, 0xb5, 0x75, 0xb6, 0x11, 0xc9, 0x1f, 0x84, 0x0d, 0x81, 0xff, 0xc0, 0x92, + 0x0d, 0xfa, 0xfb, 0x0e, 0x9c, 0xc8, 0xdc, 0x2f, 0x2b, 0xbd, 0xc3, 0x9a, 0xca, 0x62, 0x12, 0x9e, + 0x7d, 0x9c, 0x0d, 0x9f, 0x09, 0xbc, 0xd3, 0x09, 0xc2, 0xd9, 0x16, 0xf1, 0x71, 0x61, 0x09, 0x57, + 0x4a, 0x8f, 0xd9, 0x1b, 0x17, 0x46, 0x50, 0x8e, 0x0b, 0xfb, 0x81, 0x25, 0x1b, 0xf4, 0x24, 0x0c, + 0x8a, 0xdc, 0x88, 0xa5, 0xc7, 0x4d, 0xaf, 0xbb, 0x48, 0xa1, 0x88, 0x65, 0xf9, 0xe4, 0x07, 0xe0, + 0x64, 0xc7, 0xd1, 0xed, 0x40, 0x59, 0x3f, 0x7e, 0xc3, 0x01, 0xfd, 0x42, 0xba, 0xf5, 0xa7, 0x5f, + 0x9e, 0x85, 0xd1, 0x0a, 0x7f, 0x89, 0x93, 0x5f, 0x69, 0xef, 0x37, 0xed, 0xbd, 0x73, 0x5a, 0x19, + 0x36, 0x30, 0xdd, 0x2b, 0x80, 0x3a, 0xf3, 0xf2, 0x1f, 0xca, 0x71, 0xf2, 0x0f, 0x1d, 0x18, 0x33, + 0x74, 0x06, 0xeb, 0x7e, 0xd5, 0x05, 0x40, 0x4d, 0x3f, 0x8a, 0xc2, 0x48, 0x7f, 0xf2, 0x50, 0xa4, + 0x9d, 0x60, 0xf1, 0x16, 0xab, 0x1d, 0xa5, 0x38, 0xa7, 0x86, 0xfb, 0x8f, 0xfb, 0x21, 0x0d, 0x34, + 0x57, 0x59, 0x8b, 0x9d, 0xae, 0x59, 0x8b, 0x9f, 0x82, 0xa1, 0x97, 
0xe3, 0x30, 0x58, 0x4f, 0x73, + 0x1b, 0xab, 0x6f, 0xf1, 0x5c, 0x79, 0xed, 0x2a, 0xc3, 0x54, 0x18, 0x0c, 0xfb, 0x95, 0x05, 0xbf, + 0x91, 0x74, 0x26, 0xbf, 0x7d, 0xee, 0x79, 0x0e, 0xc7, 0x0a, 0x83, 0xbd, 0x7e, 0xb8, 0x43, 0x94, + 0x23, 0x20, 0x7d, 0xfd, 0x90, 0x3f, 0xb9, 0xc1, 0xca, 0xd0, 0x45, 0x18, 0x56, 0x4e, 0x04, 0xe1, + 0x99, 0x50, 0x23, 0xa5, 0x3c, 0x0d, 0x38, 0xc5, 0x61, 0x0a, 0xa1, 0x30, 0x3c, 0x0b, 0x13, 0x4a, + 0xd9, 0xc6, 0xf1, 0x24, 0x63, 0xca, 0xe6, 0xb2, 0x5d, 0x82, 0xb1, 0x62, 0x99, 0xe7, 0x5b, 0x1e, + 0x3e, 0x12, 0xdf, 0xb2, 0x76, 0xeb, 0xa1, 0xd8, 0xeb, 0xad, 0x07, 0x73, 0x6e, 0x0f, 0xf5, 0x34, + 0xb7, 0x3f, 0xdd, 0x07, 0x83, 0xd7, 0x49, 0xc4, 0xd2, 0xc6, 0x3f, 0x09, 0x83, 0x3b, 0xfc, 0xdf, + 0xec, 0x95, 0x59, 0x81, 0x81, 0x65, 0x39, 0xfd, 0x6e, 0x9b, 0x6d, 0xbf, 0x51, 0x9d, 0x4f, 0x57, + 0x71, 0x9a, 0xd6, 0x51, 0x16, 0xe0, 0x14, 0x87, 0x56, 0xa8, 0x51, 0xcd, 0xbe, 0xd9, 0xf4, 0x3b, + 0x1e, 0xf6, 0x5f, 0x94, 0x05, 0x38, 0xc5, 0x41, 0x8f, 0xc3, 0x40, 0xcd, 0x4f, 0x36, 0xbc, 0x5a, + 0xd6, 0x33, 0xba, 0xc8, 0xa0, 0x58, 0x94, 0x32, 0xb7, 0x98, 0x9f, 0x6c, 0x44, 0x84, 0x59, 0x76, + 0x3b, 0x32, 0x76, 0x2c, 0x6a, 0x65, 0xd8, 0xc0, 0x64, 0x4d, 0x0a, 0x45, 0xcf, 0x44, 0x9c, 0x6c, + 0xda, 0x24, 0x59, 0x80, 0x53, 0x1c, 0x3a, 0xff, 0x2b, 0x61, 0xb3, 0xe5, 0x37, 0x44, 0x04, 0xb7, + 0x36, 0xff, 0xe7, 0x04, 0x1c, 0x2b, 0x0c, 0x8a, 0x4d, 0x45, 0x18, 0x15, 0x3f, 0xd9, 0x97, 0xe6, + 0xd6, 0x05, 0x1c, 0x2b, 0x0c, 0xf7, 0x3a, 0x8c, 0xf1, 0x95, 0x3c, 0xd7, 0xf0, 0xfc, 0xe6, 0xe2, + 0x1c, 0xba, 0xdc, 0x71, 0xeb, 0xe1, 0xc9, 0x9c, 0x5b, 0x0f, 0x67, 0x8c, 0x4a, 0x9d, 0xb7, 0x1f, + 0xdc, 0x1f, 0x16, 0x60, 0xe8, 0x18, 0x1f, 0xeb, 0x3c, 0xf6, 0x77, 0xa7, 0xd1, 0xad, 0xcc, 0x43, + 0x9d, 0xeb, 0x36, 0x2f, 0x31, 0xed, 0xfb, 0x48, 0xe7, 0x7f, 0x2a, 0xc0, 0x59, 0x89, 0x2a, 0xcf, + 0x72, 0x8b, 0x73, 0xec, 0x01, 0xb4, 0xa3, 0x1f, 0xe8, 0xc8, 0x18, 0xe8, 0x75, 0x7b, 0xa7, 0xd1, + 0xc5, 0xb9, 0xae, 0x43, 0xfd, 0x6a, 0x66, 0xa8, 0xb1, 0x55, 0xae, 0xfb, 0x0f, 0xf6, 0x9f, 0x3b, + 0x30, 0x99, 0x3f, 0xd8, 0xc7, 0xf0, 0x36, 0xea, 0x1b, 0xe6, 0xdb, 0xa8, 0xbf, 0x68, 0x6f, 0x8a, + 0x99, 0x5d, 0xe9, 0xf2, 0x4a, 0xea, 0x7f, 0x77, 0xe0, 0xb4, 0xac, 0xc0, 0x76, 0xcf, 0x59, 0x3f, + 0x60, 0xc1, 0x3b, 0x47, 0x3f, 0xcd, 0x5e, 0x37, 0xa6, 0xd9, 0x8b, 0xf6, 0x3a, 0xae, 0xf7, 0xa3, + 0xeb, 0x9b, 0xf2, 0x7f, 0xe6, 0x40, 0x29, 0xaf, 0xc2, 0x31, 0x7c, 0xf2, 0xd7, 0xcc, 0x4f, 0x7e, + 0xfd, 0x68, 0x7a, 0xde, 0xfd, 0x83, 0x97, 0xba, 0x0d, 0x14, 0x6a, 0x48, 0xbd, 0xca, 0xb1, 0xe5, + 0x93, 0xe6, 0x2c, 0xf2, 0x15, 0xb4, 0x06, 0x0c, 0xc4, 0x2c, 0x4a, 0x45, 0x4c, 0x81, 0x2b, 0x36, + 0xb4, 0x2d, 0x4a, 0x4f, 0xd8, 0xd8, 0xd9, 0xff, 0x58, 0xf0, 0x70, 0x7f, 0xb3, 0x00, 0xe7, 0xd4, + 0x9b, 0xc7, 0x64, 0x87, 0x34, 0xd2, 0xf5, 0xc1, 0x5e, 0xc8, 0xf0, 0xd4, 0x4f, 0x7b, 0x2f, 0x64, + 0xa4, 0x2c, 0xd2, 0xb5, 0x90, 0xc2, 0xb0, 0xc6, 0x13, 0x95, 0xe1, 0x0c, 0x7b, 0xd1, 0x62, 0xc1, + 0x0f, 0xbc, 0x86, 0xff, 0x2a, 0x89, 0x30, 0x69, 0x86, 0x3b, 0x5e, 0x43, 0x68, 0xea, 0xea, 0xd6, + 0xf4, 0x42, 0x1e, 0x12, 0xce, 0xaf, 0xdb, 0x71, 0xe2, 0xee, 0xeb, 0xf5, 0xc4, 0xed, 0xfe, 0x89, + 0x03, 0xa3, 0xc7, 0xf8, 0x42, 0x74, 0x68, 0x2e, 0x89, 0xe7, 0xec, 0x2d, 0x89, 0x2e, 0xcb, 0x60, + 0xaf, 0x08, 0x1d, 0x8f, 0xe6, 0xa2, 0xcf, 0x38, 0x2a, 0x8e, 0x87, 0xc7, 0x4b, 0x7e, 0xd8, 0x5e, + 0x3b, 0x0e, 0x92, 0x6a, 0x13, 0x7d, 0x23, 0x93, 0x7f, 0xb4, 0x60, 0x2b, 0x89, 0x56, 0x47, 0x6b, + 0x0e, 0x91, 0x87, 0xf4, 0xab, 0x0e, 0x00, 0x6f, 0xa7, 0x48, 0x5f, 0x4e, 0xdb, 0xb6, 0x79, 0x64, + 0x23, 0x45, 0x99, 0xf0, 0xa6, 0xa9, 0x25, 0x94, 0x16, 0x60, 0xad, 0x25, 0xf7, 0x90, 0x60, 
0xf4, + 0x9e, 0x73, 0x9b, 0x7e, 0xd1, 0x81, 0x13, 0x99, 0xe6, 0xe6, 0xd4, 0xdf, 0x32, 0xdf, 0x78, 0xb4, + 0xa0, 0x59, 0x99, 0x49, 0xad, 0x75, 0xe3, 0xc9, 0x7f, 0x71, 0xc1, 0x78, 0x6d, 0x1c, 0xbd, 0x06, + 0xc3, 0xd2, 0xf2, 0x21, 0xa7, 0xb7, 0xcd, 0xb7, 0x6e, 0xd5, 0xf1, 0x46, 0x42, 0x62, 0x9c, 0xf2, + 0xcb, 0x84, 0x09, 0x16, 0x7a, 0x0a, 0x13, 0xbc, 0xbf, 0x2f, 0xe5, 0xe6, 0xdb, 0xa5, 0xfb, 0x8f, + 0xc4, 0x2e, 0xfd, 0x90, 0x75, 0xbb, 0xf4, 0xc3, 0xc7, 0x6c, 0x97, 0xd6, 0x9c, 0x84, 0xc5, 0x7b, + 0x70, 0x12, 0xbe, 0x06, 0xa7, 0x77, 0xd2, 0x43, 0xa7, 0x9a, 0x49, 0x22, 0x75, 0xd3, 0x93, 0xb9, + 0xd6, 0x68, 0x7a, 0x80, 0x8e, 0x13, 0x12, 0x24, 0xda, 0x71, 0x35, 0x8d, 0x50, 0xbc, 0x9e, 0x43, + 0x0e, 0xe7, 0x32, 0xc9, 0x7a, 0x7b, 0x06, 0x7b, 0xf0, 0xf6, 0xbc, 0xe5, 0xc0, 0x19, 0xaf, 0xe3, + 0x9a, 0x1d, 0x26, 0x5b, 0x22, 0xe4, 0xe4, 0x86, 0x3d, 0x15, 0xc2, 0x20, 0x2f, 0xdc, 0x6a, 0x79, + 0x45, 0x38, 0xbf, 0x41, 0xe8, 0xb1, 0xd4, 0xf5, 0xce, 0xe3, 0x5a, 0xf3, 0xfd, 0xe4, 0xdf, 0xc8, + 0xc6, 0xf3, 0x00, 0x1b, 0xfa, 0x8f, 0xda, 0x3d, 0x6d, 0x5b, 0x88, 0xe9, 0x19, 0xb9, 0x87, 0x98, + 0x9e, 0x8c, 0xeb, 0x6d, 0xd4, 0x92, 0xeb, 0x2d, 0x80, 0x09, 0xbf, 0xe9, 0xd5, 0xc8, 0x7a, 0xbb, + 0xd1, 0xe0, 0xf7, 0x66, 0xe4, 0x6b, 0xc4, 0xb9, 0x16, 0xbc, 0x95, 0xb0, 0xe2, 0x35, 0xb2, 0xef, + 0xd0, 0xab, 0xfb, 0x41, 0x4b, 0x19, 0x4a, 0xb8, 0x83, 0x36, 0x9d, 0xb0, 0x2c, 0x87, 0x20, 0x49, + 0xe8, 0x68, 0xb3, 0xc0, 0x91, 0x21, 0x3e, 0x61, 0xaf, 0xa4, 0x60, 0xac, 0xe3, 0xa0, 0x65, 0x18, + 0xae, 0x06, 0xb1, 0xb8, 0x31, 0x7c, 0x82, 0x09, 0xb3, 0x77, 0x51, 0x11, 0x38, 0x7f, 0xb5, 0xac, + 0xee, 0x0a, 0x3f, 0x94, 0x93, 0x14, 0x53, 0x95, 0xe3, 0xb4, 0x3e, 0x5a, 0x65, 0xc4, 0xc4, 0x3b, + 0x6b, 0x3c, 0x9e, 0xe3, 0x91, 0x2e, 0x0e, 0xa3, 0xf9, 0xab, 0xf2, 0xa5, 0xb8, 0x31, 0xc1, 0x4e, + 0x3c, 0x98, 0x96, 0x52, 0xd0, 0x5e, 0x85, 0x3e, 0xb9, 0xef, 0xab, 0xd0, 0x2c, 0x1b, 0x6e, 0xd2, + 0x50, 0xee, 0xe1, 0x0b, 0xd6, 0xb2, 0xe1, 0xa6, 0x91, 0x92, 0x22, 0x1b, 0x6e, 0x0a, 0xc0, 0x3a, + 0x4b, 0xb4, 0xd6, 0xcd, 0x4d, 0x7e, 0x8a, 0x09, 0x8d, 0x83, 0x3b, 0xbd, 0x75, 0x7f, 0xe9, 0xe9, + 0x7d, 0xfd, 0xa5, 0x1d, 0xfe, 0xdd, 0x33, 0x07, 0xf0, 0xef, 0xd6, 0x59, 0x9e, 0xd2, 0xc5, 0x39, + 0xe1, 0x52, 0xb7, 0x70, 0xbe, 0x63, 0x99, 0x51, 0x78, 0xe4, 0x29, 0xfb, 0x17, 0x73, 0x06, 0x5d, + 0x03, 0xc8, 0xcf, 0x1d, 0x3a, 0x80, 0x9c, 0x8a, 0xe7, 0x14, 0xce, 0x12, 0xde, 0x16, 0x85, 0x78, + 0x4e, 0xc1, 0x58, 0xc7, 0xc9, 0x7a, 0x4b, 0x1f, 0x3c, 0x32, 0x6f, 0xe9, 0xe4, 0x31, 0x78, 0x4b, + 0xcf, 0xf7, 0xec, 0x2d, 0xbd, 0x05, 0xa7, 0x5a, 0x61, 0x75, 0xde, 0x8f, 0xa3, 0x36, 0xbb, 0x48, + 0x38, 0xdb, 0xae, 0xd6, 0x48, 0xc2, 0xdc, 0xad, 0x23, 0x97, 0xde, 0xa5, 0x37, 0xb2, 0xc5, 0x16, + 0xb2, 0x5c, 0xa3, 0x99, 0x0a, 0xcc, 0x74, 0xc2, 0xa2, 0x6e, 0x73, 0x0a, 0x71, 0x1e, 0x0b, 0xdd, + 0x4f, 0xfb, 0xc8, 0xf1, 0xf8, 0x69, 0x3f, 0x08, 0x43, 0x71, 0xbd, 0x9d, 0x54, 0xc3, 0x9b, 0x01, + 0x73, 0xc6, 0x0f, 0xcf, 0xbe, 0x43, 0x99, 0xb2, 0x05, 0xfc, 0xce, 0xde, 0xd4, 0x84, 0xfc, 0x5f, + 0xb3, 0x62, 0x0b, 0x08, 0xfa, 0x66, 0x97, 0xfb, 0x4a, 0xee, 0x51, 0xde, 0x57, 0x3a, 0x77, 0xa0, + 0xbb, 0x4a, 0x79, 0xce, 0xe8, 0x47, 0x7f, 0xe6, 0x9c, 0xd1, 0x5f, 0x77, 0x60, 0x6c, 0x47, 0x77, + 0x19, 0x08, 0x87, 0xb9, 0x85, 0xc0, 0x1d, 0xc3, 0x13, 0x31, 0xeb, 0x52, 0x39, 0x67, 0x80, 0xee, + 0x64, 0x01, 0xd8, 0x6c, 0x49, 0x4e, 0x50, 0xd1, 0x63, 0xf7, 0x2b, 0xa8, 0xe8, 0x0d, 0x26, 0xc7, + 0xe4, 0x21, 0x97, 0x79, 0xd1, 0xed, 0xc6, 0x14, 0x4b, 0x99, 0xa8, 0x42, 0x8a, 0x75, 0x7e, 0xe8, + 0x0b, 0x0e, 0x4c, 0xc8, 0x73, 0x99, 0x70, 0xf9, 0xc5, 0x22, 0x2a, 0xd2, 0xe6, 0x71, 0x90, 0x85, + 0xd5, 0x6f, 0x64, 
0xf8, 0xe0, 0x0e, 0xce, 0x54, 0xaa, 0xab, 0x20, 0xb4, 0x5a, 0xcc, 0x82, 0x7f, + 0x85, 0x0e, 0x33, 0x93, 0x82, 0xb1, 0x8e, 0x83, 0xbe, 0xe5, 0x40, 0xb1, 0x1e, 0x86, 0xdb, 0x71, + 0xe9, 0x49, 0x26, 0xd0, 0x5f, 0xb0, 0xac, 0x9b, 0x5e, 0xa1, 0xb4, 0xb9, 0x52, 0xfa, 0xb4, 0xb4, + 0x1d, 0x31, 0xd8, 0x9d, 0xbd, 0xa9, 0x71, 0xe3, 0x39, 0xa7, 0xf8, 0xcd, 0xb7, 0x35, 0x88, 0xb0, + 0x6d, 0xb2, 0xa6, 0xa1, 0x2f, 0x3b, 0x30, 0x71, 0x33, 0x63, 0xd0, 0x10, 0x61, 0xa1, 0xd8, 0xbe, + 0xa9, 0x84, 0x0f, 0x77, 0x16, 0x8a, 0x3b, 0x5a, 0x80, 0x3e, 0x6f, 0x1a, 0x3a, 0x79, 0xfc, 0xa8, + 0xc5, 0x01, 0xcc, 0x18, 0x56, 0xf9, 0xb5, 0xa0, 0x7c, 0x8b, 0xe7, 0x3d, 0xc7, 0x87, 0x4c, 0xd2, + 0xce, 0xa4, 0x1f, 0x2b, 0xa7, 0x2a, 0x31, 0xed, 0x2d, 0x16, 0x16, 0xbb, 0xf1, 0xf9, 0x75, 0x73, + 0xcb, 0x97, 0xcf, 0xc2, 0xb8, 0xe9, 0xdb, 0x43, 0xef, 0x36, 0x9f, 0xea, 0xb8, 0x90, 0x7d, 0xf5, + 0x60, 0x4c, 0xe2, 0x1b, 0x2f, 0x1f, 0x18, 0x4f, 0x13, 0x14, 0x8e, 0xf4, 0x69, 0x82, 0xbe, 0xe3, + 0x79, 0x9a, 0x60, 0xe2, 0x28, 0x9e, 0x26, 0x38, 0x79, 0xa0, 0xa7, 0x09, 0xb4, 0xa7, 0x21, 0xfa, + 0xef, 0xf2, 0x34, 0xc4, 0x0c, 0x9c, 0x90, 0x77, 0x7f, 0x88, 0xc8, 0xfe, 0xce, 0xdd, 0xfe, 0xea, + 0x95, 0xf1, 0x39, 0xb3, 0x18, 0x67, 0xf1, 0xe9, 0x22, 0x2b, 0x06, 0xac, 0xe6, 0x80, 0xad, 0x77, + 0xa3, 0xcc, 0xa9, 0xc5, 0x8e, 0xcf, 0x42, 0x44, 0xc9, 0x68, 0xe7, 0x22, 0x83, 0xdd, 0x91, 0xff, + 0x60, 0xde, 0x02, 0xf4, 0x12, 0x94, 0xc2, 0xad, 0xad, 0x46, 0xe8, 0x55, 0xd3, 0xf7, 0x13, 0x64, + 0x5c, 0x02, 0xbf, 0xab, 0xaa, 0xd2, 0xed, 0xae, 0x75, 0xc1, 0xc3, 0x5d, 0x29, 0xa0, 0xb7, 0xa8, + 0x62, 0x92, 0x84, 0x11, 0xa9, 0xa6, 0xb6, 0x9a, 0x61, 0xd6, 0x67, 0x62, 0xbd, 0xcf, 0x65, 0x93, + 0x0f, 0xef, 0xbd, 0xfa, 0x28, 0x99, 0x52, 0x9c, 0x6d, 0x16, 0x8a, 0xe0, 0x6c, 0x2b, 0xcf, 0x54, + 0x14, 0x8b, 0x1b, 0x4b, 0xfb, 0x19, 0xac, 0xd4, 0x5b, 0xda, 0xb9, 0xc6, 0xa6, 0x18, 0x77, 0xa1, + 0xac, 0xbf, 0x71, 0x30, 0x74, 0x3c, 0x6f, 0x1c, 0x7c, 0x1c, 0xa0, 0x22, 0xb3, 0xad, 0x49, 0xe3, + 0xc3, 0xb2, 0x95, 0xab, 0x34, 0x9c, 0xa6, 0xf6, 0xac, 0xac, 0x62, 0x83, 0x35, 0x96, 0xe8, 0x7f, + 0xe7, 0x3e, 0x02, 0xc2, 0x2d, 0x2c, 0x35, 0xeb, 0x73, 0xe2, 0x67, 0xee, 0x21, 0x90, 0x7f, 0xe0, + 0xc0, 0x24, 0x9f, 0x79, 0x59, 0xe5, 0x9e, 0xaa, 0x16, 0xe2, 0x6e, 0x8f, 0xed, 0xd0, 0x15, 0x9e, + 0x35, 0xc9, 0xe0, 0xca, 0x1c, 0xdd, 0xfb, 0xb4, 0x04, 0x7d, 0x35, 0xe7, 0x48, 0x71, 0xc2, 0x96, + 0xcd, 0x32, 0xff, 0x29, 0x87, 0x53, 0xb7, 0x7b, 0x39, 0x45, 0xfc, 0xa3, 0xae, 0x26, 0x55, 0xc4, + 0x9a, 0xf7, 0x4b, 0x47, 0x64, 0x52, 0xd5, 0xdf, 0x9b, 0x38, 0x90, 0x61, 0xf5, 0x8b, 0x0e, 0x4c, + 0x78, 0x99, 0x50, 0x13, 0x66, 0x07, 0xb2, 0x62, 0x93, 0x9a, 0x89, 0xd2, 0xf8, 0x15, 0xa6, 0xe4, + 0x65, 0xa3, 0x5a, 0x70, 0x07, 0x73, 0xf4, 0x43, 0x07, 0xce, 0x27, 0x5e, 0xbc, 0xcd, 0xb3, 0x39, + 0xc7, 0xe9, 0x5d, 0x5d, 0xd1, 0xb8, 0xd3, 0x6c, 0x35, 0xbe, 0x62, 0x7d, 0x35, 0x6e, 0x74, 0xe7, + 0xc9, 0xd7, 0xe5, 0xa3, 0x62, 0x5d, 0x9e, 0xdf, 0x07, 0x13, 0xef, 0xd7, 0xf4, 0xc9, 0xcf, 0x38, + 0xfc, 0xd5, 0xaf, 0xae, 0x2a, 0xdf, 0xa6, 0xa9, 0xf2, 0xad, 0xd8, 0x7c, 0x77, 0x48, 0xd7, 0x3d, + 0x7f, 0xd5, 0x81, 0xd3, 0x79, 0x3b, 0x52, 0x4e, 0x93, 0x3e, 0x6a, 0x36, 0xc9, 0xe2, 0x29, 0x4b, + 0x6f, 0x90, 0x95, 0x67, 0x4f, 0x26, 0xaf, 0xc2, 0x23, 0x77, 0xfb, 0x8a, 0x77, 0xa3, 0x37, 0xa4, + 0xab, 0xc5, 0x7f, 0x36, 0xac, 0x79, 0x21, 0x13, 0xd2, 0xb2, 0x1e, 0xc3, 0x1d, 0xc0, 0x80, 0x1f, + 0x34, 0xfc, 0x80, 0x88, 0xfb, 0x9a, 0x36, 0xcf, 0xb0, 0xe2, 0xd9, 0x22, 0x4a, 0x1d, 0x0b, 0x2e, + 0xf7, 0xd9, 0x29, 0x99, 0x7d, 0x08, 0xae, 0xff, 0xf8, 0x1f, 0x82, 0xbb, 0x09, 0xc3, 0x37, 0xfd, + 0xa4, 0xce, 0x82, 0x29, 0x84, 0xaf, 0xcf, 
0xc2, 0x3d, 0x47, 0x4a, 0x2e, 0xed, 0xfb, 0x0d, 0xc9, + 0x00, 0xa7, 0xbc, 0xd0, 0x45, 0xce, 0x98, 0x45, 0x6e, 0x67, 0x43, 0x6a, 0x6f, 0xc8, 0x02, 0x9c, + 0xe2, 0xd0, 0xc1, 0x1a, 0xa5, 0xbf, 0x64, 0x0e, 0x28, 0x91, 0x19, 0xd9, 0x46, 0xc6, 0x4b, 0x41, + 0x91, 0xdf, 0x26, 0xbe, 0xa1, 0xf1, 0xc0, 0x06, 0x47, 0x95, 0x9c, 0x7a, 0xa8, 0x6b, 0x72, 0xea, + 0xd7, 0x99, 0xc2, 0x96, 0xf8, 0x41, 0x9b, 0xac, 0x05, 0x22, 0xde, 0x7b, 0xc5, 0xce, 0xdd, 0x67, + 0x4e, 0x93, 0x1f, 0xc1, 0xd3, 0xdf, 0x58, 0xe3, 0xa7, 0xb9, 0x5c, 0x46, 0xf6, 0x75, 0xb9, 0xa4, + 0x26, 0x97, 0x51, 0xeb, 0x26, 0x97, 0x84, 0xb4, 0xac, 0x98, 0x5c, 0x7e, 0xa6, 0xcc, 0x01, 0x7f, + 0xee, 0x00, 0x52, 0x7a, 0x97, 0x12, 0xa8, 0xc7, 0x10, 0x54, 0xf9, 0x09, 0x07, 0x20, 0x50, 0xcf, + 0x85, 0xda, 0xdd, 0x05, 0x39, 0xcd, 0xb4, 0x01, 0x29, 0x0c, 0x6b, 0x3c, 0xdd, 0x3f, 0x75, 0xd2, + 0xd8, 0xe5, 0xb4, 0xef, 0xc7, 0x10, 0x44, 0xb6, 0x6b, 0x06, 0x91, 0x6d, 0x58, 0x34, 0xdd, 0xab, + 0x6e, 0x74, 0x09, 0x27, 0xfb, 0x49, 0x01, 0x4e, 0xe8, 0xc8, 0x65, 0x72, 0x1c, 0x1f, 0xfb, 0xa6, + 0x11, 0x41, 0x7b, 0xcd, 0x6e, 0x7f, 0xcb, 0xc2, 0x03, 0x94, 0x17, 0xad, 0xfd, 0xf1, 0x4c, 0xb4, + 0xf6, 0x0d, 0xfb, 0xac, 0xf7, 0x0f, 0xd9, 0xfe, 0xcf, 0x0e, 0x9c, 0xca, 0xd4, 0x38, 0x86, 0x09, + 0xb6, 0x63, 0x4e, 0xb0, 0xe7, 0xad, 0xf7, 0xba, 0xcb, 0xec, 0xfa, 0x76, 0xa1, 0xa3, 0xb7, 0xec, + 0x10, 0xf7, 0x69, 0x07, 0x8a, 0x54, 0x5b, 0x96, 0xf1, 0x5c, 0x1f, 0x3d, 0x92, 0x19, 0xc0, 0xf4, + 0x7a, 0x21, 0x9d, 0x55, 0xfb, 0x18, 0x0c, 0x73, 0xee, 0x93, 0x9f, 0x72, 0x00, 0x52, 0xa4, 0xfb, + 0xa5, 0x02, 0xbb, 0xdf, 0x2d, 0xc0, 0x99, 0xdc, 0x69, 0x84, 0x3e, 0xab, 0x2c, 0x72, 0x8e, 0xed, + 0x68, 0x45, 0x83, 0x91, 0x6e, 0x98, 0x1b, 0x33, 0x0c, 0x73, 0xc2, 0x1e, 0x77, 0xbf, 0x0e, 0x30, + 0x42, 0x4c, 0x6b, 0x83, 0xf5, 0x63, 0x27, 0x0d, 0x80, 0x55, 0x79, 0x8d, 0xfe, 0x02, 0x5e, 0xe2, + 0x71, 0x7f, 0xa2, 0xdd, 0x70, 0x90, 0x1d, 0x3d, 0x06, 0x59, 0x71, 0xd3, 0x94, 0x15, 0xd8, 0xbe, + 0x1f, 0xb9, 0x8b, 0xb0, 0x78, 0x05, 0xf2, 0x1c, 0xcb, 0xbd, 0x25, 0x81, 0x34, 0xae, 0xc3, 0x16, + 0x7a, 0xbe, 0x0e, 0x3b, 0x06, 0x23, 0x2f, 0xfa, 0x2a, 0x81, 0xe8, 0xec, 0xf4, 0xf7, 0x7e, 0x74, + 0xe1, 0x81, 0xef, 0xff, 0xe8, 0xc2, 0x03, 0x3f, 0xfc, 0xd1, 0x85, 0x07, 0x3e, 0x71, 0xfb, 0x82, + 0xf3, 0xbd, 0xdb, 0x17, 0x9c, 0xef, 0xdf, 0xbe, 0xe0, 0xfc, 0xf0, 0xf6, 0x05, 0xe7, 0xdf, 0xde, + 0xbe, 0xe0, 0xfc, 0xad, 0x7f, 0x77, 0xe1, 0x81, 0x17, 0x87, 0x64, 0xc7, 0xfe, 0x5f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xc9, 0x42, 0x1f, 0x61, 0xd5, 0xd8, 0x00, 0x00, +} + +func (m *Amount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Amount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Amount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArchiveStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArchiveStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArchiveStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = 
i + var l int + _ = l + if m.Zip != nil { + { + size, err := m.Zip.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.None != nil { + { + size, err := m.None.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Tar != nil { + { + size, err := m.Tar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Arguments) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Arguments) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Arguments) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Artifacts) > 0 { + for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtGCStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtGCStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtGCStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.NotSpecified { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.PodsRecouped) > 0 { + keysForPodsRecouped := make([]string, 0, len(m.PodsRecouped)) + for k := range m.PodsRecouped { + keysForPodsRecouped = append(keysForPodsRecouped, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodsRecouped) + for iNdEx := len(keysForPodsRecouped) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodsRecouped[string(keysForPodsRecouped[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForPodsRecouped[iNdEx]) + copy(dAtA[i:], keysForPodsRecouped[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodsRecouped[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.StrategiesProcessed) > 0 { + keysForStrategiesProcessed := make([]string, 0, len(m.StrategiesProcessed)) + for k := range m.StrategiesProcessed { + keysForStrategiesProcessed = append(keysForStrategiesProcessed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStrategiesProcessed) + for iNdEx := len(keysForStrategiesProcessed) - 1; iNdEx >= 0; iNdEx-- { + v := m.StrategiesProcessed[ArtifactGCStrategy(keysForStrategiesProcessed[iNdEx])] + baseI := 
i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForStrategiesProcessed[iNdEx]) + copy(dAtA[i:], keysForStrategiesProcessed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStrategiesProcessed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Artifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Artifact) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Artifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + if m.ArtifactGC != nil { + { + size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + i -= len(m.FromExpression) + copy(dAtA[i:], m.FromExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FromExpression))) + i-- + dAtA[i] = 0x5a + i-- + if m.RecurseMode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x4a + i-- + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.Archive != nil { + { + size, err := m.Archive.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.GlobalName) + copy(dAtA[i:], m.GlobalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) + i-- + dAtA[i] = 0x32 + { + size, err := m.ArtifactLocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x22 + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactGC) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactGC) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x1a + if m.PodMetadata != nil { + { + size, err := m.PodMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactGCSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactGCSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactGCSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ArtifactsByNode) > 0 { + keysForArtifactsByNode := make([]string, 0, len(m.ArtifactsByNode)) + for k := range m.ArtifactsByNode { + keysForArtifactsByNode = append(keysForArtifactsByNode, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactsByNode) + for iNdEx := len(keysForArtifactsByNode) - 1; iNdEx >= 0; iNdEx-- { + v := m.ArtifactsByNode[string(keysForArtifactsByNode[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForArtifactsByNode[iNdEx]) + copy(dAtA[i:], keysForArtifactsByNode[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactsByNode[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtifactGCStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactGCStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactGCStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ArtifactResultsByNode) > 0 { + keysForArtifactResultsByNode := make([]string, 0, len(m.ArtifactResultsByNode)) + for k := range m.ArtifactResultsByNode { + keysForArtifactResultsByNode = append(keysForArtifactResultsByNode, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResultsByNode) + for iNdEx := len(keysForArtifactResultsByNode) - 1; iNdEx >= 0; iNdEx-- { + v := m.ArtifactResultsByNode[string(keysForArtifactResultsByNode[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForArtifactResultsByNode[iNdEx]) + copy(dAtA[i:], keysForArtifactResultsByNode[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactResultsByNode[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtifactLocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactLocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactLocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ 
= l + if m.Azure != nil { + { + size, err := m.Azure.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.GCS != nil { + { + size, err := m.GCS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.OSS != nil { + { + size, err := m.OSS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Raw != nil { + { + size, err := m.Raw.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.HDFS != nil { + { + size, err := m.HDFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Artifactory != nil { + { + size, err := m.Artifactory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.S3 != nil { + { + size, err := m.S3.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ArchiveLogs != nil { + i-- + if *m.ArchiveLogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ArtifactNodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactNodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactNodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Artifacts) > 0 { + keysForArtifacts := make([]string, 0, len(m.Artifacts)) + for k := range m.Artifacts { + keysForArtifacts = append(keysForArtifacts, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForArtifacts) + for iNdEx := len(keysForArtifacts) - 1; iNdEx >= 0; iNdEx-- { + v := m.Artifacts[string(keysForArtifacts[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForArtifacts[iNdEx]) + copy(dAtA[i:], keysForArtifacts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifacts[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.ArchiveLocation != nil { + { + size, err := m.ArchiveLocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtifactPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactPaths) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactRepository) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactRepository) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Azure != nil { + { + size, err := m.Azure.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.GCS != nil { + { + size, err := m.GCS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.OSS != nil { + { + size, err := m.OSS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.HDFS != nil { + { + size, err := m.HDFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Artifactory != nil { + { + size, err := m.Artifactory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.S3 != nil { + { + size, err := m.S3.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ArchiveLogs != nil { + i-- + if *m.ArchiveLogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ArtifactRepositoryRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactRepositoryRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactRepositoryRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + i -= len(m.ConfigMap) + copy(dAtA[i:], m.ConfigMap) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConfigMap))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactRepositoryRefStatus) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactRepositoryRefStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactRepositoryRefStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ArtifactRepository != nil { + { + size, err := m.ArtifactRepository.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Default { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i-- + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactResultNodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactResultNodeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactResultNodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ArtifactResults) > 0 { + keysForArtifactResults := make([]string, 0, len(m.ArtifactResults)) + for k := range m.ArtifactResults { + keysForArtifactResults = append(keysForArtifactResults, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResults) + for iNdEx := len(keysForArtifactResults) - 1; iNdEx >= 0; iNdEx-- { + v := m.ArtifactResults[string(keysForArtifactResults[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForArtifactResults[iNdEx]) + copy(dAtA[i:], keysForArtifactResults[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactResults[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtifactSearchQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactSearchQuery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactSearchQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeTypes) > 0 { + keysForNodeTypes := make([]string, 0, len(m.NodeTypes)) + for k := range m.NodeTypes { + keysForNodeTypes = append(keysForNodeTypes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeTypes) + for iNdEx := len(keysForNodeTypes) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeTypes[NodeType(keysForNodeTypes[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForNodeTypes[iNdEx]) + copy(dAtA[i:], keysForNodeTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeTypes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.Deleted != nil { + i-- + if *m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.NodeId) + copy(dAtA[i:], m.NodeId) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeId))) + i-- + dAtA[i] = 0x22 + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ArtifactName) + copy(dAtA[i:], m.ArtifactName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ArtifactName))) + i-- + dAtA[i] = 0x12 + if len(m.ArtifactGCStrategies) > 0 { + keysForArtifactGCStrategies := make([]string, 0, len(m.ArtifactGCStrategies)) + for k := range m.ArtifactGCStrategies { + keysForArtifactGCStrategies = append(keysForArtifactGCStrategies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactGCStrategies) + for iNdEx := len(keysForArtifactGCStrategies) - 1; iNdEx >= 0; iNdEx-- { + v := m.ArtifactGCStrategies[ArtifactGCStrategy(keysForArtifactGCStrategies[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForArtifactGCStrategies[iNdEx]) + copy(dAtA[i:], keysForArtifactGCStrategies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactGCStrategies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtifactSearchResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactSearchResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArtifactSearchResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArtifactoryArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ArtifactoryArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ArtifactoryArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ArtifactoryAuth.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.URL)
+	copy(dAtA[i:], m.URL)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ArtifactoryArtifactRepository) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ArtifactoryArtifactRepository) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ArtifactoryArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.KeyFormat)
+	copy(dAtA[i:], m.KeyFormat)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RepoURL)
+	copy(dAtA[i:], m.RepoURL)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepoURL)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ArtifactoryAuth.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ArtifactoryAuth) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ArtifactoryAuth) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ArtifactoryAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.PasswordSecret != nil {
+		{
+			size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.UsernameSecret != nil {
+		{
+			size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureArtifact) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Blob)
+	copy(dAtA[i:], m.Blob)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Blob)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.AzureBlobContainer.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureArtifactRepository) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureArtifactRepository) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.BlobNameFormat)
+	copy(dAtA[i:], m.BlobNameFormat)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.BlobNameFormat)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.AzureBlobContainer.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureBlobContainer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureBlobContainer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureBlobContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.UseSDKCreds {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.AccountKeySecret != nil {
+		{
+			size, err := m.AccountKeySecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Endpoint)
+	copy(dAtA[i:], m.Endpoint)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Backoff) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Backoff) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Backoff) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MaxDuration)
+	copy(dAtA[i:], m.MaxDuration)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxDuration)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Factor != nil {
+		{
+			size, err := m.Factor.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Duration)
+	copy(dAtA[i:], m.Duration)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BasicAuth) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BasicAuth) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BasicAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.PasswordSecret != nil {
+		{
+			size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.UsernameSecret != nil {
+		{
+			size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Cache) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Cache) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Cache) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ConfigMap != nil {
+		{
+			size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ClientCertAuth) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClientCertAuth) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientCertAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ClientKeySecret != nil {
+		{
+			size, err := m.ClientKeySecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ClientCertSecret != nil {
+		{
+			size, err := m.ClientCertSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterWorkflowTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterWorkflowTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterWorkflowTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterWorkflowTemplateList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterWorkflowTemplateList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterWorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Column) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Column) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Column) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerNode) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerNode) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerNode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Dependencies) > 0 {
+		for iNdEx := len(m.Dependencies) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Dependencies[iNdEx])
+			copy(dAtA[i:], m.Dependencies[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Dependencies[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerSetRetryStrategy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerSetRetryStrategy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerSetRetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Retries != nil {
+		{
+			size, err := m.Retries.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Duration)
+	copy(dAtA[i:], m.Duration)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerSetTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerSetTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerSetTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RetryStrategy != nil {
+		{
+			size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.Containers) > 0 {
+		for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.VolumeMounts) > 0 {
+		for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ContinueOn) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContinueOn) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContinueOn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Failed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Error {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Counter) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CreateS3BucketOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateS3BucketOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateS3BucketOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ObjectLocking {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	return len(dAtA) - i, nil
+}
+
+func (m *CronWorkflow) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CronWorkflow) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CronWorkflow) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CronWorkflowList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CronWorkflowList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CronWorkflowList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CronWorkflowSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CronWorkflowSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CronWorkflowSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.When)
+	copy(dAtA[i:], m.When)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.When)))
+	i--
+	dAtA[i] = 0x62
+	if len(m.Schedules) > 0 {
+		for iNdEx := len(m.Schedules) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Schedules[iNdEx])
+			copy(dAtA[i:], m.Schedules[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedules[iNdEx])))
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.StopStrategy != nil {
+		{
+			size, err := m.StopStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.WorkflowMetadata != nil {
+		{
+			size, err := m.WorkflowMetadata.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	i -= len(m.Timezone)
+	copy(dAtA[i:], m.Timezone)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timezone)))
+	i--
+	dAtA[i] = 0x42
+	if m.FailedJobsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.SuccessfulJobsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.StartingDeadlineSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds))
+		i--
+		dAtA[i] = 0x28
+	}
+	i--
+	if m.Suspend {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.ConcurrencyPolicy)
+	copy(dAtA[i:], m.ConcurrencyPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Schedule)
+	copy(dAtA[i:], m.Schedule)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.WorkflowSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CronWorkflowStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CronWorkflowStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CronWorkflowStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0x32
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Failed))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded))
+	i--
+	dAtA[i] = 0x20
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.LastScheduledTime != nil {
+		{
+			size, err := m.LastScheduledTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Active) > 0 {
+		for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DAGTask) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DAGTask) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DAGTask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Inline != nil {
+		{
+			size, err := m.Inline.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	if len(m.Hooks) > 0 {
+		keysForHooks := make([]string, 0, len(m.Hooks))
+		for k := range m.Hooks {
+			keysForHooks = append(keysForHooks, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForHooks)
+		for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForHooks[iNdEx])
+			copy(dAtA[i:], keysForHooks[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHooks[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x6a
+		}
+	}
+	i -= len(m.Depends)
+	copy(dAtA[i:], m.Depends)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Depends)))
+	i--
+	dAtA[i] = 0x62
+	i -= len(m.OnExit)
+	copy(dAtA[i:], m.OnExit)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit)))
+	i--
+	dAtA[i] = 0x5a
+	if m.ContinueOn != nil {
+		{
+			size, err := m.ContinueOn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	i -= len(m.When)
+	copy(dAtA[i:], m.When)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.When)))
+	i--
+	dAtA[i] = 0x4a
+	if m.WithSequence != nil {
+		{
+			size, err := m.WithSequence.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	i -= len(m.WithParam)
+	copy(dAtA[i:], m.WithParam)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam)))
+	i--
+	dAtA[i] = 0x3a
+	if len(m.WithItems) > 0 {
+		for iNdEx := len(m.WithItems) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.WithItems[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.Dependencies) > 0 {
+		for iNdEx := len(m.Dependencies) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Dependencies[iNdEx])
+			copy(dAtA[i:], m.Dependencies[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Dependencies[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.TemplateRef != nil {
+		{
+			size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	{
+		size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Template)
+	copy(dAtA[i:], m.Template)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DAGTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DAGTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DAGTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FailFast != nil {
+		i--
+		if *m.FailFast {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Tasks) > 0 {
+		for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Target)
+	copy(dAtA[i:], m.Target)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Target)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Data) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Data) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Transformation) > 0 {
+		for iNdEx := len(m.Transformation) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Transformation[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.Source.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DataSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DataSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ArtifactPaths != nil {
+		{
+			size, err := m.ArtifactPaths.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Selector)
+	copy(dAtA[i:], m.Selector)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ExecutorConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecutorConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecutorConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ServiceAccountName)
+	copy(dAtA[i:], m.ServiceAccountName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GCSArtifact) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GCSArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GCSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.GCSBucket.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GCSArtifactRepository) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GCSArtifactRepository) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GCSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.KeyFormat)
+	copy(dAtA[i:], m.KeyFormat)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.GCSBucket.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GCSBucket) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GCSBucket) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GCSBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ServiceAccountKeySecret != nil {
+		{
+			size, err := m.ServiceAccountKeySecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Bucket)
+	copy(dAtA[i:], m.Bucket)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Gauge) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Gauge) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Operation)
+	copy(dAtA[i:], m.Operation)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Realtime != nil {
+		i--
+		if *m.Realtime {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitArtifact) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.InsecureSkipTLS {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x60
+	i -= len(m.Branch)
+	copy(dAtA[i:], m.Branch)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Branch)))
+	i--
+	dAtA[i] = 0x5a
+	i--
+	if m.SingleBranch {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i--
+	if m.DisableSubmodules {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	i--
+	if m.InsecureIgnoreHostKey {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
+	if m.SSHPrivateKeySecret != nil {
+		{
+			size, err := m.SSHPrivateKeySecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.PasswordSecret != nil {
+		{
+			size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.UsernameSecret != nil {
+		{
+			size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.Fetch) > 0 {
+		for iNdEx := len(m.Fetch) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Fetch[iNdEx])
+			copy(dAtA[i:], m.Fetch[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Fetch[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.Depth != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Depth))
+		i--
+		dAtA[i] = 0x18
+	}
+	i -= len(m.Revision)
+	copy(dAtA[i:], m.Revision)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Repo)
+	copy(dAtA[i:], m.Repo)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repo)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HDFSArtifact) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HDFSArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HDFSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Force {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.HDFSConfig.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HDFSArtifactRepository) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HDFSArtifactRepository) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HDFSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Force {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.PathFormat)
+	copy(dAtA[i:], m.PathFormat)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathFormat)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.HDFSConfig.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HDFSConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HDFSConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HDFSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DataTransferProtection)
+	copy(dAtA[i:], m.DataTransferProtection)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataTransferProtection)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.HDFSUser)
+	copy(dAtA[i:], m.HDFSUser)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HDFSUser)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Addresses) > 0 {
+		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Addresses[iNdEx])
+			copy(dAtA[i:], m.Addresses[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.HDFSKrbConfig.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HDFSKrbConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HDFSKrbConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HDFSKrbConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.KrbServicePrincipalName)
+	copy(dAtA[i:], m.KrbServicePrincipalName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbServicePrincipalName)))
+	i--
+	dAtA[i] = 0x32
+	if m.KrbConfigConfigMap != nil {
+		{
+			size, err := m.KrbConfigConfigMap.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.KrbRealm)
+	copy(dAtA[i:], m.KrbRealm)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbRealm)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.KrbUsername)
+	copy(dAtA[i:], m.KrbUsername)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbUsername)))
+	i--
+	dAtA[i] = 0x1a
+	if m.KrbKeytabSecret != nil {
+		{
+			size, err := m.KrbKeytabSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.KrbCCacheSecret != nil {
+		{
+			size, err := m.KrbCCacheSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTP) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTP) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTP) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.BodyFrom != nil {
+		{
+			size, err := m.BodyFrom.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	i--
+	if m.InsecureSkipVerify {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x38
+	i -= len(m.SuccessCondition)
+	copy(dAtA[i:], m.SuccessCondition)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SuccessCondition)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Body)
+	copy(dAtA[i:], m.Body)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Body)))
+	i--
+	dAtA[i] = 0x2a
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	if len(m.Headers) > 0 {
+		for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.URL)
+	copy(dAtA[i:], m.URL)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Method)
+	copy(dAtA[i:], m.Method)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Method)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPArtifact) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPArtifact) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Auth != nil {
+		{
+			size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Headers) > 0 {
+		for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.URL)
+	copy(dAtA[i:], m.URL)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPAuth) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPAuth) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.BasicAuth.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.OAuth2.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ClientCert.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPBodySource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPBodySource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPBodySource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Bytes != nil {
+		i -= len(m.Bytes)
+		copy(dAtA[i:], m.Bytes)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bytes)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ValueFrom != nil {
+		{
+			size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPHeaderSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPHeaderSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPHeaderSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretKeyRef != nil {
+		{
+			size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Header) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Header) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Histogram) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Buckets) > 0 {
+		for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x1a
+	return len(dAtA) - i, nil
+}
+
+func (m *Inputs) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Inputs) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Inputs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Artifacts) > 0 {
+		for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Parameters) > 0 {
+		for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Item) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Item) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Item) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Value != nil {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LabelKeys) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LabelKeys) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelKeys) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Items[iNdEx])
+			copy(dAtA[i:], m.Items[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Items[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LabelValueFrom) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LabelValueFrom) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LabelValues) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LabelValues) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelValues) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Items[iNdEx])
+			copy(dAtA[i:], m.Items[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Items[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LifecycleHook) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0x22
+	if m.TemplateRef != nil {
+		{
+			size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	{
+		size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Template)
+	copy(dAtA[i:], m.Template)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Link) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Link) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.URL)
+	copy(dAtA[i:], m.URL)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Scope)
+	copy(dAtA[i:], m.Scope)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ManifestFrom) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ManifestFrom) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ManifestFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Artifact != nil {
+		{
+			size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemoizationStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemoizationStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemoizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CacheName)
+	copy(dAtA[i:], m.CacheName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CacheName)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x12
+	i--
+	if m.Hit {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Memoize) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Memoize) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Memoize) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MaxAge)
+	copy(dAtA[i:], m.MaxAge)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxAge)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Cache != nil {
+		{
+			size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Labels) > 0 {
+		keysForLabels := make([]string, 0, len(m.Labels))
+		for k := range m.Labels {
+			keysForLabels = append(keysForLabels, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+		for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Labels[string(keysForLabels[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForLabels[iNdEx])
+			copy(dAtA[i:], keysForLabels[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MetricLabel) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MetricLabel) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetricLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Metrics) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Prometheus) > 0 {
+		for iNdEx := len(m.Prometheus) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Prometheus[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Mutex) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Mutex) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Mutex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *MutexHolding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MutexHolding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MutexHolding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Holder)
+	copy(dAtA[i:], m.Holder)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Holder)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Mutex)
+	copy(dAtA[i:], m.Mutex)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mutex)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *MutexStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MutexStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MutexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Waiting) > 0 {
+		for iNdEx := len(m.Waiting) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Waiting[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Holding) > 0 {
+		for iNdEx := len(m.Holding) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Holding[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeFlag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeFlag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeFlag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Retried {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Hooked {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeResult) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeResult) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Progress)
+	copy(dAtA[i:], m.Progress)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress)))
+	i--
+	dAtA[i] = 0x22
+	if m.Outputs != nil {
+		{
+			size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NodeFlag != nil {
+		{
+			size, err := m.NodeFlag.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		
dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + i -= len(m.Progress) + copy(dAtA[i:], m.Progress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + if m.SynchronizationStatus != nil { + { + size, err := m.SynchronizationStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + i = encodeVarintGenerated(dAtA, i, uint64(m.EstimatedDuration)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + if m.MemoizationStatus != nil { + { + size, err := m.MemoizationStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + i -= len(m.HostNodeName) + copy(dAtA[i:], m.HostNodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostNodeName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + if len(m.ResourcesDuration) > 0 { + keysForResourcesDuration := make([]string, 0, len(m.ResourcesDuration)) + for k := range m.ResourcesDuration { + keysForResourcesDuration = append(keysForResourcesDuration, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) + for iNdEx := len(keysForResourcesDuration) - 1; iNdEx >= 0; iNdEx-- { + v := m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(keysForResourcesDuration[iNdEx])] + baseI := i + i = encodeVarintGenerated(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(keysForResourcesDuration[iNdEx]) + copy(dAtA[i:], keysForResourcesDuration[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResourcesDuration[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TemplateScope) + copy(dAtA[i:], m.TemplateScope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateScope))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.OutboundNodes) > 0 { + for iNdEx := len(m.OutboundNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OutboundNodes[iNdEx]) + copy(dAtA[i:], m.OutboundNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutboundNodes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.Children) > 0 { + for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Children[iNdEx]) + copy(dAtA[i:], m.Children[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Children[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.Outputs != nil { + { + size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.Inputs != nil { + { + size, err := m.Inputs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Daemoned != nil { + i-- + if *m.Daemoned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + i -= len(m.PodIP) + copy(dAtA[i:], m.PodIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i-- + dAtA[i] = 0x62 + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x4a + i -= len(m.BoundaryID) + copy(dAtA[i:], m.BoundaryID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BoundaryID))) + i-- + dAtA[i] = 0x42 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x3a + if m.TemplateRef != nil { + { + size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0x2a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeSynchronizationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSynchronizationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSynchronizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Waiting) + copy(dAtA[i:], m.Waiting) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Waiting))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NoneStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NoneStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NoneStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *OAuth2Auth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuth2Auth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuth2Auth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EndpointParams) > 0 { + for iNdEx := len(m.EndpointParams) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EndpointParams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.TokenURLSecret != nil { + { + size, err := m.TokenURLSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ClientSecretSecret != nil { + { + size, err := m.ClientSecretSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ClientIDSecret != nil { + { + size, err := m.ClientIDSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OAuth2EndpointParam) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuth2EndpointParam) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuth2EndpointParam) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OSSArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OSSArtifact) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OSSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.OSSBucket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OSSArtifactRepository) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OSSArtifactRepository) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OSSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.KeyFormat) + copy(dAtA[i:], m.KeyFormat) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) + i-- + dAtA[i] = 0x12 + { + size, err := m.OSSBucket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OSSBucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OSSBucket) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OSSBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.UseSDKCreds { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.LifecycleRule != nil { + { + size, err := m.LifecycleRule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.SecurityToken) + copy(dAtA[i:], m.SecurityToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecurityToken))) + i-- + dAtA[i] = 0x32 + i-- + if m.CreateBucketIfNotPresent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.SecretKeySecret != nil { + { + size, err := m.SecretKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AccessKeySecret != nil { + { + size, err := m.AccessKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Bucket) + copy(dAtA[i:], m.Bucket) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) + i-- + dAtA[i] = 0x12 + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OSSLifecycleRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OSSLifecycleRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OSSLifecycleRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MarkDeletionAfterDays)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MarkInfrequentAccessAfterDays)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Object) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Outputs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Outputs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Outputs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExitCode != nil { + i -= len(*m.ExitCode) + copy(dAtA[i:], *m.ExitCode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExitCode))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + i -= len(*m.Result) + 
copy(dAtA[i:], *m.Result) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Result))) + i-- + dAtA[i] = 0x1a + } + if len(m.Artifacts) > 0 { + for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ParallelSteps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParallelSteps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParallelSteps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Steps) > 0 { + for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Description != nil { + i -= len(*m.Description) + copy(dAtA[i:], *m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) + i-- + dAtA[i] = 0x3a + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Enum[iNdEx]) + copy(dAtA[i:], m.Enum[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Enum[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.GlobalName) + copy(dAtA[i:], m.GlobalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) + i-- + dAtA[i] = 0x2a + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x1a + } + if m.Default != nil { + i -= len(*m.Default) + copy(dAtA[i:], *m.Default) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Default))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Plugin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Plugin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Plugin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodGC) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodGC) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DeleteDelayDuration) + copy(dAtA[i:], m.DeleteDelayDuration) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeleteDelayDuration))) + i-- + dAtA[i] = 0x1a + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Prometheus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Prometheus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Prometheus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.When) + copy(dAtA[i:], m.When) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) + i-- + dAtA[i] = 0x22 + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x1a + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RawArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawArtifact) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *RawArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ManifestFrom != nil { + { + size, err := m.ManifestFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Flags) > 0 { + for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Flags[iNdEx]) + copy(dAtA[i:], m.Flags[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Flags[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.FailureCondition) + copy(dAtA[i:], m.FailureCondition) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailureCondition))) + i-- + dAtA[i] = 0x32 + i -= len(m.SuccessCondition) + copy(dAtA[i:], m.SuccessCondition) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SuccessCondition))) + i-- + dAtA[i] = 0x2a + i-- + if m.SetOwnerReference { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.Manifest) + copy(dAtA[i:], m.Manifest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifest))) + i-- + dAtA[i] = 0x1a + i -= len(m.MergeStrategy) + copy(dAtA[i:], m.MergeStrategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MergeStrategy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RetryAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetryAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeAntiAffinity != nil { + { + size, err := m.NodeAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryNodeAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryNodeAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetryNodeAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RetryStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *RetryStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x2a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Backoff != nil { + { + size, err := m.Backoff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.RetryPolicy) + copy(dAtA[i:], m.RetryPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RetryPolicy))) + i-- + dAtA[i] = 0x12 + if m.Limit != nil { + { + size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *S3Artifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3Artifact) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *S3Artifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.S3Bucket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *S3ArtifactRepository) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3ArtifactRepository) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *S3ArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.KeyPrefix) + copy(dAtA[i:], m.KeyPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPrefix))) + i-- + dAtA[i] = 0x1a + i -= len(m.KeyFormat) + copy(dAtA[i:], m.KeyFormat) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) + i-- + dAtA[i] = 0x12 + { + size, err := m.S3Bucket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *S3Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3Bucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *S3Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SessionTokenSecret != 
nil { + { + size, err := m.SessionTokenSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.CASecret != nil { + { + size, err := m.CASecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.EncryptionOptions != nil { + { + size, err := m.EncryptionOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.CreateBucketIfNotPresent != nil { + { + size, err := m.CreateBucketIfNotPresent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i-- + if m.UseSDKCreds { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + i -= len(m.RoleARN) + copy(dAtA[i:], m.RoleARN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) + i-- + dAtA[i] = 0x3a + if m.SecretKeySecret != nil { + { + size, err := m.SecretKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.AccessKeySecret != nil { + { + size, err := m.AccessKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Insecure != nil { + i-- + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x1a + i -= len(m.Bucket) + copy(dAtA[i:], m.Bucket) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) + i-- + dAtA[i] = 0x12 + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *S3EncryptionOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3EncryptionOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *S3EncryptionOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServerSideCustomerKeySecret != nil { + { + size, err := m.ServerSideCustomerKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.EnableEncryption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.KmsEncryptionContext) + copy(dAtA[i:], m.KmsEncryptionContext) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KmsEncryptionContext))) + i-- + dAtA[i] = 0x12 + i -= len(m.KmsKeyId) + copy(dAtA[i:], m.KmsKeyId) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KmsKeyId))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScriptTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ScriptTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScriptTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SemaphoreHolding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemaphoreHolding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SemaphoreHolding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Holders) > 0 { + for iNdEx := len(m.Holders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Holders[iNdEx]) + copy(dAtA[i:], m.Holders[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Holders[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Semaphore) + copy(dAtA[i:], m.Semaphore) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Semaphore))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SemaphoreRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemaphoreRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SemaphoreRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SemaphoreStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemaphoreStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SemaphoreStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Waiting) > 0 { + for iNdEx := len(m.Waiting) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Waiting[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Holding) > 0 { + for iNdEx := len(m.Holding) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Holding[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Sequence) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sequence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sequence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x22 + if m.End != nil { + { + size, err := m.End.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Start != nil { + { + size, err := m.Start.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Count != nil { + { + size, err := m.Count.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Submit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Submit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Submit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Arguments != nil { + { + size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.WorkflowTemplateRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubmitOpts) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubmitOpts) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubmitOpts) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x70 + } + i -= len(m.PodPriorityClassName) + 
copy(dAtA[i:], m.PodPriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodPriorityClassName))) + i-- + dAtA[i] = 0x6a + i -= len(m.Annotations) + copy(dAtA[i:], m.Annotations) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Annotations))) + i-- + dAtA[i] = 0x62 + if m.OwnerReference != nil { + { + size, err := m.OwnerReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + i -= len(m.Labels) + copy(dAtA[i:], m.Labels) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Labels))) + i-- + dAtA[i] = 0x52 + i-- + if m.ServerDryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0x3a + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parameters[iNdEx]) + copy(dAtA[i:], m.Parameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Parameters[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Entrypoint) + copy(dAtA[i:], m.Entrypoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Entrypoint))) + i-- + dAtA[i] = 0x22 + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SuppliedValueFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuppliedValueFrom) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuppliedValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *SuspendTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuspendTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuspendTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Duration) + copy(dAtA[i:], m.Duration) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Synchronization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Synchronization) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Synchronization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Mutexes) > 0 { + for iNdEx := len(m.Mutexes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mutexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Semaphores) > 0 { + for iNdEx := len(m.Semaphores) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Semaphores[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Mutex != nil { + { + size, err := m.Mutex.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Semaphore != nil { + { + size, err := m.Semaphore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SynchronizationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SynchronizationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SynchronizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Mutex != nil { + { + size, err := m.Mutex.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Semaphore != nil { + { + size, err := m.Semaphore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TTLStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TTLStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TTLStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecondsAfterFailure != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterFailure)) + i-- + dAtA[i] = 0x18 + } + if m.SecondsAfterSuccess != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterSuccess)) + i-- + dAtA[i] = 0x10 + } + if m.SecondsAfterCompletion != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterCompletion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TarStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TarStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionLevel != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CompressionLevel)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *Template) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Plugin != nil { + { + size, err := m.Plugin.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if m.FailFast != nil { + i-- + if *m.FailFast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc8 + } + if m.ContainerSet != nil { + { + size, err := m.ContainerSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + i -= len(m.Timeout) + copy(dAtA[i:], m.Timeout) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timeout))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + if m.Memoize != nil { + { + size, err := m.Memoize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if m.Synchronization != nil { + { + size, err := m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Metrics != nil { + { + size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.Executor != nil { + { + size, err := m.Executor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + i -= len(m.PodSpecPatch) + copy(dAtA[i:], m.PodSpecPatch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x1 + 
i-- + dAtA[i] = 0xe2 + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.RetryStrategy != nil { + { + size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.ActiveDeadlineSeconds != nil { + { + size, err := m.ActiveDeadlineSeconds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.ArchiveLocation != nil { + { + size, err := m.ArchiveLocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.Suspend != nil { + { + size, err := m.Suspend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.DAG != nil { + { + size, err := m.DAG.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Script != nil { + { + size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x6a + } + if m.Container != nil { + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if len(m.Steps) > 0 { + for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Daemon != nil { + i-- + if *m.Daemon { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + { + size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Inputs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.Template) + copy(dAtA[i:], m.Template) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TransformationStep) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransformationStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransformationStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserContainer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MirrorVolumeMounts != nil { + i-- + if *m.MirrorVolumeMounts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValueFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueFrom) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x42 + i -= len(m.Event) + copy(dAtA[i:], m.Event) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Event))) + i-- + dAtA[i] = 0x3a + if m.Supplied != nil { + { + size, err := m.Supplied.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Default != nil { + i -= len(*m.Default) + copy(dAtA[i:], *m.Default) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Default))) + i-- + dAtA[i] = 0x2a + } + i -= len(m.Parameter) + copy(dAtA[i:], m.Parameter) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Parameter))) + i-- + dAtA[i] = 0x22 + i -= len(m.JQFilter) + copy(dAtA[i:], m.JQFilter) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JQFilter))) + i-- + dAtA[i] = 0x1a + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + i -= len(m.Platform) + copy(dAtA[i:], m.Platform) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Platform))) + i-- + dAtA[i] = 0x42 + i -= len(m.Compiler) + copy(dAtA[i:], m.Compiler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Compiler))) + i-- + dAtA[i] = 0x3a + i -= len(m.GoVersion) + copy(dAtA[i:], m.GoVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GoVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.GitTreeState) + copy(dAtA[i:], m.GitTreeState) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitTreeState))) + i-- + dAtA[i] = 0x2a + i -= len(m.GitTag) + copy(dAtA[i:], m.GitTag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitTag))) + i-- + dAtA[i] = 0x22 + i -= len(m.GitCommit) + copy(dAtA[i:], m.GitCommit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitCommit))) + i-- + dAtA[i] = 0x1a + i -= len(m.BuildDate) + copy(dAtA[i:], m.BuildDate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildDate))) + i-- + dAtA[i] = 0x12 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeClaimGC) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeClaimGC) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeClaimGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Workflow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowArtifactGCTask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowArtifactGCTask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowArtifactGCTask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowArtifactGCTaskList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowArtifactGCTaskList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowArtifactGCTaskList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowEventBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowEventBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowEventBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowEventBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowEventBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowEventBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowEventBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *WorkflowEventBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowEventBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Submit != nil { + { + size, err := m.Submit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowLevelArtifactGC) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowLevelArtifactGC) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowLevelArtifactGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PodSpecPatch) + copy(dAtA[i:], m.PodSpecPatch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) + i-- + dAtA[i] = 0x1a + i-- + if m.ForceFinalizerRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + { + size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelsFrom) > 0 { + keysForLabelsFrom := make([]string, 0, len(m.LabelsFrom)) + for k := range m.LabelsFrom { + keysForLabelsFrom = append(keysForLabelsFrom, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelsFrom) + for iNdEx := len(keysForLabelsFrom) - 1; iNdEx >= 0; iNdEx-- { + v := m.LabelsFrom[string(keysForLabelsFrom[iNdEx])] + baseI := i + { + size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLabelsFrom[iNdEx]) + copy(dAtA[i:], keysForLabelsFrom[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabelsFrom[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ArtifactGC != nil { + { + size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if m.WorkflowMetadata != nil { + { + size, err := m.WorkflowMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.Hooks) > 0 { + keysForHooks := make([]string, 0, len(m.Hooks)) + for k := range m.Hooks { + keysForHooks = append(keysForHooks, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) + for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHooks[iNdEx]) + copy(dAtA[i:], keysForHooks[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForHooks[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + } + if m.ArchiveLogs != nil { + i-- + if *m.ArchiveLogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if m.TemplateDefaults != nil { + { + size, err := m.TemplateDefaults.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.PodMetadata != nil { + { + size, err := m.PodMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.RetryStrategy != nil { + { + size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if m.VolumeClaimGC != nil { + { + size, err := m.VolumeClaimGC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Synchronization != nil { + { + size, err := m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.WorkflowTemplateRef != nil { + { + size, err := m.WorkflowTemplateRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Shutdown) + copy(dAtA[i:], m.Shutdown) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Shutdown))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + if m.Metrics != nil { + { + size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.PodDisruptionBudget != nil { + { + size, err := m.PodDisruptionBudget.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.TTLStrategy != nil { + { + size, err := m.TTLStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.Executor != nil { + { + size, err := m.Executor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + i -= len(m.PodSpecPatch) + copy(dAtA[i:], m.PodSpecPatch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 
+ i-- + dAtA[i] = 0xd2 + } + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.PodPriority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPriority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + i -= len(m.PodPriorityClassName) + copy(dAtA[i:], m.PodPriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodPriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + if m.PodGC != nil { + { + size, err := m.PodGC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + i -= len(m.OnExit) + copy(dAtA[i:], m.OnExit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.DNSPolicy != nil { + i -= len(*m.DNSPolicy) + copy(dAtA[i:], *m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DNSPolicy))) + i-- + dAtA[i] = 0x7a + } + if m.HostNetwork != nil { + i-- + if *m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Suspend != nil { + i-- + if *m.Suspend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.ArtifactRepositoryRef != nil { + { + size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x38 + } + if len(m.VolumeClaimTemplates) > 0 { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Entrypoint) + copy(dAtA[i:], m.Entrypoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Entrypoint))) + i-- + dAtA[i] = 0x12 + if len(m.Templates) > 0 { + for iNdEx := len(m.Templates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Templates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TaskResultsCompletionStatus) > 0 { + keysForTaskResultsCompletionStatus := make([]string, 0, len(m.TaskResultsCompletionStatus)) + for k := range m.TaskResultsCompletionStatus { + keysForTaskResultsCompletionStatus = append(keysForTaskResultsCompletionStatus, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus) + for iNdEx := len(keysForTaskResultsCompletionStatus) - 1; iNdEx >= 0; iNdEx-- { + v := m.TaskResultsCompletionStatus[string(keysForTaskResultsCompletionStatus[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForTaskResultsCompletionStatus[iNdEx]) + copy(dAtA[i:], keysForTaskResultsCompletionStatus[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTaskResultsCompletionStatus[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.ArtifactGCStatus != nil { + { + size, err 
:= m.ArtifactGCStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.ArtifactRepositoryRef != nil { + { + size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Progress) + copy(dAtA[i:], m.Progress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i = encodeVarintGenerated(dAtA, i, uint64(m.EstimatedDuration)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.Synchronization != nil { + { + size, err := m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.StoredWorkflowSpec != nil { + { + size, err := m.StoredWorkflowSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.ResourcesDuration) > 0 { + keysForResourcesDuration := make([]string, 0, len(m.ResourcesDuration)) + for k := range m.ResourcesDuration { + keysForResourcesDuration = append(keysForResourcesDuration, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) + for iNdEx := len(keysForResourcesDuration) - 1; iNdEx >= 0; iNdEx-- { + v := m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(keysForResourcesDuration[iNdEx])] + baseI := i + i = encodeVarintGenerated(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(keysForResourcesDuration[iNdEx]) + copy(dAtA[i:], keysForResourcesDuration[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResourcesDuration[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + i -= len(m.OffloadNodeStatusVersion) + copy(dAtA[i:], m.OffloadNodeStatusVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OffloadNodeStatusVersion))) + i-- + dAtA[i] = 0x52 + if len(m.StoredTemplates) > 0 { + keysForStoredTemplates := make([]string, 0, len(m.StoredTemplates)) + for k := range m.StoredTemplates { + keysForStoredTemplates = append(keysForStoredTemplates, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStoredTemplates) + for iNdEx := len(keysForStoredTemplates) - 1; iNdEx >= 0; iNdEx-- { + v := m.StoredTemplates[string(keysForStoredTemplates[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForStoredTemplates[iNdEx]) + copy(dAtA[i:], keysForStoredTemplates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStoredTemplates[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if m.Outputs != nil { + { + size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.PersistentVolumeClaims) > 0 { + for iNdEx := len(m.PersistentVolumeClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PersistentVolumeClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Nodes) > 0 { + keysForNodes := make([]string, 0, len(m.Nodes)) + for k := range m.Nodes { + keysForNodes = append(keysForNodes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) + for iNdEx := len(keysForNodes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Nodes[string(keysForNodes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForNodes[iNdEx]) + copy(dAtA[i:], keysForNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.CompressedNodes) + copy(dAtA[i:], m.CompressedNodes) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompressedNodes))) + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowStep) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Inline != nil { + { + size, err := m.Inline.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if len(m.Hooks) > 0 { + keysForHooks := make([]string, 0, len(m.Hooks)) + for k := range m.Hooks { + keysForHooks = append(keysForHooks, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) + for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHooks[iNdEx]) + copy(dAtA[i:], keysForHooks[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHooks[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + i -= len(m.OnExit) + copy(dAtA[i:], m.OnExit) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.OnExit))) + i-- + dAtA[i] = 0x5a + if m.ContinueOn != nil { + { + size, err := m.ContinueOn.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.When) + copy(dAtA[i:], m.When) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) + i-- + dAtA[i] = 0x42 + if m.WithSequence != nil { + { + size, err := m.WithSequence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.WithParam) + copy(dAtA[i:], m.WithParam) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam))) + i-- + dAtA[i] = 0x32 + if len(m.WithItems) > 0 { + for iNdEx := len(m.WithItems) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.WithItems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.TemplateRef != nil { + { + size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Template) + copy(dAtA[i:], m.Template) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.NodeResult.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskResultList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskResultList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskResultList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + keysForTasks := make([]string, 0, len(m.Tasks)) + for k := range m.Tasks { + keysForTasks = append(keysForTasks, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTasks) + for iNdEx := len(keysForTasks) - 1; iNdEx >= 0; iNdEx-- { + v := m.Tasks[string(keysForTasks[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForTasks[iNdEx]) + copy(dAtA[i:], keysForTasks[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTasks[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTaskSetStatus) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTaskSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTaskSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + keysForNodes := make([]string, 0, len(m.Nodes)) + for k := range m.Nodes { + keysForNodes = append(keysForNodes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) + for iNdEx := len(keysForNodes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Nodes[string(keysForNodes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForNodes[iNdEx]) + copy(dAtA[i:], keysForNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ZipStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ZipStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ZipStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Amount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArchiveStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tar != nil { + l = m.Tar.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.None != nil { + l = m.None.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Zip != nil { + l = m.Zip.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Arguments) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Artifacts) > 0 { + for _, e := range m.Artifacts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ArtGCStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StrategiesProcessed) > 0 { + for k, v := range m.StrategiesProcessed { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.PodsRecouped) > 0 { + for k, v := range m.PodsRecouped { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 2 + return n +} + +func (m *Artifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + l = len(m.From) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ArtifactLocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GlobalName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Archive != nil { + l = m.Archive.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FromExpression) + n += 1 + l + sovGenerated(uint64(l)) + if m.ArtifactGC != nil { + l = m.ArtifactGC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ArtifactGC) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.PodMetadata != nil { + l = m.PodMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = 
len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactGCSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ArtifactsByNode) > 0 { + for k, v := range m.ArtifactsByNode { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ArtifactGCStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ArtifactResultsByNode) > 0 { + for k, v := range m.ArtifactResultsByNode { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ArtifactLocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArchiveLogs != nil { + n += 2 + } + if m.S3 != nil { + l = m.S3.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Artifactory != nil { + l = m.Artifactory.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HDFS != nil { + l = m.HDFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Raw != nil { + l = m.Raw.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OSS != nil { + l = m.OSS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCS != nil { + l = m.GCS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Azure != nil { + l = m.Azure.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArtifactNodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArchiveLocation != nil { + l = m.ArchiveLocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Artifacts) > 0 { + for k, v := range m.Artifacts { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ArtifactPaths) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Artifact.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactRepository) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArchiveLogs != nil { + n += 2 + } + if m.S3 != nil { + l = m.S3.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Artifactory != nil { + l = m.Artifactory.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HDFS != nil { + l = m.HDFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OSS != nil { + l = m.OSS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCS != nil { + l = m.GCS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Azure != nil { + l = m.Azure.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArtifactRepositoryRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConfigMap) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactRepositoryRefStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ArtifactRepositoryRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + 
sovGenerated(uint64(l)) + n += 2 + if m.ArtifactRepository != nil { + l = m.ArtifactRepository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArtifactResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArtifactResultNodeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ArtifactResults) > 0 { + for k, v := range m.ArtifactResults { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ArtifactSearchQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ArtifactGCStrategies) > 0 { + for k, v := range m.ArtifactGCStrategies { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ArtifactName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TemplateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeId) + n += 1 + l + sovGenerated(uint64(l)) + if m.Deleted != nil { + n += 2 + } + if len(m.NodeTypes) > 0 { + for k, v := range m.NodeTypes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ArtifactSearchResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Artifact.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactoryArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ArtifactoryAuth.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactoryArtifactRepository) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ArtifactoryAuth.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RepoURL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KeyFormat) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactoryAuth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UsernameSecret != nil { + l = m.UsernameSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PasswordSecret != nil { + l = m.PasswordSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.AzureBlobContainer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Blob) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AzureArtifactRepository) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.AzureBlobContainer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BlobNameFormat) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AzureBlobContainer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Endpoint) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if m.AccountKeySecret != nil { + l = m.AccountKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 
+[generated code hunk: gogo/protobuf `Size()` wire-size methods for the workflow `v1alpha1` API types (Backoff, BasicAuth, Cache, ClientCertAuth, ClusterWorkflowTemplate, CronWorkflow, DAGTask, Template, WorkflowSpec, WorkflowStatus, and the remaining types through ZipStrategy), the `sovGenerated`/`sozGenerated` varint-length helpers, and the beginning of the generated `String()` debug methods (Amount through ClusterWorkflowTemplateList)]
repeatedStringForItems := "[]ClusterWorkflowTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterWorkflowTemplate", "ClusterWorkflowTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterWorkflowTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Column) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Column{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *Condition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Condition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerNode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerNode{`, + `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, + `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSetRetryStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSetRetryStrategy{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `Retries:` + strings.Replace(fmt.Sprintf("%v", this.Retries), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSetTemplate) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForContainers := "[]ContainerNode{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerNode", "ContainerNode", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&ContainerSetTemplate{`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "ContainerSetRetryStrategy", "ContainerSetRetryStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContinueOn) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContinueOn{`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} +func (this *Counter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Counter{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *CreateS3BucketOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateS3BucketOptions{`, + `ObjectLocking:` + fmt.Sprintf("%v", this.ObjectLocking) + `,`, + `}`, + }, "") + return s +} +func (this *CronWorkflow) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CronWorkflow{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronWorkflowSpec", "CronWorkflowSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronWorkflowStatus", "CronWorkflowStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronWorkflowList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CronWorkflow{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronWorkflow", "CronWorkflow", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CronWorkflowList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CronWorkflowSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronWorkflowSpec{`, + `WorkflowSpec:` + strings.Replace(strings.Replace(this.WorkflowSpec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + fmt.Sprintf("%v", this.Suspend) + `,`, + `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `Timezone:` + fmt.Sprintf("%v", this.Timezone) + `,`, + `WorkflowMetadata:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowMetadata), "ObjectMeta", "v11.ObjectMeta", 1) + `,`, + `StopStrategy:` + strings.Replace(this.StopStrategy.String(), "StopStrategy", "StopStrategy", 1) + `,`, + `Schedules:` + fmt.Sprintf("%v", this.Schedules) + `,`, + `When:` + fmt.Sprintf("%v", this.When) + `,`, + `}`, + }, "") + return s +} +func (this *CronWorkflowStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CronWorkflowStatus{`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduledTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduledTime), "Time", "v11.Time", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s +} +func (this *DAGTask) String() string { + if this == nil { + return "nil" + } + repeatedStringForWithItems := "[]Item{" + for _, f := range this.WithItems { + repeatedStringForWithItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForWithItems += "}" + keysForHooks := make([]string, 0, len(this.Hooks)) + for k := range this.Hooks { + keysForHooks = 
append(keysForHooks, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) + mapStringForHooks := "LifecycleHooks{" + for _, k := range keysForHooks { + mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)]) + } + mapStringForHooks += "}" + s := strings.Join([]string{`&DAGTask{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, + `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, + `WithItems:` + repeatedStringForWithItems + `,`, + `WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`, + `WithSequence:` + strings.Replace(this.WithSequence.String(), "Sequence", "Sequence", 1) + `,`, + `When:` + fmt.Sprintf("%v", this.When) + `,`, + `ContinueOn:` + strings.Replace(this.ContinueOn.String(), "ContinueOn", "ContinueOn", 1) + `,`, + `OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`, + `Depends:` + fmt.Sprintf("%v", this.Depends) + `,`, + `Hooks:` + mapStringForHooks + `,`, + `Inline:` + strings.Replace(this.Inline.String(), "Template", "Template", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DAGTemplate) String() string { + if this == nil { + return "nil" + } + repeatedStringForTasks := "[]DAGTask{" + for _, f := range this.Tasks { + repeatedStringForTasks += strings.Replace(strings.Replace(f.String(), "DAGTask", "DAGTask", 1), `&`, ``, 1) + "," + } + repeatedStringForTasks += "}" + s := strings.Join([]string{`&DAGTemplate{`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Tasks:` + repeatedStringForTasks + `,`, + `FailFast:` + valueToStringGenerated(this.FailFast) + `,`, + `}`, + }, "") + return s +} +func (this *Data) String() string { + if this == nil { + return "nil" + } + repeatedStringForTransformation := "[]TransformationStep{" + for _, f := range this.Transformation { + repeatedStringForTransformation += strings.Replace(strings.Replace(f.String(), "TransformationStep", "TransformationStep", 1), `&`, ``, 1) + "," + } + repeatedStringForTransformation += "}" + s := strings.Join([]string{`&Data{`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "DataSource", "DataSource", 1), `&`, ``, 1) + `,`, + `Transformation:` + repeatedStringForTransformation + `,`, + `}`, + }, "") + return s +} +func (this *DataSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DataSource{`, + `ArtifactPaths:` + strings.Replace(this.ArtifactPaths.String(), "ArtifactPaths", "ArtifactPaths", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func (this *ExecutorConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecutorConfig{`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `}`, + }, "") + return s +} +func (this *GCSArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCSArtifact{`, + `GCSBucket:` + strings.Replace(strings.Replace(this.GCSBucket.String(), "GCSBucket", "GCSBucket", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this 
*GCSArtifactRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCSArtifactRepository{`, + `GCSBucket:` + strings.Replace(strings.Replace(this.GCSBucket.String(), "GCSBucket", "GCSBucket", 1), `&`, ``, 1) + `,`, + `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, + `}`, + }, "") + return s +} +func (this *GCSBucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCSBucket{`, + `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, + `ServiceAccountKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Gauge) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Gauge{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Realtime:` + valueToStringGenerated(this.Realtime) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `}`, + }, "") + return s +} +func (this *GitArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitArtifact{`, + `Repo:` + fmt.Sprintf("%v", this.Repo) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Depth:` + valueToStringGenerated(this.Depth) + `,`, + `Fetch:` + fmt.Sprintf("%v", this.Fetch) + `,`, + `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SSHPrivateKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SSHPrivateKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `InsecureIgnoreHostKey:` + fmt.Sprintf("%v", this.InsecureIgnoreHostKey) + `,`, + `DisableSubmodules:` + fmt.Sprintf("%v", this.DisableSubmodules) + `,`, + `SingleBranch:` + fmt.Sprintf("%v", this.SingleBranch) + `,`, + `Branch:` + fmt.Sprintf("%v", this.Branch) + `,`, + `InsecureSkipTLS:` + fmt.Sprintf("%v", this.InsecureSkipTLS) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSArtifact{`, + `HDFSConfig:` + strings.Replace(strings.Replace(this.HDFSConfig.String(), "HDFSConfig", "HDFSConfig", 1), `&`, ``, 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSArtifactRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSArtifactRepository{`, + `HDFSConfig:` + strings.Replace(strings.Replace(this.HDFSConfig.String(), "HDFSConfig", "HDFSConfig", 1), `&`, ``, 1) + `,`, + `PathFormat:` + fmt.Sprintf("%v", this.PathFormat) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSConfig{`, + `HDFSKrbConfig:` + strings.Replace(strings.Replace(this.HDFSKrbConfig.String(), "HDFSKrbConfig", "HDFSKrbConfig", 1), `&`, ``, 1) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `HDFSUser:` + fmt.Sprintf("%v", this.HDFSUser) + `,`, + `DataTransferProtection:` + fmt.Sprintf("%v", this.DataTransferProtection) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSKrbConfig) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&HDFSKrbConfig{`, + `KrbCCacheSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbCCacheSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbKeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbKeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbUsername:` + fmt.Sprintf("%v", this.KrbUsername) + `,`, + `KrbRealm:` + fmt.Sprintf("%v", this.KrbRealm) + `,`, + `KrbConfigConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.KrbConfigConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `KrbServicePrincipalName:` + fmt.Sprintf("%v", this.KrbServicePrincipalName) + `,`, + `}`, + }, "") + return s +} +func (this *HTTP) String() string { + if this == nil { + return "nil" + } + repeatedStringForHeaders := "[]HTTPHeader{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHeaders += "}" + s := strings.Join([]string{`&HTTP{`, + `Method:` + fmt.Sprintf("%v", this.Method) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Body:` + fmt.Sprintf("%v", this.Body) + `,`, + `SuccessCondition:` + fmt.Sprintf("%v", this.SuccessCondition) + `,`, + `InsecureSkipVerify:` + fmt.Sprintf("%v", this.InsecureSkipVerify) + `,`, + `BodyFrom:` + strings.Replace(this.BodyFrom.String(), "HTTPBodySource", "HTTPBodySource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPArtifact) String() string { + if this == nil { + return "nil" + } + repeatedStringForHeaders := "[]Header{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(strings.Replace(f.String(), "Header", "Header", 1), `&`, ``, 1) + "," + } + repeatedStringForHeaders += "}" + s := strings.Join([]string{`&HTTPArtifact{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "HTTPAuth", "HTTPAuth", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPAuth{`, + `ClientCert:` + strings.Replace(strings.Replace(this.ClientCert.String(), "ClientCertAuth", "ClientCertAuth", 1), `&`, ``, 1) + `,`, + `OAuth2:` + strings.Replace(strings.Replace(this.OAuth2.String(), "OAuth2Auth", "OAuth2Auth", 1), `&`, ``, 1) + `,`, + `BasicAuth:` + strings.Replace(strings.Replace(this.BasicAuth.String(), "BasicAuth", "BasicAuth", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPBodySource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPBodySource{`, + `Bytes:` + valueToStringGenerated(this.Bytes) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "HTTPHeaderSource", "HTTPHeaderSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeaderSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeaderSource{`, + `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") 
+ return s +} +func (this *Header) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Header{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Histogram) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuckets := "[]Amount{" + for _, f := range this.Buckets { + repeatedStringForBuckets += strings.Replace(strings.Replace(f.String(), "Amount", "Amount", 1), `&`, ``, 1) + "," + } + repeatedStringForBuckets += "}" + s := strings.Join([]string{`&Histogram{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Buckets:` + repeatedStringForBuckets + `,`, + `}`, + }, "") + return s +} +func (this *Inputs) String() string { + if this == nil { + return "nil" + } + repeatedStringForParameters := "[]Parameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + repeatedStringForArtifacts := "[]Artifact{" + for _, f := range this.Artifacts { + repeatedStringForArtifacts += strings.Replace(strings.Replace(f.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + "," + } + repeatedStringForArtifacts += "}" + s := strings.Join([]string{`&Inputs{`, + `Parameters:` + repeatedStringForParameters + `,`, + `Artifacts:` + repeatedStringForArtifacts + `,`, + `}`, + }, "") + return s +} +func (this *LabelKeys) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelKeys{`, + `Items:` + fmt.Sprintf("%v", this.Items) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValueFrom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValueFrom{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValues) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValues{`, + `Items:` + fmt.Sprintf("%v", this.Items) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LifecycleHook{`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Link) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Link{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *ManifestFrom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManifestFrom{`, + `Artifact:` + strings.Replace(this.Artifact.String(), "Artifact", "Artifact", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MemoizationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoizationStatus{`, + `Hit:` + fmt.Sprintf("%v", this.Hit) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `CacheName:` + fmt.Sprintf("%v", this.CacheName) + `,`, + `}`, + }, "") + return s +} +func (this *Memoize) String() string { + if this == nil { + 
return "nil" + } + s := strings.Join([]string{`&Memoize{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Cache:` + strings.Replace(this.Cache.String(), "Cache", "Cache", 1) + `,`, + `MaxAge:` + fmt.Sprintf("%v", this.MaxAge) + `,`, + `}`, + }, "") + return s +} +func (this *Metadata) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Metadata{`, + `Annotations:` + mapStringForAnnotations + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *MetricLabel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricLabel{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Metrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForPrometheus := "[]*Prometheus{" + for _, f := range this.Prometheus { + repeatedStringForPrometheus += strings.Replace(f.String(), "Prometheus", "Prometheus", 1) + "," + } + repeatedStringForPrometheus += "}" + s := strings.Join([]string{`&Metrics{`, + `Prometheus:` + repeatedStringForPrometheus + `,`, + `}`, + }, "") + return s +} +func (this *Mutex) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mutex{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *MutexHolding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutexHolding{`, + `Mutex:` + fmt.Sprintf("%v", this.Mutex) + `,`, + `Holder:` + fmt.Sprintf("%v", this.Holder) + `,`, + `}`, + }, "") + return s +} +func (this *MutexStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForHolding := "[]MutexHolding{" + for _, f := range this.Holding { + repeatedStringForHolding += strings.Replace(strings.Replace(f.String(), "MutexHolding", "MutexHolding", 1), `&`, ``, 1) + "," + } + repeatedStringForHolding += "}" + repeatedStringForWaiting := "[]MutexHolding{" + for _, f := range this.Waiting { + repeatedStringForWaiting += strings.Replace(strings.Replace(f.String(), "MutexHolding", "MutexHolding", 1), `&`, ``, 1) + "," + } + repeatedStringForWaiting += "}" + s := strings.Join([]string{`&MutexStatus{`, + `Holding:` + repeatedStringForHolding + `,`, + `Waiting:` + repeatedStringForWaiting + `,`, + `}`, + }, "") + return s +} +func (this *NodeFlag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeFlag{`, + `Hooked:` + fmt.Sprintf("%v", this.Hooked) + `,`, + `Retried:` + fmt.Sprintf("%v", this.Retried) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResult) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeResult{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`, + `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + keysForResourcesDuration := make([]string, 0, len(this.ResourcesDuration)) + for k := range this.ResourcesDuration { + keysForResourcesDuration = append(keysForResourcesDuration, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) + mapStringForResourcesDuration := "ResourcesDuration{" + for _, k := range keysForResourcesDuration { + mapStringForResourcesDuration += fmt.Sprintf("%v: %v,", k, this.ResourcesDuration[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForResourcesDuration += "}" + s := strings.Join([]string{`&NodeStatus{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, + `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `BoundaryID:` + fmt.Sprintf("%v", this.BoundaryID) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `Daemoned:` + valueToStringGenerated(this.Daemoned) + `,`, + `Inputs:` + strings.Replace(this.Inputs.String(), "Inputs", "Inputs", 1) + `,`, + `Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`, + `Children:` + fmt.Sprintf("%v", this.Children) + `,`, + `OutboundNodes:` + fmt.Sprintf("%v", this.OutboundNodes) + `,`, + `TemplateScope:` + fmt.Sprintf("%v", this.TemplateScope) + `,`, + `ResourcesDuration:` + mapStringForResourcesDuration + `,`, + `HostNodeName:` + fmt.Sprintf("%v", this.HostNodeName) + `,`, + `MemoizationStatus:` + strings.Replace(this.MemoizationStatus.String(), "MemoizationStatus", "MemoizationStatus", 1) + `,`, + `EstimatedDuration:` + fmt.Sprintf("%v", this.EstimatedDuration) + `,`, + `SynchronizationStatus:` + strings.Replace(this.SynchronizationStatus.String(), "NodeSynchronizationStatus", "NodeSynchronizationStatus", 1) + `,`, + `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, + `NodeFlag:` + strings.Replace(this.NodeFlag.String(), "NodeFlag", "NodeFlag", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSynchronizationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSynchronizationStatus{`, + `Waiting:` + fmt.Sprintf("%v", this.Waiting) + `,`, + `}`, + }, "") + return s +} +func (this *NoneStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NoneStrategy{`, + `}`, + }, "") + return s +} +func (this *OAuth2Auth) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpointParams := "[]OAuth2EndpointParam{" + for _, f := range this.EndpointParams { + repeatedStringForEndpointParams += 
strings.Replace(strings.Replace(f.String(), "OAuth2EndpointParam", "OAuth2EndpointParam", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpointParams += "}" + s := strings.Join([]string{`&OAuth2Auth{`, + `ClientIDSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientIDSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `ClientSecretSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientSecretSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `TokenURLSecret:` + strings.Replace(fmt.Sprintf("%v", this.TokenURLSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `EndpointParams:` + repeatedStringForEndpointParams + `,`, + `}`, + }, "") + return s +} +func (this *OAuth2EndpointParam) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuth2EndpointParam{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *OSSArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OSSArtifact{`, + `OSSBucket:` + strings.Replace(strings.Replace(this.OSSBucket.String(), "OSSBucket", "OSSBucket", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *OSSArtifactRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OSSArtifactRepository{`, + `OSSBucket:` + strings.Replace(strings.Replace(this.OSSBucket.String(), "OSSBucket", "OSSBucket", 1), `&`, ``, 1) + `,`, + `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, + `}`, + }, "") + return s +} +func (this *OSSBucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OSSBucket{`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, + `AccessKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SecretKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `CreateBucketIfNotPresent:` + fmt.Sprintf("%v", this.CreateBucketIfNotPresent) + `,`, + `SecurityToken:` + fmt.Sprintf("%v", this.SecurityToken) + `,`, + `LifecycleRule:` + strings.Replace(this.LifecycleRule.String(), "OSSLifecycleRule", "OSSLifecycleRule", 1) + `,`, + `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, + `}`, + }, "") + return s +} +func (this *OSSLifecycleRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OSSLifecycleRule{`, + `MarkInfrequentAccessAfterDays:` + fmt.Sprintf("%v", this.MarkInfrequentAccessAfterDays) + `,`, + `MarkDeletionAfterDays:` + fmt.Sprintf("%v", this.MarkDeletionAfterDays) + `,`, + `}`, + }, "") + return s +} +func (this *Object) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object{`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Outputs) String() string { + if this == nil { + return "nil" + } + repeatedStringForParameters := "[]Parameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + repeatedStringForArtifacts := "[]Artifact{" + for _, f := range this.Artifacts { + 
repeatedStringForArtifacts += strings.Replace(strings.Replace(f.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + "," + } + repeatedStringForArtifacts += "}" + s := strings.Join([]string{`&Outputs{`, + `Parameters:` + repeatedStringForParameters + `,`, + `Artifacts:` + repeatedStringForArtifacts + `,`, + `Result:` + valueToStringGenerated(this.Result) + `,`, + `ExitCode:` + valueToStringGenerated(this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *ParallelSteps) String() string { + if this == nil { + return "nil" + } + repeatedStringForSteps := "[]WorkflowStep{" + for _, f := range this.Steps { + repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "WorkflowStep", "WorkflowStep", 1), `&`, ``, 1) + "," + } + repeatedStringForSteps += "}" + s := strings.Join([]string{`&ParallelSteps{`, + `Steps:` + repeatedStringForSteps + `,`, + `}`, + }, "") + return s +} +func (this *Parameter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Parameter{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Default:` + valueToStringGenerated(this.Default) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "ValueFrom", "ValueFrom", 1) + `,`, + `GlobalName:` + fmt.Sprintf("%v", this.GlobalName) + `,`, + `Enum:` + fmt.Sprintf("%v", this.Enum) + `,`, + `Description:` + valueToStringGenerated(this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *Plugin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Plugin{`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "Object", "Object", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodGC) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodGC{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `DeleteDelayDuration:` + fmt.Sprintf("%v", this.DeleteDelayDuration) + `,`, + `}`, + }, "") + return s +} +func (this *Prometheus) String() string { + if this == nil { + return "nil" + } + repeatedStringForLabels := "[]*MetricLabel{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(f.String(), "MetricLabel", "MetricLabel", 1) + "," + } + repeatedStringForLabels += "}" + s := strings.Join([]string{`&Prometheus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + repeatedStringForLabels + `,`, + `Help:` + fmt.Sprintf("%v", this.Help) + `,`, + `When:` + fmt.Sprintf("%v", this.When) + `,`, + `Gauge:` + strings.Replace(this.Gauge.String(), "Gauge", "Gauge", 1) + `,`, + `Histogram:` + strings.Replace(this.Histogram.String(), "Histogram", "Histogram", 1) + `,`, + `Counter:` + strings.Replace(this.Counter.String(), "Counter", "Counter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RawArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawArtifact{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceTemplate{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `MergeStrategy:` + fmt.Sprintf("%v", this.MergeStrategy) + `,`, + `Manifest:` + fmt.Sprintf("%v", this.Manifest) + `,`, + `SetOwnerReference:` + fmt.Sprintf("%v", this.SetOwnerReference) + 
`,`, + `SuccessCondition:` + fmt.Sprintf("%v", this.SuccessCondition) + `,`, + `FailureCondition:` + fmt.Sprintf("%v", this.FailureCondition) + `,`, + `Flags:` + fmt.Sprintf("%v", this.Flags) + `,`, + `ManifestFrom:` + strings.Replace(this.ManifestFrom.String(), "ManifestFrom", "ManifestFrom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RetryAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryAffinity{`, + `NodeAntiAffinity:` + strings.Replace(this.NodeAntiAffinity.String(), "RetryNodeAntiAffinity", "RetryNodeAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RetryNodeAntiAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryNodeAntiAffinity{`, + `}`, + }, "") + return s +} +func (this *RetryStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryStrategy{`, + `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "IntOrString", "intstr.IntOrString", 1) + `,`, + `RetryPolicy:` + fmt.Sprintf("%v", this.RetryPolicy) + `,`, + `Backoff:` + strings.Replace(this.Backoff.String(), "Backoff", "Backoff", 1) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "RetryAffinity", "RetryAffinity", 1) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *S3Artifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&S3Artifact{`, + `S3Bucket:` + strings.Replace(strings.Replace(this.S3Bucket.String(), "S3Bucket", "S3Bucket", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *S3ArtifactRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&S3ArtifactRepository{`, + `S3Bucket:` + strings.Replace(strings.Replace(this.S3Bucket.String(), "S3Bucket", "S3Bucket", 1), `&`, ``, 1) + `,`, + `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, + `KeyPrefix:` + fmt.Sprintf("%v", this.KeyPrefix) + `,`, + `}`, + }, "") + return s +} +func (this *S3Bucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&S3Bucket{`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `Insecure:` + valueToStringGenerated(this.Insecure) + `,`, + `AccessKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SecretKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, + `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, + `CreateBucketIfNotPresent:` + strings.Replace(this.CreateBucketIfNotPresent.String(), "CreateS3BucketOptions", "CreateS3BucketOptions", 1) + `,`, + `EncryptionOptions:` + strings.Replace(this.EncryptionOptions.String(), "S3EncryptionOptions", "S3EncryptionOptions", 1) + `,`, + `CASecret:` + strings.Replace(fmt.Sprintf("%v", this.CASecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SessionTokenSecret:` + strings.Replace(fmt.Sprintf("%v", this.SessionTokenSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *S3EncryptionOptions) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&S3EncryptionOptions{`, + `KmsKeyId:` + fmt.Sprintf("%v", this.KmsKeyId) + `,`, + `KmsEncryptionContext:` + fmt.Sprintf("%v", this.KmsEncryptionContext) + `,`, + `EnableEncryption:` + fmt.Sprintf("%v", this.EnableEncryption) + `,`, + `ServerSideCustomerKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerSideCustomerKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScriptTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScriptTemplate{`, + `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *SemaphoreHolding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SemaphoreHolding{`, + `Semaphore:` + fmt.Sprintf("%v", this.Semaphore) + `,`, + `Holders:` + fmt.Sprintf("%v", this.Holders) + `,`, + `}`, + }, "") + return s +} +func (this *SemaphoreRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SemaphoreRef{`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SemaphoreStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForHolding := "[]SemaphoreHolding{" + for _, f := range this.Holding { + repeatedStringForHolding += strings.Replace(strings.Replace(f.String(), "SemaphoreHolding", "SemaphoreHolding", 1), `&`, ``, 1) + "," + } + repeatedStringForHolding += "}" + repeatedStringForWaiting := "[]SemaphoreHolding{" + for _, f := range this.Waiting { + repeatedStringForWaiting += strings.Replace(strings.Replace(f.String(), "SemaphoreHolding", "SemaphoreHolding", 1), `&`, ``, 1) + "," + } + repeatedStringForWaiting += "}" + s := strings.Join([]string{`&SemaphoreStatus{`, + `Holding:` + repeatedStringForHolding + `,`, + `Waiting:` + repeatedStringForWaiting + `,`, + `}`, + }, "") + return s +} +func (this *Sequence) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sequence{`, + `Count:` + strings.Replace(fmt.Sprintf("%v", this.Count), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "IntOrString", "intstr.IntOrString", 1) + `,`, + `End:` + strings.Replace(fmt.Sprintf("%v", this.End), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `}`, + }, "") + return s +} +func (this *StopStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StopStrategy{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Submit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Submit{`, + `WorkflowTemplateRef:` + strings.Replace(strings.Replace(this.WorkflowTemplateRef.String(), "WorkflowTemplateRef", "WorkflowTemplateRef", 1), `&`, ``, 1) + `,`, + `Arguments:` + strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubmitOpts) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&SubmitOpts{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Entrypoint:` + fmt.Sprintf("%v", this.Entrypoint) + `,`, + `Parameters:` + fmt.Sprintf("%v", this.Parameters) + `,`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `ServerDryRun:` + fmt.Sprintf("%v", this.ServerDryRun) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `OwnerReference:` + strings.Replace(fmt.Sprintf("%v", this.OwnerReference), "OwnerReference", "v11.OwnerReference", 1) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `PodPriorityClassName:` + fmt.Sprintf("%v", this.PodPriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `}`, + }, "") + return s +} +func (this *SuppliedValueFrom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuppliedValueFrom{`, + `}`, + }, "") + return s +} +func (this *SuspendTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuspendTemplate{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *Synchronization) String() string { + if this == nil { + return "nil" + } + repeatedStringForSemaphores := "[]*SemaphoreRef{" + for _, f := range this.Semaphores { + repeatedStringForSemaphores += strings.Replace(f.String(), "SemaphoreRef", "SemaphoreRef", 1) + "," + } + repeatedStringForSemaphores += "}" + repeatedStringForMutexes := "[]*Mutex{" + for _, f := range this.Mutexes { + repeatedStringForMutexes += strings.Replace(f.String(), "Mutex", "Mutex", 1) + "," + } + repeatedStringForMutexes += "}" + s := strings.Join([]string{`&Synchronization{`, + `Semaphore:` + strings.Replace(this.Semaphore.String(), "SemaphoreRef", "SemaphoreRef", 1) + `,`, + `Mutex:` + strings.Replace(this.Mutex.String(), "Mutex", "Mutex", 1) + `,`, + `Semaphores:` + repeatedStringForSemaphores + `,`, + `Mutexes:` + repeatedStringForMutexes + `,`, + `}`, + }, "") + return s +} +func (this *SynchronizationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SynchronizationStatus{`, + `Semaphore:` + strings.Replace(this.Semaphore.String(), "SemaphoreStatus", "SemaphoreStatus", 1) + `,`, + `Mutex:` + strings.Replace(this.Mutex.String(), "MutexStatus", "MutexStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TTLStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TTLStrategy{`, + `SecondsAfterCompletion:` + valueToStringGenerated(this.SecondsAfterCompletion) + `,`, + `SecondsAfterSuccess:` + valueToStringGenerated(this.SecondsAfterSuccess) + `,`, + `SecondsAfterFailure:` + valueToStringGenerated(this.SecondsAfterFailure) + `,`, + `}`, + }, "") + return s +} +func (this *TarStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TarStrategy{`, + `CompressionLevel:` + valueToStringGenerated(this.CompressionLevel) + `,`, + `}`, + }, "") + return s +} +func (this *Template) String() string { + if this == nil { + return "nil" + } + repeatedStringForSteps := "[]ParallelSteps{" + for _, f := range this.Steps { + repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "ParallelSteps", "ParallelSteps", 1), `&`, ``, 1) + "," + } + repeatedStringForSteps += "}" + repeatedStringForVolumes := 
"[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForInitContainers := "[]UserContainer{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "UserContainer", "UserContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForSidecars := "[]UserContainer{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += strings.Replace(strings.Replace(f.String(), "UserContainer", "UserContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForSidecars += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += fmt.Sprintf("%v", f) + "," + } + repeatedStringForHostAliases += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Template{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Inputs:` + strings.Replace(strings.Replace(this.Inputs.String(), "Inputs", "Inputs", 1), `&`, ``, 1) + `,`, + `Outputs:` + strings.Replace(strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`, + `Daemon:` + valueToStringGenerated(this.Daemon) + `,`, + `Steps:` + repeatedStringForSteps + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1) + `,`, + `Script:` + strings.Replace(this.Script.String(), "ScriptTemplate", "ScriptTemplate", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceTemplate", "ResourceTemplate", 1) + `,`, + `DAG:` + strings.Replace(this.DAG.String(), "DAGTemplate", "DAGTemplate", 1) + `,`, + `Suspend:` + strings.Replace(this.Suspend.String(), "SuspendTemplate", "SuspendTemplate", 1) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `ArchiveLocation:` + strings.Replace(this.ArchiveLocation.String(), "ArtifactLocation", "ArtifactLocation", 1) + `,`, + `ActiveDeadlineSeconds:` + strings.Replace(fmt.Sprintf("%v", this.ActiveDeadlineSeconds), "IntOrString", "intstr.IntOrString", 1) + `,`, + `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "RetryStrategy", "RetryStrategy", 1) + `,`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + 
`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Executor:` + strings.Replace(this.Executor.String(), "ExecutorConfig", "ExecutorConfig", 1) + `,`, + `Metrics:` + strings.Replace(this.Metrics.String(), "Metrics", "Metrics", 1) + `,`, + `Synchronization:` + strings.Replace(this.Synchronization.String(), "Synchronization", "Synchronization", 1) + `,`, + `Memoize:` + strings.Replace(this.Memoize.String(), "Memoize", "Memoize", 1) + `,`, + `Timeout:` + fmt.Sprintf("%v", this.Timeout) + `,`, + `Data:` + strings.Replace(this.Data.String(), "Data", "Data", 1) + `,`, + `ContainerSet:` + strings.Replace(this.ContainerSet.String(), "ContainerSetTemplate", "ContainerSetTemplate", 1) + `,`, + `FailFast:` + valueToStringGenerated(this.FailFast) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTP", "HTTP", 1) + `,`, + `Plugin:` + strings.Replace(this.Plugin.String(), "Plugin", "Plugin", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `}`, + }, "") + return s +} +func (this *TransformationStep) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransformationStep{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *UserContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserContainer{`, + `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, + `MirrorVolumeMounts:` + valueToStringGenerated(this.MirrorVolumeMounts) + `,`, + `}`, + }, "") + return s +} +func (this *ValueFrom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValueFrom{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `JQFilter:` + fmt.Sprintf("%v", this.JQFilter) + `,`, + `Parameter:` + fmt.Sprintf("%v", this.Parameter) + `,`, + `Default:` + valueToStringGenerated(this.Default) + `,`, + `Supplied:` + strings.Replace(this.Supplied.String(), "SuppliedValueFrom", "SuppliedValueFrom", 1) + `,`, + `Event:` + fmt.Sprintf("%v", this.Event) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Version) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Version{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `BuildDate:` + fmt.Sprintf("%v", this.BuildDate) + `,`, + `GitCommit:` + fmt.Sprintf("%v", this.GitCommit) + `,`, + `GitTag:` + fmt.Sprintf("%v", this.GitTag) + `,`, + `GitTreeState:` + fmt.Sprintf("%v", this.GitTreeState) + `,`, + `GoVersion:` + fmt.Sprintf("%v", this.GoVersion) + `,`, + `Compiler:` + fmt.Sprintf("%v", 
+		`Compiler:` + fmt.Sprintf("%v", this.Compiler) + `,`,
+		`Platform:` + fmt.Sprintf("%v", this.Platform) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeClaimGC) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeClaimGC{`,
+		`Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Workflow) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Workflow{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "WorkflowStatus", "WorkflowStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowArtifactGCTask) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowArtifactGCTask{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ArtifactGCSpec", "ArtifactGCSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ArtifactGCStatus", "ArtifactGCStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowArtifactGCTaskList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]WorkflowArtifactGCTask{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowArtifactGCTask", "WorkflowArtifactGCTask", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowArtifactGCTaskList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowEventBinding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowEventBinding{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowEventBindingSpec", "WorkflowEventBindingSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowEventBindingList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]WorkflowEventBinding{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowEventBinding", "WorkflowEventBinding", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowEventBindingList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowEventBindingSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowEventBindingSpec{`,
+		`Event:` + strings.Replace(strings.Replace(this.Event.String(), "Event", "Event", 1), `&`, ``, 1) + `,`,
+		`Submit:` + strings.Replace(this.Submit.String(), "Submit", "Submit", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowLevelArtifactGC) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowLevelArtifactGC{`,
+		`ArtifactGC:` + strings.Replace(strings.Replace(this.ArtifactGC.String(), "ArtifactGC", "ArtifactGC", 1), `&`, ``, 1) + `,`,
+		`ForceFinalizerRemoval:` + fmt.Sprintf("%v", this.ForceFinalizerRemoval) + `,`,
+		`PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Workflow{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Workflow", "Workflow", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowMetadata) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	keysForLabelsFrom := make([]string, 0, len(this.LabelsFrom))
+	for k := range this.LabelsFrom {
+		keysForLabelsFrom = append(keysForLabelsFrom, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabelsFrom)
+	mapStringForLabelsFrom := "map[string]LabelValueFrom{"
+	for _, k := range keysForLabelsFrom {
+		mapStringForLabelsFrom += fmt.Sprintf("%v: %v,", k, this.LabelsFrom[k])
+	}
+	mapStringForLabelsFrom += "}"
+	s := strings.Join([]string{`&WorkflowMetadata{`,
+		`Labels:` + mapStringForLabels + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`LabelsFrom:` + mapStringForLabelsFrom + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForTemplates := "[]Template{"
+	for _, f := range this.Templates {
+		repeatedStringForTemplates += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTemplates += "}"
+	repeatedStringForVolumes := "[]Volume{"
+	for _, f := range this.Volumes {
+		repeatedStringForVolumes += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForVolumes += "}"
+	repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{"
+	for _, f := range this.VolumeClaimTemplates {
+		repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForVolumeClaimTemplates += "}"
+	repeatedStringForTolerations := "[]Toleration{"
+	for _, f := range this.Tolerations {
+		repeatedStringForTolerations += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForTolerations += "}"
+	repeatedStringForImagePullSecrets := "[]LocalObjectReference{"
+	for _, f := range this.ImagePullSecrets {
+		repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForImagePullSecrets += "}"
+	repeatedStringForHostAliases := "[]HostAlias{"
+	for _, f := range this.HostAliases {
+		repeatedStringForHostAliases += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForHostAliases += "}"
+	keysForNodeSelector := make([]string, 0, len(this.NodeSelector))
+	for k := range this.NodeSelector {
+		keysForNodeSelector = append(keysForNodeSelector, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+	mapStringForNodeSelector := "map[string]string{"
+	for _, k := range keysForNodeSelector {
+		mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
+	}
+	mapStringForNodeSelector += "}"
+	keysForHooks := make([]string, 0, len(this.Hooks))
+	for k := range this.Hooks {
+		keysForHooks = append(keysForHooks, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForHooks)
+	mapStringForHooks := "LifecycleHooks{"
+	for _, k := range keysForHooks {
+		mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)])
+	}
+	mapStringForHooks += "}"
+	s := strings.Join([]string{`&WorkflowSpec{`,
+		`Templates:` + repeatedStringForTemplates + `,`,
+		`Entrypoint:` + fmt.Sprintf("%v", this.Entrypoint) + `,`,
+		`Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`,
+		`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+		`Volumes:` + repeatedStringForVolumes + `,`,
+		`VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`,
+		`Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`,
+		`ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRef", "ArtifactRepositoryRef", 1) + `,`,
+		`Suspend:` + valueToStringGenerated(this.Suspend) + `,`,
+		`NodeSelector:` + mapStringForNodeSelector + `,`,
+		`Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`,
+		`Tolerations:` + repeatedStringForTolerations + `,`,
+		`ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`,
+		`HostNetwork:` + valueToStringGenerated(this.HostNetwork) + `,`,
+		`DNSPolicy:` + valueToStringGenerated(this.DNSPolicy) + `,`,
+		`DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`,
+		`OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`,
+		`ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`,
+		`Priority:` + valueToStringGenerated(this.Priority) + `,`,
+		`SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`,
+		`PodGC:` + strings.Replace(this.PodGC.String(), "PodGC", "PodGC", 1) + `,`,
+		`PodPriorityClassName:` + fmt.Sprintf("%v", this.PodPriorityClassName) + `,`,
+		`PodPriority:` + valueToStringGenerated(this.PodPriority) + `,`,
+		`HostAliases:` + repeatedStringForHostAliases + `,`,
+		`SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`,
+		`PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`,
+		`AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`,
+		`Executor:` + strings.Replace(this.Executor.String(), "ExecutorConfig", "ExecutorConfig", 1) + `,`,
+		`TTLStrategy:` + strings.Replace(this.TTLStrategy.String(), "TTLStrategy", "TTLStrategy", 1) + `,`,
this.PodDisruptionBudget), "PodDisruptionBudgetSpec", "v12.PodDisruptionBudgetSpec", 1) + `,`, + `Metrics:` + strings.Replace(this.Metrics.String(), "Metrics", "Metrics", 1) + `,`, + `Shutdown:` + fmt.Sprintf("%v", this.Shutdown) + `,`, + `WorkflowTemplateRef:` + strings.Replace(this.WorkflowTemplateRef.String(), "WorkflowTemplateRef", "WorkflowTemplateRef", 1) + `,`, + `Synchronization:` + strings.Replace(this.Synchronization.String(), "Synchronization", "Synchronization", 1) + `,`, + `VolumeClaimGC:` + strings.Replace(this.VolumeClaimGC.String(), "VolumeClaimGC", "VolumeClaimGC", 1) + `,`, + `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "RetryStrategy", "RetryStrategy", 1) + `,`, + `PodMetadata:` + strings.Replace(this.PodMetadata.String(), "Metadata", "Metadata", 1) + `,`, + `TemplateDefaults:` + strings.Replace(this.TemplateDefaults.String(), "Template", "Template", 1) + `,`, + `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, + `Hooks:` + mapStringForHooks + `,`, + `WorkflowMetadata:` + strings.Replace(this.WorkflowMetadata.String(), "WorkflowMetadata", "WorkflowMetadata", 1) + `,`, + `ArtifactGC:` + strings.Replace(this.ArtifactGC.String(), "WorkflowLevelArtifactGC", "WorkflowLevelArtifactGC", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForPersistentVolumeClaims := "[]Volume{" + for _, f := range this.PersistentVolumeClaims { + repeatedStringForPersistentVolumeClaims += fmt.Sprintf("%v", f) + "," + } + repeatedStringForPersistentVolumeClaims += "}" + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForNodes := make([]string, 0, len(this.Nodes)) + for k := range this.Nodes { + keysForNodes = append(keysForNodes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) + mapStringForNodes := "Nodes{" + for _, k := range keysForNodes { + mapStringForNodes += fmt.Sprintf("%v: %v,", k, this.Nodes[k]) + } + mapStringForNodes += "}" + keysForStoredTemplates := make([]string, 0, len(this.StoredTemplates)) + for k := range this.StoredTemplates { + keysForStoredTemplates = append(keysForStoredTemplates, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStoredTemplates) + mapStringForStoredTemplates := "map[string]Template{" + for _, k := range keysForStoredTemplates { + mapStringForStoredTemplates += fmt.Sprintf("%v: %v,", k, this.StoredTemplates[k]) + } + mapStringForStoredTemplates += "}" + keysForResourcesDuration := make([]string, 0, len(this.ResourcesDuration)) + for k := range this.ResourcesDuration { + keysForResourcesDuration = append(keysForResourcesDuration, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) + mapStringForResourcesDuration := "ResourcesDuration{" + for _, k := range keysForResourcesDuration { + mapStringForResourcesDuration += fmt.Sprintf("%v: %v,", k, this.ResourcesDuration[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForResourcesDuration += "}" + keysForTaskResultsCompletionStatus := make([]string, 0, len(this.TaskResultsCompletionStatus)) + for k := range this.TaskResultsCompletionStatus { + keysForTaskResultsCompletionStatus = append(keysForTaskResultsCompletionStatus, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus) + 
+	mapStringForTaskResultsCompletionStatus := "map[string]bool{"
+	for _, k := range keysForTaskResultsCompletionStatus {
+		mapStringForTaskResultsCompletionStatus += fmt.Sprintf("%v: %v,", k, this.TaskResultsCompletionStatus[k])
+	}
+	mapStringForTaskResultsCompletionStatus += "}"
+	s := strings.Join([]string{`&WorkflowStatus{`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+		`FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`CompressedNodes:` + fmt.Sprintf("%v", this.CompressedNodes) + `,`,
+		`Nodes:` + mapStringForNodes + `,`,
+		`PersistentVolumeClaims:` + repeatedStringForPersistentVolumeClaims + `,`,
+		`Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`,
+		`StoredTemplates:` + mapStringForStoredTemplates + `,`,
+		`OffloadNodeStatusVersion:` + fmt.Sprintf("%v", this.OffloadNodeStatusVersion) + `,`,
+		`ResourcesDuration:` + mapStringForResourcesDuration + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`StoredWorkflowSpec:` + strings.Replace(this.StoredWorkflowSpec.String(), "WorkflowSpec", "WorkflowSpec", 1) + `,`,
+		`Synchronization:` + strings.Replace(this.Synchronization.String(), "SynchronizationStatus", "SynchronizationStatus", 1) + `,`,
+		`EstimatedDuration:` + fmt.Sprintf("%v", this.EstimatedDuration) + `,`,
+		`Progress:` + fmt.Sprintf("%v", this.Progress) + `,`,
+		`ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRefStatus", "ArtifactRepositoryRefStatus", 1) + `,`,
+		`ArtifactGCStatus:` + strings.Replace(this.ArtifactGCStatus.String(), "ArtGCStatus", "ArtGCStatus", 1) + `,`,
+		`TaskResultsCompletionStatus:` + mapStringForTaskResultsCompletionStatus + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowStep) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForWithItems := "[]Item{"
+	for _, f := range this.WithItems {
+		repeatedStringForWithItems += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForWithItems += "}"
+	keysForHooks := make([]string, 0, len(this.Hooks))
+	for k := range this.Hooks {
+		keysForHooks = append(keysForHooks, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForHooks)
+	mapStringForHooks := "LifecycleHooks{"
+	for _, k := range keysForHooks {
+		mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)])
+	}
+	mapStringForHooks += "}"
+	s := strings.Join([]string{`&WorkflowStep{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Template:` + fmt.Sprintf("%v", this.Template) + `,`,
+		`Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`,
+		`TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`,
+		`WithItems:` + repeatedStringForWithItems + `,`,
+		`WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`,
+		`WithSequence:` + strings.Replace(this.WithSequence.String(), "Sequence", "Sequence", 1) + `,`,
+		`When:` + fmt.Sprintf("%v", this.When) + `,`,
+		`ContinueOn:` + strings.Replace(this.ContinueOn.String(), "ContinueOn", "ContinueOn", 1) + `,`,
+		`OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`,
+		`Hooks:` + mapStringForHooks + `,`,
+		`Inline:` + strings.Replace(this.Inline.String(), "Template", "Template", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskResult) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowTaskResult{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`NodeResult:` + strings.Replace(strings.Replace(this.NodeResult.String(), "NodeResult", "NodeResult", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskResultList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]WorkflowTaskResult{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTaskResult", "WorkflowTaskResult", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowTaskResultList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskSet) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowTaskSet{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowTaskSetSpec", "WorkflowTaskSetSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "WorkflowTaskSetStatus", "WorkflowTaskSetStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskSetList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]WorkflowTaskSet{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTaskSet", "WorkflowTaskSet", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowTaskSetList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskSetSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForTasks := make([]string, 0, len(this.Tasks))
+	for k := range this.Tasks {
+		keysForTasks = append(keysForTasks, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForTasks)
+	mapStringForTasks := "map[string]Template{"
+	for _, k := range keysForTasks {
+		mapStringForTasks += fmt.Sprintf("%v: %v,", k, this.Tasks[k])
+	}
+	mapStringForTasks += "}"
+	s := strings.Join([]string{`&WorkflowTaskSetSpec{`,
+		`Tasks:` + mapStringForTasks + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTaskSetStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForNodes := make([]string, 0, len(this.Nodes))
+	for k := range this.Nodes {
+		keysForNodes = append(keysForNodes, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForNodes)
+	mapStringForNodes := "map[string]NodeResult{"
+	for _, k := range keysForNodes {
+		mapStringForNodes += fmt.Sprintf("%v: %v,", k, this.Nodes[k])
+	}
+	mapStringForNodes += "}"
+	s := strings.Join([]string{`&WorkflowTaskSetStatus{`,
+		`Nodes:` + mapStringForNodes + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTemplate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowTemplate{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTemplateList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]WorkflowTemplate{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTemplate", "WorkflowTemplate", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&WorkflowTemplateList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WorkflowTemplateRef) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WorkflowTemplateRef{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ZipStrategy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ZipStrategy{`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
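+// The Unmarshal methods below are hand-rolled protobuf wire-format decoders:
+// each reads a varint tag, splits it into a field number (wire >> 3) and a
+// wire type (wire & 0x7), then switches on the field number to decode that
+// field's bytes, skipping unknown fields via skipGenerated.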
+func (m *Amount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Amount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Amount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = encoding_json.Number(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArchiveStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArchiveStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArchiveStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tar", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Tar == nil {
+				m.Tar = &TarStrategy{}
+			}
+			if err := m.Tar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field None", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.None == nil {
+				m.None = &NoneStrategy{}
+			}
+			if err := m.None.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Zip", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Zip == nil {
+				m.Zip = &ZipStrategy{}
+			}
+			if err := m.Zip.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Arguments) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Arguments: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Arguments: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parameters = append(m.Parameters, Parameter{})
+			if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Artifacts = append(m.Artifacts, Artifact{})
+			if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArtGCStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArtGCStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArtGCStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StrategiesProcessed", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StrategiesProcessed == nil {
+				m.StrategiesProcessed = make(map[ArtifactGCStrategy]bool)
+			}
+			var mapkey ArtifactGCStrategy
+			var mapvalue bool
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ArtifactGCStrategy(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapvaluetemp int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapvaluetemp |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					mapvalue = bool(mapvaluetemp != 0)
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.StrategiesProcessed[ArtifactGCStrategy(mapkey)] = mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodsRecouped", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodsRecouped == nil {
+				m.PodsRecouped = make(map[string]bool)
+			}
+			var mapkey string
+			var mapvalue bool
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapvaluetemp int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapvaluetemp |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					mapvalue = bool(mapvaluetemp != 0)
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.PodsRecouped[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotSpecified", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NotSpecified = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Artifact) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Artifact: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Artifact: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Mode = &v
+		case 4:
+			if wireType != 2 {
field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactLocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Archive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Archive == nil { + m.Archive = &ArchiveStrategy{} + } + if err := m.Archive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RecurseMode", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RecurseMode = bool(v != 0)
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FromExpression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FromExpression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ArtifactGC == nil {
+				m.ArtifactGC = &ArtifactGC{}
+			}
+			if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Deleted = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArtifactGC) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArtifactGC: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArtifactGC: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Strategy = ArtifactGCStrategy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodMetadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodMetadata == nil {
+				m.PodMetadata = &Metadata{}
+			}
+			if err := m.PodMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArtifactGCSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArtifactGCSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArtifactGCSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ArtifactsByNode", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ArtifactsByNode == nil {
+				m.ArtifactsByNode = make(map[string]ArtifactNodeSpec)
+			}
+			var mapkey string
+			mapvalue := &ArtifactNodeSpec{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &ArtifactNodeSpec{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ArtifactsByNode[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArtifactGCStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArtifactGCStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArtifactGCStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ArtifactResultsByNode", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ArtifactResultsByNode == nil {
+				m.ArtifactResultsByNode = make(map[string]ArtifactResultNodeStatus)
+			}
+			var mapkey string
+			mapvalue := &ArtifactResultNodeStatus{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &ArtifactResultNodeStatus{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ArtifactResultsByNode[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArtifactLocation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactLocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ArchiveLogs = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.S3 == nil { + m.S3 = &S3Artifact{} + } + if err := m.S3.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitArtifact{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPArtifact{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifactory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Artifactory == nil { + m.Artifactory = &ArtifactoryArtifact{} + } + if err := m.Artifactory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field HDFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HDFS == nil { + m.HDFS = &HDFSArtifact{} + } + if err := m.HDFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Raw == nil { + m.Raw = &RawArtifact{} + } + if err := m.Raw.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OSS == nil { + m.OSS = &OSSArtifact{} + } + if err := m.OSS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCS == nil { + m.GCS = &GCSArtifact{} + } + if err := m.GCS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Azure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Azure == nil { + m.Azure = &AzureArtifact{} + } + if err := m.Azure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactNodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactNodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactNodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArchiveLocation == nil { + m.ArchiveLocation = &ArtifactLocation{} + } + if err := m.ArchiveLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Artifacts == nil { + m.Artifacts = make(map[string]Artifact) + } + var mapkey string + mapvalue := &Artifact{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Artifact{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Artifacts[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactRepository: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ArchiveLogs = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.S3 == nil { + m.S3 = &S3ArtifactRepository{} + } + if err := m.S3.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifactory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Artifactory == nil { + m.Artifactory = &ArtifactoryArtifactRepository{} + } + if err := m.Artifactory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HDFS == nil { + m.HDFS = &HDFSArtifactRepository{} + } + if err := m.HDFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OSS == nil { + m.OSS = &OSSArtifactRepository{} + } + if err := m.OSS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field GCS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCS == nil { + m.GCS = &GCSArtifactRepository{} + } + if err := m.GCS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Azure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Azure == nil { + m.Azure = &AzureArtifactRepository{} + } + if err := m.Azure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactRepositoryRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactRepositoryRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactRepositoryRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigMap = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactRepositoryRefStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactRepositoryRefStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactRepositoryRefStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Default = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactRepository == nil { + m.ArtifactRepository = &ArtifactRepository{} + } + if err := m.ArtifactRepository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Success = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *ArtifactResultNodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactResultNodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactResultNodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactResults == nil { + m.ArtifactResults = make(map[string]ArtifactResult) + } + var mapkey string + mapvalue := &ArtifactResult{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ArtifactResult{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ArtifactResults[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactSearchQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactSearchQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactSearchQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGCStrategies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactGCStrategies == nil { + m.ArtifactGCStrategies = make(map[ArtifactGCStrategy]bool) + } + var mapkey ArtifactGCStrategy + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ArtifactGCStrategy(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ArtifactGCStrategies[ArtifactGCStrategy(mapkey)] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ArtifactName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ArtifactName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Deleted = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeTypes == nil { + m.NodeTypes = make(map[NodeType]bool) + } + var mapkey NodeType + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = NodeType(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeTypes[NodeType(mapkey)] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactSearchResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactSearchResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactSearchResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactoryArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactoryAuth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactoryAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryArtifactRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryArtifactRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ArtifactoryArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactoryAuth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactoryAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepoURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepoURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactoryAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PasswordSecret == nil { + m.PasswordSecret = &v1.SecretKeySelector{} + } + if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Blob = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureArtifactRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureArtifactRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlobNameFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlobNameFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureBlobContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureBlobContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureBlobContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccountKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AccountKeySecret == nil { + m.AccountKeySecret = &v1.SecretKeySelector{} + } + if err := m.AccountKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseSDKCreds = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backoff) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Backoff: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Backoff: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Duration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Factor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Factor == nil { + m.Factor = &intstr.IntOrString{} + } + if err := m.Factor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDuration", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MaxDuration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PasswordSecret == nil { + m.PasswordSecret = &v1.SecretKeySelector{} + } + if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cache) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cache: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cache: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &v1.ConfigMapKeySelector{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientCertAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientCertAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientCertAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCertSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientCertSecret == nil { + m.ClientCertSecret = &v1.SecretKeySelector{} + } + if err := m.ClientCertSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientKeySecret == nil { + m.ClientKeySecret = &v1.SecretKeySelector{} + } + if err := m.ClientKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterWorkflowTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Column) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Column: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Condition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Condition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_apimachinery_pkg_apis_meta_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSetRetryStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerSetRetryStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerSetRetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Duration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Retries == nil { + m.Retries = &intstr.IntOrString{} + } + if err := m.Retries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSetTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerSetTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerSetTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerNode{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RetryStrategy == nil { + m.RetryStrategy = &ContainerSetRetryStrategy{} + } + if err := 
m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContinueOn) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContinueOn: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContinueOn: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Error = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateS3BucketOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateS3BucketOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateS3BucketOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectLocking", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObjectLocking = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronWorkflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronWorkflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronWorkflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronWorkflowList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronWorkflowList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronWorkflowList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronWorkflow{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronWorkflowSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronWorkflowSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronWorkflowSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.WorkflowSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Suspend = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + 
} + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timezone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timezone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkflowMetadata == nil { + m.WorkflowMetadata = &v11.ObjectMeta{} + } + if err := m.WorkflowMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StopStrategy == nil { + m.StopStrategy = &StopStrategy{} + } + if err := m.StopStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedules = append(m.Schedules, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.When = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronWorkflowStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronWorkflowStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronWorkflowStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduledTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.LastScheduledTime == nil { + m.LastScheduledTime = &v11.Time{} + } + if err := m.LastScheduledTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Succeeded |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = CronWorkflowPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DAGTask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DAGTask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DAGTask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithItems", wireType) + } + var msglen int + 
[Hunk collapsed in extraction — machine-generated protobuf (gogo) `Unmarshal` methods; every field decode follows the standard varint/wire-type pattern sketched after this list. Types and fields covered by the hunk:
 DAGTask (cont.): 6 WithItems []Item; 7 WithParam string; 8 WithSequence *Sequence; 9 When string; 10 ContinueOn *ContinueOn; 11 OnExit string; 12 Depends string; 13 Hooks LifecycleHooks (map[LifecycleEvent]LifecycleHook); 14 Inline *Template
 DAGTemplate: 1 Target string; 2 Tasks []DAGTask; 3 FailFast *bool
 Data: 1 Source (message); 2 Transformation []TransformationStep
 DataSource: 1 ArtifactPaths *ArtifactPaths
 Event: 1 Selector string
 ExecutorConfig: 1 ServiceAccountName string
 GCSArtifact: 1 GCSBucket (message); 2 Key string
 GCSArtifactRepository: 1 GCSBucket (message); 2 KeyFormat string
 GCSBucket: 1 Bucket string; 2 ServiceAccountKeySecret *v1.SecretKeySelector
 Gauge: 1 Value string; 2 Realtime *bool; 3 Operation GaugeOperation
 GitArtifact: 1 Repo string; 2 Revision string; 3 Depth *uint64; 4 Fetch []string; 5 UsernameSecret, 6 PasswordSecret, 7 SSHPrivateKeySecret *v1.SecretKeySelector; 8 InsecureIgnoreHostKey bool; 9 DisableSubmodules bool; 10 SingleBranch bool; 11 Branch string; 12 InsecureSkipTLS bool
 HDFSArtifact: 1 HDFSConfig (message); 2 Path string; 3 Force bool
 HDFSArtifactRepository: 1 HDFSConfig (message); 2 PathFormat string; 3 Force bool
 HDFSConfig: 1 HDFSKrbConfig (message); 2 Addresses []string; 3 HDFSUser string; 4 DataTransferProtection string
 HDFSKrbConfig: 1 KrbCCacheSecret, 2 KrbKeytabSecret *v1.SecretKeySelector; 3 KrbUsername string; 4 KrbRealm string; 5 KrbConfigConfigMap *v1.ConfigMapKeySelector; 6 KrbServicePrincipalName string
 HTTP: 1 Method string; 2 URL string; 3 Headers []HTTPHeader; 4 TimeoutSeconds *int64; 5 Body string; 6 SuccessCondition string; 7 InsecureSkipVerify bool; 8 BodyFrom *HTTPBodySource
 HTTPArtifact: 1 URL string; 2 Headers []Header; 3 Auth *HTTPAuth
 HTTPAuth: 1 ClientCert, 2 OAuth2, 3 BasicAuth (messages)
 HTTPBodySource: 1 Bytes []byte
 HTTPHeader: 1 Name string; 2 Value string; 3 ValueFrom *HTTPHeaderSource]
+func (m *HTTPHeaderSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeaderSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeaderSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &v1.SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Buckets = append(m.Buckets, Amount{}) + if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Inputs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Inputs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Inputs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = append(m.Artifacts, Artifact{}) + if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Item) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Item: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Item: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelKeys) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelKeys: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelKeys: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValueFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValueFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValues) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValues: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValues: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LifecycleHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManifestFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Artifact == nil { + m.Artifact = &Artifact{} + } + if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoizationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoizationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoizationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Hit = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Memoize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Memoize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Memoize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cache == nil { + m.Cache = &Cache{} + } + if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MaxAge = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricLabel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricLabel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prometheus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prometheus = append(m.Prometheus, &Prometheus{}) + if err := m.Prometheus[len(m.Prometheus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mutex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mutex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mutex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutexHolding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutexHolding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutexHolding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mutex = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Holder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holder = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutexStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutexStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutexStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holding = append(m.Holding, MutexHolding{}) + if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Waiting = append(m.Waiting, MutexHolding{}) + if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeFlag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeFlag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeFlag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hooked", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Hooked = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retried", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Retried = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Outputs == nil { + m.Outputs = &Outputs{} + } + if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Progress = Progress(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BoundaryID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BoundaryID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Daemoned", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Daemoned = &b + case 14: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Inputs == nil { + m.Inputs = &Inputs{} + } + if err := m.Inputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Outputs == nil { + m.Outputs = &Outputs{} + } + if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutboundNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutboundNodes = append(m.OutboundNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateScope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateScope = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourcesDuration == nil { + m.ResourcesDuration = make(ResourcesDuration) + } + var mapkey k8s_io_api_core_v1.ResourceName + var mapvalue int64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(mapkey)] = ((ResourceDuration)(mapvalue)) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostNodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoizationStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoizationStatus == nil { + m.MemoizationStatus = &MemoizationStatus{} + } + if err := m.MemoizationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EstimatedDuration", wireType) + } + m.EstimatedDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EstimatedDuration |= EstimatedDuration(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SynchronizationStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SynchronizationStatus == nil { + m.SynchronizationStatus = &NodeSynchronizationStatus{} + } + if err := m.SynchronizationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Progress = Progress(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFlag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFlag == nil { + m.NodeFlag = &NodeFlag{} + } + if err := m.NodeFlag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSynchronizationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSynchronizationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSynchronizationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Waiting = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NoneStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NoneStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NoneStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuth2Auth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuth2Auth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuth2Auth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientIDSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientIDSecret == nil { + m.ClientIDSecret = &v1.SecretKeySelector{} + } + if err := m.ClientIDSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientSecretSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientSecretSecret == nil { + m.ClientSecretSecret = &v1.SecretKeySelector{} + } + if err := m.ClientSecretSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenURLSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TokenURLSecret == nil { + m.TokenURLSecret = &v1.SecretKeySelector{} + } + if err := m.TokenURLSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointParams = append(m.EndpointParams, OAuth2EndpointParam{}) + if err := 
m.EndpointParams[len(m.EndpointParams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuth2EndpointParam) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuth2EndpointParam: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuth2EndpointParam: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OSSArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OSSArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OSSArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSSBucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OSSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OSSArtifactRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OSSArtifactRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OSSArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSSBucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OSSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OSSBucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OSSBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OSSBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AccessKeySecret == nil { + m.AccessKeySecret = &v1.SecretKeySelector{} + } + if err := m.AccessKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeySecret", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeySecret == nil { + m.SecretKeySecret = &v1.SecretKeySelector{} + } + if err := m.SecretKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateBucketIfNotPresent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CreateBucketIfNotPresent = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecurityToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LifecycleRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LifecycleRule == nil { + m.LifecycleRule = &OSSLifecycleRule{} + } + if err := m.LifecycleRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseSDKCreds = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OSSLifecycleRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OSSLifecycleRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OSSLifecycleRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MarkInfrequentAccessAfterDays", wireType) + } + m.MarkInfrequentAccessAfterDays = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MarkInfrequentAccessAfterDays |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MarkDeletionAfterDays", wireType) + } + m.MarkDeletionAfterDays = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MarkDeletionAfterDays |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Outputs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Outputs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Outputs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = append(m.Artifacts, Artifact{}) + if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Result = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ExitCode = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParallelSteps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParallelSteps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParallelSteps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, WorkflowStep{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AnyString(dAtA[iNdEx:postIndex]) + m.Default = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AnyString(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &ValueFrom{} + } + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, AnyString(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AnyString(dAtA[iNdEx:postIndex]) + m.Description = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Plugin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Plugin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Plugin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodGC) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: PodGC: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodGC: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = PodGCStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteDelayDuration", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeleteDelayDuration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Prometheus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Prometheus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Prometheus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, &MetricLabel{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.When = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MergeStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MergeStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SetOwnerReference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SetOwnerReference = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
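+				// Editorial annotation (not part of the gogo-generated output):
+				// postIndex only goes negative when iNdEx+intStringLen overflows
+				// int, so this branch rejects a corrupted length prefix before it
+				// can be used to slice dAtA.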
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureCondition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Flags = append(m.Flags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManifestFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ManifestFrom == nil { + m.ManifestFrom = &ManifestFrom{} + } + if err := m.ManifestFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetryAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAntiAffinity == nil { + m.NodeAntiAffinity = &RetryNodeAntiAffinity{} + } + if err := m.NodeAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetryNodeAntiAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryNodeAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryNodeAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetryStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limit == nil { + m.Limit = &intstr.IntOrString{} + } + if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RetryPolicy = RetryPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backoff", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backoff == nil { + m.Backoff = &Backoff{} + } + if err := m.Backoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &RetryAffinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3Artifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3Artifact: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3Artifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.S3Bucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3ArtifactRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3ArtifactRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3ArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.S3Bucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AccessKeySecret == nil { + m.AccessKeySecret = &v1.SecretKeySelector{} + } + if err := m.AccessKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeySecret == nil { + m.SecretKeySecret = &v1.SecretKeySelector{} + } + if err := m.SecretKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleARN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleARN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseSDKCreds = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateBucketIfNotPresent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateBucketIfNotPresent == nil { + m.CreateBucketIfNotPresent = &CreateS3BucketOptions{} + } + if err := m.CreateBucketIfNotPresent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EncryptionOptions == nil { + m.EncryptionOptions = &S3EncryptionOptions{} + } + if err := m.EncryptionOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CASecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CASecret == nil { + m.CASecret = &v1.SecretKeySelector{} + } + if err := m.CASecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionTokenSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SessionTokenSecret == nil { + m.SessionTokenSecret = &v1.SecretKeySelector{} + } + if err := m.SessionTokenSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
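+	// Editorial annotation (not part of the gogo-generated output): a final
+	// defensive bounds check after the field loop; if decoding advanced past
+	// the end of dAtA, the message was truncated and is reported as an
+	// unexpected EOF.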
+ if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3EncryptionOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3EncryptionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3EncryptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KmsKeyId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KmsKeyId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KmsEncryptionContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KmsEncryptionContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableEncryption", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EnableEncryption = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerSideCustomerKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServerSideCustomerKeySecret == nil { + m.ServerSideCustomerKeySecret = &v1.SecretKeySelector{} + } + if err := m.ServerSideCustomerKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScriptTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScriptTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScriptTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SemaphoreHolding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SemaphoreHolding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SemaphoreHolding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Semaphore = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Holders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holders = append(m.Holders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SemaphoreRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SemaphoreRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SemaphoreRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &v1.ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
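+				// Editorial annotation (not part of the gogo-generated output):
+				// the declared string length runs past the end of the buffer,
+				// i.e. the encoded Namespace field was truncated.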
return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SemaphoreStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SemaphoreStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SemaphoreStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holding = append(m.Holding, SemaphoreHolding{}) + if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Waiting = append(m.Waiting, SemaphoreHolding{}) + if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sequence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sequence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: Sequence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Count == nil { + m.Count = &intstr.IntOrString{} + } + if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Start == nil { + m.Start = &intstr.IntOrString{} + } + if err := m.Start.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.End == nil { + m.End = &intstr.IntOrString{} + } + if err := m.End.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Submit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Submit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Submit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowTemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.WorkflowTemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Arguments == nil { + m.Arguments = &Arguments{} + } + if err := 
[Generated protobuf decoding code, collapsed into a few run-on lines by extraction. The hunk adds gogo/protobuf-style `Unmarshal` methods for the `SubmitOpts`, `SuppliedValueFrom`, `SuspendTemplate`, `Synchronization`, `SynchronizationStatus`, `TTLStrategy`, `TarStrategy`, `Template`, `TemplateRef`, `TransformationStep`, `UserContainer`, `ValueFrom`, and `Version` messages; it opens with the tail of the preceding message's method (its `Arguments` and `ObjectMeta` fields) and runs into the start of `Version`'s. Every method follows the same generated skeleton: decode a varint tag, split it into field number and wire type, switch on the field number to decode strings, booleans, integers, and embedded messages under `ErrIntOverflowGenerated`/`ErrInvalidLengthGenerated` bounds checks, and fall through to `skipGenerated` for unknown fields. The file is machine-generated and regenerable from its `.proto` source.]
wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildDate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildDate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitCommit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GitCommit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitTag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GitTag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitTreeState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GitTreeState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GoVersion", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GoVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compiler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compiler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Platform = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeClaimGC) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeClaimGC: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeClaimGC: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = VolumeClaimGCStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowArtifactGCTask) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowArtifactGCTask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowArtifactGCTask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: WorkflowArtifactGCTaskList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowArtifactGCTaskList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, WorkflowArtifactGCTask{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowEventBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowEventBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowEventBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowEventBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, WorkflowEventBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowEventBindingSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowEventBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Submit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Submit == nil { + m.Submit = &Submit{} + } + if err := m.Submit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowLevelArtifactGC) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowLevelArtifactGC: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowLevelArtifactGC: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceFinalizerRemoval", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceFinalizerRemoval = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSpecPatch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSpecPatch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Workflow{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx 
= postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelsFrom == nil { + m.LabelsFrom = make(map[string]LabelValueFrom) + } + var mapkey string + mapvalue := &LabelValueFrom{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &LabelValueFrom{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LabelsFrom[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, Template{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entrypoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactRepositoryRef == nil { + m.ArtifactRepositoryRef = &ArtifactRepositoryRef{} + } + if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HostNetwork = &b + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) + m.DNSPolicy = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &v1.PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnExit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnExit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << 
[Elided: extraction-flattened hunk of auto-generated protobuf decoding code (gogo/protobuf style; identifiers such as ErrIntOverflowGenerated and skipGenerated mark it as generated). The hunk continues the preceding Unmarshal method through WorkflowSpec fields 21-43 (SchedulerName, PodGC, PodPriorityClassName, PodPriority, HostAliases, SecurityContext, PodSpecPatch, AutomountServiceAccountToken, Executor, TTLStrategy, PodDisruptionBudget, Metrics, Shutdown, WorkflowTemplateRef, Synchronization, VolumeClaimGC, RetryStrategy, PodMetadata, TemplateDefaults, ArchiveLogs, Hooks, WorkflowMetadata, ArtifactGC), then defines Unmarshal for WorkflowStatus (Phase, StartedAt, FinishedAt, Message, CompressedNodes, Nodes, PersistentVolumeClaims, Outputs, StoredTemplates, OffloadNodeStatusVersion, ResourcesDuration, Conditions, StoredWorkflowSpec, Synchronization, EstimatedDuration, Progress, ArtifactRepositoryRef, ArtifactGCStatus, TaskResultsCompletionStatus), WorkflowStep, WorkflowTaskResult, WorkflowTaskResultList, WorkflowTaskSet, WorkflowTaskSetList, WorkflowTaskSetSpec, and WorkflowTaskSetStatus, and opens WorkflowTemplate. Every method follows the same mechanical pattern: decode the tag varint, switch on the field number, bounds-check against ErrIntOverflowGenerated, ErrInvalidLengthGenerated, and io.ErrUnexpectedEOF, decode the varint or length-delimited payload (nested messages recurse into their own Unmarshal; map fields decode key/value sub-fields 1 and 2 per entry), and skip unknown fields via skipGenerated.]
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, WorkflowTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ZipStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ZipStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ZipStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto new file mode 100644 index 00000000..5ae06f70 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto @@ -0,0 +1,2292 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Amount represent a numeric amount. 
+// +kubebuilder:validation:Type=number +message Amount { + optional string value = 1; +} + +// ArchiveStrategy describes how to archive files/directory when saving artifacts +message ArchiveStrategy { + optional TarStrategy tar = 1; + + optional NoneStrategy none = 2; + + optional ZipStrategy zip = 3; +} + +// Arguments to a template +message Arguments { + // Parameters is the list of parameters to pass to the template or workflow + // +patchStrategy=merge + // +patchMergeKey=name + repeated Parameter parameters = 1; + + // Artifacts is the list of artifacts to pass to the template or workflow + // +patchStrategy=merge + // +patchMergeKey=name + repeated Artifact artifacts = 2; +} + +// ArtGCStatus maintains state related to ArtifactGC +message ArtGCStatus { + // have Pods been started to perform this strategy? (enables us not to re-process what we've already done) + map strategiesProcessed = 1; + + // have completed Pods been processed? (mapped by Pod name) + // used to prevent re-processing the Status of a Pod more than once + map podsRecouped = 2; + + // if this is true, we already checked to see if we need to do it and we don't + optional bool notSpecified = 3; +} + +// Artifact indicates an artifact to place at a specified path +message Artifact { + // name of the artifact. must be unique within a template's inputs/outputs. + optional string name = 1; + + // Path is the container path to the artifact + optional string path = 2; + + // mode bits to use on this file, must be a value between 0 and 0777 + // set when loading input artifacts. + optional int32 mode = 3; + + // From allows an artifact to reference an artifact from a previous step + optional string from = 4; + + // ArtifactLocation contains the location of the artifact + optional ArtifactLocation artifactLocation = 5; + + // GlobalName exports an output artifact to the global scope, making it available as + // '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts + optional string globalName = 6; + + // Archive controls how the artifact will be saved to the artifact repository. + optional ArchiveStrategy archive = 7; + + // Make Artifacts optional, if Artifacts doesn't generate or exist + optional bool optional = 8; + + // SubPath allows an artifact to be sourced from a subpath within the specified source + optional string subPath = 9; + + // If mode is set, apply the permission recursively into the artifact if it is a folder + optional bool recurseMode = 10; + + // FromExpression, if defined, is evaluated to specify the value for the artifact + optional string fromExpression = 11; + + // ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows + optional ArtifactGC artifactGC = 12; + + // Has this been deleted? + optional bool deleted = 13; +} + +// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed +message ArtifactGC { + // Strategy is the strategy to use. 
+ // +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never + optional string strategy = 1; + + // PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion + optional Metadata podMetadata = 2; + + // ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion + optional string serviceAccountName = 3; +} + +// ArtifactGCSpec specifies the Artifacts that need to be deleted +message ArtifactGCSpec { + // ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node + map artifactsByNode = 1; +} + +// ArtifactGCStatus describes the result of the deletion +message ArtifactGCStatus { + // ArtifactResultsByNode maps Node name to result + map artifactResultsByNode = 1; +} + +// ArtifactLocation describes a location for a single or multiple artifacts. +// It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). +// It is also used to describe the location of multiple artifacts such as the archive location +// of a single workflow step, which the executor will use as a default location to store its files. +message ArtifactLocation { + // ArchiveLogs indicates if the container logs should be archived + optional bool archiveLogs = 1; + + // S3 contains S3 artifact location details + optional S3Artifact s3 = 2; + + // Git contains git artifact location details + optional GitArtifact git = 3; + + // HTTP contains HTTP artifact location details + optional HTTPArtifact http = 4; + + // Artifactory contains artifactory artifact location details + optional ArtifactoryArtifact artifactory = 5; + + // HDFS contains HDFS artifact location details + optional HDFSArtifact hdfs = 6; + + // Raw contains raw artifact location details + optional RawArtifact raw = 7; + + // OSS contains OSS artifact location details + optional OSSArtifact oss = 8; + + // GCS contains GCS artifact location details + optional GCSArtifact gcs = 9; + + // Azure contains Azure Storage artifact location details + optional AzureArtifact azure = 10; +} + +// ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node +message ArtifactNodeSpec { + // ArchiveLocation is the template-level Artifact location specification + optional ArtifactLocation archiveLocation = 1; + + // Artifacts maps artifact name to Artifact description + map artifacts = 2; +} + +// ArtifactPaths expands a step from a collection of artifacts +message ArtifactPaths { + // Artifact is the artifact location from which to source the artifacts, it can be a directory + optional Artifact artifact = 1; +} + +// ArtifactRepository represents an artifact repository in which a controller will store its artifacts +message ArtifactRepository { + // ArchiveLogs enables log archiving + optional bool archiveLogs = 1; + + // S3 stores artifact in a S3-compliant object store + optional S3ArtifactRepository s3 = 2; + + // Artifactory stores artifacts to JFrog Artifactory + optional ArtifactoryArtifactRepository artifactory = 3; + + // HDFS stores artifacts in HDFS + optional HDFSArtifactRepository hdfs = 4; + + // OSS stores artifact in a OSS-compliant object store + optional OSSArtifactRepository oss = 5; + + // GCS stores artifact in a GCS object store + optional GCSArtifactRepository gcs = 6; + + // Azure stores artifact in an Azure Storage account + optional AzureArtifactRepository azure = 7; +} + +// 
+protobuf.options.(gogoproto.goproto_stringer)=false +message ArtifactRepositoryRef { + // The name of the config map. Defaults to "artifact-repositories". + optional string configMap = 1; + + // The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation. + optional string key = 2; +} + +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ArtifactRepositoryRefStatus { + optional ArtifactRepositoryRef artifactRepositoryRef = 1; + + // The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found). + optional string namespace = 2; + + // If this ref represents the default artifact repository, rather than a config map. + optional bool default = 3; + + // The repository the workflow will use. This maybe empty before v3.1. + optional ArtifactRepository artifactRepository = 4; +} + +// ArtifactResult describes the result of attempting to delete a given Artifact +message ArtifactResult { + // Name is the name of the Artifact + optional string name = 1; + + // Success describes whether the deletion succeeded + optional bool success = 2; + + // Error is an optional error message which should be set if Success==false + optional string error = 3; +} + +// ArtifactResultNodeStatus describes the result of the deletion on a given node +message ArtifactResultNodeStatus { + // ArtifactResults maps Artifact name to result of the deletion + map artifactResults = 1; +} + +message ArtifactSearchQuery { + map artifactGCStrategies = 1; + + optional string artifactName = 2; + + optional string templateName = 3; + + optional string nodeId = 4; + + optional bool deleted = 5; + + map nodeTypes = 6; +} + +message ArtifactSearchResult { + optional Artifact artifact = 1; + + optional string nodeID = 2; +} + +// ArtifactoryArtifact is the location of an artifactory artifact +message ArtifactoryArtifact { + // URL of the artifact + optional string url = 1; + + optional ArtifactoryAuth artifactoryAuth = 2; +} + +// ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository +message ArtifactoryArtifactRepository { + optional ArtifactoryAuth artifactoryAuth = 1; + + // RepoURL is the url for artifactory repo. + optional string repoURL = 2; + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + optional string keyFormat = 3; +} + +// ArtifactoryAuth describes the secret selectors required for authenticating to artifactory +message ArtifactoryAuth { + // UsernameSecret is the secret selector to the repository username + optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 1; + + // PasswordSecret is the secret selector to the repository password + optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; +} + +// AzureArtifact is the location of a an Azure Storage artifact +message AzureArtifact { + optional AzureBlobContainer azureBlobContainer = 1; + + // Blob is the blob name (i.e., path) in the container where the artifact resides + optional string blob = 2; +} + +// AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository +message AzureArtifactRepository { + optional AzureBlobContainer blobContainer = 1; + + // BlobNameFormat is defines the format of how to store blob names. 
Can reference workflow variables + optional string blobNameFormat = 2; +} + +// AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container +message AzureBlobContainer { + // Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net" + optional string endpoint = 1; + + // Container is the container where resources will be stored + optional string container = 2; + + // AccountKeySecret is the secret selector to the Azure Blob Storage account access key + optional k8s.io.api.core.v1.SecretKeySelector accountKeySecret = 3; + + // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. + optional bool useSDKCreds = 4; +} + +// Backoff is a backoff strategy to use within retryStrategy +message Backoff { + // Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h") + optional string duration = 1; + + // Factor is a factor to multiply the base duration after each failed retry + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString factor = 2; + + // MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. + // It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. + // However, when the workflow fails, the pod's deadline is then overridden by maxDuration. + // This ensures that the workflow does not exceed the specified maximum duration when retries are involved. + optional string maxDuration = 3; +} + +// BasicAuth describes the secret selectors required for basic authentication +message BasicAuth { + // UsernameSecret is the secret selector to the repository username + optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 1; + + // PasswordSecret is the secret selector to the repository password + optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; +} + +// Cache is the configuration for the type of cache to be used +message Cache { + // ConfigMap sets a ConfigMap-based cache + optional k8s.io.api.core.v1.ConfigMapKeySelector configMap = 1; +} + +// ClientCertAuth holds necessary information for client authentication via certificates +message ClientCertAuth { + optional k8s.io.api.core.v1.SecretKeySelector clientCertSecret = 1; + + optional k8s.io.api.core.v1.SecretKeySelector clientKeySecret = 2; +} + +// ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope +// +genclient +// +genclient:noStatus +// +genclient:nonNamespaced +// +kubebuilder:resource:scope=Cluster,shortName=clusterwftmpl;cwft +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message ClusterWorkflowTemplate { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional WorkflowSpec spec = 2; +} + +// ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message ClusterWorkflowTemplateList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated ClusterWorkflowTemplate items = 2; +} + +// Column is a custom column that will be exposed in the Workflow List View. +// +patchStrategy=merge +// +patchMergeKey=name +message Column { + // The name of this column, e.g., "Workflow Completed". + optional string name = 1; + + // The type of this column, "label" or "annotation". 
+ optional string type = 2; + + // The key of the label or annotation, e.g., "workflows.argoproj.io/completed". + optional string key = 3; +} + +message Condition { + // Type is the type of condition + optional string type = 1; + + // Status is the status of the condition + optional string status = 2; + + // Message is the condition message + optional string message = 3; +} + +message ContainerNode { + optional k8s.io.api.core.v1.Container container = 1; + + repeated string dependencies = 2; +} + +// ContainerSetRetryStrategy provides controls on how to retry a container set +message ContainerSetRetryStrategy { + // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + optional string duration = 1; + + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString retries = 2; +} + +message ContainerSetTemplate { + repeated ContainerNode containers = 4; + + repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 3; + + // RetryStrategy describes how to retry container nodes if the container set fails. + // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. + optional ContainerSetRetryStrategy retryStrategy = 5; +} + +// ContinueOn defines if a workflow should continue even if a task or step fails/errors. +// It can be specified if the workflow should continue when the pod errors, fails or both. +message ContinueOn { + // +optional + optional bool error = 1; + + // +optional + optional bool failed = 2; +} + +// Counter is a Counter prometheus metric +message Counter { + // Value is the value of the metric + optional string value = 1; +} + +// CreateS3BucketOptions options used to determine automatic automatic bucket-creation process +message CreateS3BucketOptions { + // ObjectLocking Enable object locking + optional bool objectLocking = 3; +} + +// CronWorkflow is the definition of a scheduled workflow resource +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=cwf;cronwf +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message CronWorkflow { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional CronWorkflowSpec spec = 2; + + optional CronWorkflowStatus status = 3; +} + +// CronWorkflowList is list of CronWorkflow resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message CronWorkflowList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated CronWorkflow items = 2; +} + +// CronWorkflowSpec is the specification of a CronWorkflow +message CronWorkflowSpec { + // WorkflowSpec is the spec of the workflow to be run + optional WorkflowSpec workflowSpec = 1; + + // Schedule is a schedule to run the Workflow in Cron format. Deprecated, use Schedules + optional string schedule = 2; + + // ConcurrencyPolicy is the K8s-style concurrency policy that will be used + optional string concurrencyPolicy = 3; + + // Suspend is a flag that will stop new CronWorkflows from running if set to true + optional bool suspend = 4; + + // StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its + // original scheduled time if it is missed. 
+ optional int64 startingDeadlineSeconds = 5; + + // SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time + optional int32 successfulJobsHistoryLimit = 6; + + // FailedJobsHistoryLimit is the number of failed jobs to be kept at a time + optional int32 failedJobsHistoryLimit = 7; + + // Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time. + optional string timezone = 8; + + // WorkflowMetadata contains some metadata of the workflow to be run + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta workflowMeta = 9; + + // v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition + optional StopStrategy stopStrategy = 10; + + // v3.6 and after: Schedules is a list of schedules to run the Workflow in Cron format + repeated string schedules = 11; + + // v3.6 and after: When is an expression that determines if a run should be scheduled. + optional string when = 12; +} + +// CronWorkflowStatus is the status of a CronWorkflow +message CronWorkflowStatus { + // Active is a list of active workflows stemming from this CronWorkflow + repeated k8s.io.api.core.v1.ObjectReference active = 1; + + // LastScheduleTime is the last time the CronWorkflow was scheduled + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduledTime = 2; + + // Conditions is a list of conditions the CronWorkflow may have + repeated Condition conditions = 3; + + // v3.6 and after: Succeeded counts how many times child workflows succeeded + optional int64 succeeded = 4; + + // v3.6 and after: Failed counts how many times child workflows failed + optional int64 failed = 5; + + // v3.6 and after: Phase is an enum of Active or Stopped. It changes to Stopped when stopStrategy.expression is true + optional string phase = 6; +} + +// DAGTask represents a node in the graph during DAG execution +message DAGTask { + // Name is the name of the target + optional string name = 1; + + // Name of template to execute + optional string template = 2; + + // Inline is the template. Template must be empty if this is declared (and vice-versa). + optional Template inline = 14; + + // Arguments are the parameter and artifact arguments to the template + optional Arguments arguments = 3; + + // TemplateRef is the reference to the template resource to execute. + optional TemplateRef templateRef = 4; + + // Dependencies are name of other targets which this depends on + repeated string dependencies = 5; + + // WithItems expands a task into multiple parallel tasks from the items in the list + repeated Item withItems = 6; + + // WithParam expands a task into multiple parallel tasks from the value in the parameter, + // which is expected to be a JSON list. + optional string withParam = 7; + + // WithSequence expands a task into a numeric sequence + optional Sequence withSequence = 8; + + // When is an expression in which the task should conditionally execute + optional string when = 9; + + // ContinueOn makes argo to proceed with the following step even if this step fails. + // Errors and Failed states can be specified + optional ContinueOn continueOn = 10; + + // OnExit is a template reference which is invoked at the end of the + // template, irrespective of the success, failure, or error of the + // primary template. + // DEPRECATED: Use Hooks[exit].Template instead. 
+ optional string onExit = 11; + + // Depends are name of other targets which this depends on + optional string depends = 12; + + // Hooks hold the lifecycle hook which is invoked at lifecycle of + // task, irrespective of the success, failure, or error status of the primary task + map hooks = 13; +} + +// DAGTemplate is a template subtype for directed acyclic graph templates +message DAGTemplate { + // Target are one or more names of targets to execute in a DAG + optional string target = 1; + + // Tasks are a list of DAG tasks + // +patchStrategy=merge + // +patchMergeKey=name + repeated DAGTask tasks = 2; + + // This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, + // as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed + // before failing the DAG itself. + // The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to + // completion (either success or failure), regardless of the failed outcomes of branches in the DAG. + // More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442 + optional bool failFast = 3; +} + +// Data is a data template +message Data { + // Source sources external data into a data template + optional DataSource source = 1; + + // Transformation applies a set of transformations + repeated TransformationStep transformation = 2; +} + +// DataSource sources external data into a data template +message DataSource { + // ArtifactPaths is a data transformation that collects a list of artifact paths + optional ArtifactPaths artifactPaths = 1; +} + +message Event { + // Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. `payload.message == "test"` + optional string selector = 1; +} + +// ExecutorConfig holds configurations of an executor container. +message ExecutorConfig { + // ServiceAccountName specifies the service account name of the executor container. + optional string serviceAccountName = 1; +} + +// GCSArtifact is the location of a GCS artifact +message GCSArtifact { + optional GCSBucket gCSBucket = 1; + + // Key is the path in the bucket where the artifact resides + optional string key = 2; +} + +// GCSArtifactRepository defines the controller configuration for a GCS artifact repository +message GCSArtifactRepository { + optional GCSBucket gCSBucket = 1; + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + optional string keyFormat = 2; +} + +// GCSBucket contains the access information for interfacring with a GCS bucket +message GCSBucket { + // Bucket is the name of the bucket + optional string bucket = 1; + + // ServiceAccountKeySecret is the secret selector to the bucket's service account key + optional k8s.io.api.core.v1.SecretKeySelector serviceAccountKeySecret = 2; +} + +// Gauge is a Gauge prometheus metric +message Gauge { + // Value is the value to be used in the operation with the metric's current value. 
If no operation is set, + // value is the value of the metric + optional string value = 1; + + // Realtime emits this metric in real time if applicable + optional bool realtime = 2; + + // Operation defines the operation to apply with value and the metrics' current value + // +optional + optional string operation = 3; +} + +// GitArtifact is the location of an git artifact +message GitArtifact { + // Repo is the git repository + optional string repo = 1; + + // Revision is the git commit, tag, branch to checkout + optional string revision = 2; + + // Depth specifies clones/fetches should be shallow and include the given + // number of commits from the branch tip + optional uint64 depth = 3; + + // Fetch specifies a number of refs that should be fetched before checkout + repeated string fetch = 4; + + // UsernameSecret is the secret selector to the repository username + optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 5; + + // PasswordSecret is the secret selector to the repository password + optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 6; + + // SSHPrivateKeySecret is the secret selector to the repository ssh private key + optional k8s.io.api.core.v1.SecretKeySelector sshPrivateKeySecret = 7; + + // InsecureIgnoreHostKey disables SSH strict host key checking during git clone + optional bool insecureIgnoreHostKey = 8; + + // DisableSubmodules disables submodules during git clone + optional bool disableSubmodules = 9; + + // SingleBranch enables single branch clone, using the `branch` parameter + optional bool singleBranch = 10; + + // Branch is the branch to fetch when `SingleBranch` is enabled + optional string branch = 11; + + // InsecureSkipTLS disables server certificate verification resulting in insecure HTTPS connections + optional bool insecureSkipTLS = 12; +} + +// HDFSArtifact is the location of an HDFS artifact +message HDFSArtifact { + optional HDFSConfig hDFSConfig = 1; + + // Path is a file path in HDFS + optional string path = 2; + + // Force copies a file forcibly even if it exists + optional bool force = 3; +} + +// HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository +message HDFSArtifactRepository { + optional HDFSConfig hDFSConfig = 1; + + // PathFormat is defines the format of path to store a file. Can reference workflow variables + optional string pathFormat = 2; + + // Force copies a file forcibly even if it exists + optional bool force = 3; +} + +// HDFSConfig is configurations for HDFS +message HDFSConfig { + optional HDFSKrbConfig hDFSKrbConfig = 1; + + // Addresses is accessible addresses of HDFS name nodes + repeated string addresses = 2; + + // HDFSUser is the user to access HDFS file system. + // It is ignored if either ccache or keytab is used. + optional string hdfsUser = 3; + + // DataTransferProtection is the protection level for HDFS data transfer. + // It corresponds to the dfs.data.transfer.protection configuration in HDFS. + optional string dataTransferProtection = 4; +} + +// HDFSKrbConfig is auth configurations for Kerberos +message HDFSKrbConfig { + // KrbCCacheSecret is the secret selector for Kerberos ccache + // Either ccache or keytab can be set to use Kerberos. + optional k8s.io.api.core.v1.SecretKeySelector krbCCacheSecret = 1; + + // KrbKeytabSecret is the secret selector for Kerberos keytab + // Either ccache or keytab can be set to use Kerberos. 
+ optional k8s.io.api.core.v1.SecretKeySelector krbKeytabSecret = 2; + + // KrbUsername is the Kerberos username used with Kerberos keytab + // It must be set if keytab is used. + optional string krbUsername = 3; + + // KrbRealm is the Kerberos realm used with Kerberos keytab + // It must be set if keytab is used. + optional string krbRealm = 4; + + // KrbConfig is the configmap selector for Kerberos config as string + // It must be set if either ccache or keytab is used. + optional k8s.io.api.core.v1.ConfigMapKeySelector krbConfigConfigMap = 5; + + // KrbServicePrincipalName is the principal name of Kerberos service + // It must be set if either ccache or keytab is used. + optional string krbServicePrincipalName = 6; +} + +message HTTP { + // Method is HTTP methods for HTTP Request + optional string method = 1; + + // URL of the HTTP Request + optional string url = 2; + + // Headers are an optional list of headers to send with HTTP requests + repeated HTTPHeader headers = 3; + + // TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds + optional int64 timeoutSeconds = 4; + + // SuccessCondition is an expression if evaluated to true is considered successful + optional string successCondition = 6; + + // Body is content of the HTTP Request + optional string body = 5; + + // BodyFrom is content of the HTTP Request as Bytes + optional HTTPBodySource bodyFrom = 8; + + // InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client + optional bool insecureSkipVerify = 7; +} + +// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container +message HTTPArtifact { + // URL of the artifact + optional string url = 1; + + // Headers are an optional list of headers to send with HTTP requests for artifacts + repeated Header headers = 2; + + // Auth contains information for client authentication + optional HTTPAuth auth = 3; +} + +message HTTPAuth { + optional ClientCertAuth clientCert = 1; + + optional OAuth2Auth oauth2 = 2; + + optional BasicAuth basicAuth = 3; +} + +// HTTPBodySource contains the source of the HTTP body. 
+message HTTPBodySource { + optional bytes bytes = 1; +} + +message HTTPHeader { + optional string name = 1; + + optional string value = 2; + + optional HTTPHeaderSource valueFrom = 3; +} + +message HTTPHeaderSource { + optional k8s.io.api.core.v1.SecretKeySelector secretKeyRef = 1; +} + +// Header indicate a key-value request header to be used when fetching artifacts over HTTP +message Header { + // Name is the header name + optional string name = 1; + + // Value is the literal value to use for the header + optional string value = 2; +} + +// Histogram is a Histogram prometheus metric +message Histogram { + // Value is the value of the metric + optional string value = 3; + + // Buckets is a list of bucket divisors for the histogram + repeated Amount buckets = 4; +} + +// Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another +message Inputs { + // Parameters are a list of parameters passed as inputs + // +patchStrategy=merge + // +patchMergeKey=name + repeated Parameter parameters = 1; + + // Artifact are a list of artifacts passed as inputs + // +patchStrategy=merge + // +patchMergeKey=name + repeated Artifact artifacts = 2; +} + +// Item expands a single workflow step into multiple parallel steps +// The value of Item can be a map, string, bool, or number +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +kubebuilder:validation:Type=object +message Item { + optional bytes value = 1; +} + +// LabelKeys is list of keys +message LabelKeys { + repeated string items = 1; +} + +message LabelValueFrom { + optional string expression = 1; +} + +// Labels is list of workflow labels +message LabelValues { + repeated string items = 1; +} + +message LifecycleHook { + // Template is the name of the template to execute by the hook + optional string template = 1; + + // Arguments hold arguments to the template + optional Arguments arguments = 2; + + // TemplateRef is the reference to the template resource to execute by the hook + optional TemplateRef templateRef = 3; + + // Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not + // be retried and the retry strategy will be ignored + optional string expression = 4; +} + +// A link to another app. +// +patchStrategy=merge +// +patchMergeKey=name +message Link { + // The name of the link, E.g. "Workflow Logs" or "Pod Logs" + optional string name = 1; + + // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat" + optional string scope = 2; + + // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}" + optional string url = 3; +} + +message ManifestFrom { + // Artifact contains the artifact to use + optional Artifact artifact = 1; +} + +// MemoizationStatus is the status of this memoized node +message MemoizationStatus { + // Hit indicates whether this node was created from a cache entry + optional bool hit = 1; + + // Key is the name of the key used for this node's cache + optional string key = 2; + + // Cache is the name of the cache that was used + optional string cacheName = 3; +} + +// Memoization enables caching for the Outputs of the template +message Memoize { + // Key is the key to use as the caching key + optional string key = 1; + + // Cache sets and configures the kind of cache + optional Cache cache = 2; + + // MaxAge is the maximum age (e.g. 
"180s", "24h") of an entry that is still considered valid. If an entry is older + // than the MaxAge, it will be ignored. + optional string maxAge = 3; +} + +// Pod metdata +message Metadata { + map annotations = 1; + + map labels = 2; +} + +// MetricLabel is a single label for a prometheus metric +message MetricLabel { + optional string key = 1; + + optional string value = 2; +} + +// Metrics are a list of metrics emitted from a Workflow/Template +message Metrics { + // Prometheus is a list of prometheus metrics to be emitted + repeated Prometheus prometheus = 1; +} + +// Mutex holds Mutex configuration +message Mutex { + // name of the mutex + optional string name = 1; + + // Namespace is the namespace of the mutex, default: [namespace of workflow] + optional string namespace = 2; +} + +// MutexHolding describes the mutex and the object which is holding it. +message MutexHolding { + // Reference for the mutex + // e.g: ${namespace}/mutex/${mutexName} + optional string mutex = 1; + + // Holder is a reference to the object which holds the Mutex. + // Holding Scenario: + // 1. Current workflow's NodeID which is holding the lock. + // e.g: ${NodeID} + // Waiting Scenario: + // 1. Current workflow or other workflow NodeID which is holding the lock. + // e.g: ${WorkflowName}/${NodeID} + optional string holder = 2; +} + +// MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks. +message MutexStatus { + // Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow. + // +listType=atomic + repeated MutexHolding holding = 1; + + // Waiting is a list of mutexes and their respective objects this workflow is waiting for. + // +listType=atomic + repeated MutexHolding waiting = 2; +} + +message NodeFlag { + // Hooked tracks whether or not this node was triggered by hook or onExit + optional bool hooked = 1; + + // Retried tracks whether or not this node was retried by retryStrategy + optional bool retried = 2; +} + +message NodeResult { + optional string phase = 1; + + optional string message = 2; + + optional Outputs outputs = 3; + + optional string progress = 4; +} + +// NodeStatus contains status information about an individual node in the workflow +message NodeStatus { + // ID is a unique identifier of a node within the worklow + // It is implemented as a hash of the node name, which makes the ID deterministic + optional string id = 1; + + // Name is unique name in the node tree used to generate the node ID + optional string name = 2; + + // DisplayName is a human readable representation of the node. Unique within a template boundary + optional string displayName = 3; + + // Type indicates type of node + optional string type = 4; + + // TemplateName is the template name which this node corresponds to. + // Not applicable to virtual nodes (e.g. Retry, StepGroup) + optional string templateName = 5; + + // TemplateRef is the reference to the template resource which this node corresponds to. + // Not applicable to virtual nodes (e.g. Retry, StepGroup) + optional TemplateRef templateRef = 6; + + // TemplateScope is the template scope in which the template of this node was retrieved. + optional string templateScope = 20; + + // Phase a simple, high-level summary of where the node is in its lifecycle. + // Can be used as a state machine. + // Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", + // "Skipped", "Failed", "Error", or "Omitted" as a final state. 
+ optional string phase = 7; + + // BoundaryID indicates the node ID of the associated template root node in which this node belongs to + optional string boundaryID = 8; + + // A human readable message indicating details about why the node is in this condition. + optional string message = 9; + + // Time at which this node started + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 10; + + // Time at which this node completed + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 11; + + // EstimatedDuration in seconds. + optional int64 estimatedDuration = 24; + + // Progress to completion + optional string progress = 26; + + // ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes. + map resourcesDuration = 21; + + // PodIP captures the IP of the pod for daemoned steps + optional string podIP = 12; + + // Daemoned tracks whether or not this node was daemoned and need to be terminated + optional bool daemoned = 13; + + // NodeFlag tracks some history of node. e.g.) hooked, retried, etc. + optional NodeFlag nodeFlag = 27; + + // Inputs captures input parameter values and artifact locations supplied to this template invocation + optional Inputs inputs = 14; + + // Outputs captures output parameter values and artifact locations produced by this template invocation + optional Outputs outputs = 15; + + // Children is a list of child node IDs + repeated string children = 16; + + // OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. + // For every invocation of a template, there are nodes which we considered as "outbound". Essentially, + // these are last nodes in the execution sequence to run, before the template is considered completed. + // These nodes are then connected as parents to a following step. + // + // In the case of single pod steps (i.e. container, script, resource templates), this list will be nil + // since the pod itself is already considered the "outbound" node. + // In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children). + // In the case of steps, outbound nodes are all the containers involved in the last step group. + // NOTE: since templates are composable, the list of outbound nodes are carried upwards when + // a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of + // a template, will be a superset of the outbound nodes of its last children. + repeated string outboundNodes = 17; + + // HostNodeName name of the Kubernetes node on which the Pod is running, if applicable + optional string hostNodeName = 22; + + // MemoizationStatus holds information about cached nodes + optional MemoizationStatus memoizationStatus = 23; + + // SynchronizationStatus is the synchronization status of the node + optional NodeSynchronizationStatus synchronizationStatus = 25; +} + +// NodeSynchronizationStatus stores the status of a node +message NodeSynchronizationStatus { + // Waiting is the name of the lock that this node is waiting for + optional string waiting = 1; +} + +// NoneStrategy indicates to skip tar process and upload the files or directory tree as independent +// files. Note that if the artifact is a directory, the artifact driver must support the ability to +// save/load the directory appropriately. 
+message NoneStrategy { +} + +// OAuth2Auth holds all information for client authentication via OAuth2 tokens +message OAuth2Auth { + optional k8s.io.api.core.v1.SecretKeySelector clientIDSecret = 1; + + optional k8s.io.api.core.v1.SecretKeySelector clientSecretSecret = 2; + + optional k8s.io.api.core.v1.SecretKeySelector tokenURLSecret = 3; + + repeated string scopes = 5; + + repeated OAuth2EndpointParam endpointParams = 6; +} + +// EndpointParam is for requesting optional fields that should be sent in the oauth request +message OAuth2EndpointParam { + // Name is the header name + optional string key = 1; + + // Value is the literal value to use for the header + optional string value = 2; +} + +// OSSArtifact is the location of an Alibaba Cloud OSS artifact +message OSSArtifact { + optional OSSBucket oSSBucket = 1; + + // Key is the path in the bucket where the artifact resides + optional string key = 2; +} + +// OSSArtifactRepository defines the controller configuration for an OSS artifact repository +message OSSArtifactRepository { + optional OSSBucket oSSBucket = 1; + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + optional string keyFormat = 2; +} + +// OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket +message OSSBucket { + // Endpoint is the hostname of the bucket endpoint + optional string endpoint = 1; + + // Bucket is the name of the bucket + optional string bucket = 2; + + // AccessKeySecret is the secret selector to the bucket's access key + optional k8s.io.api.core.v1.SecretKeySelector accessKeySecret = 3; + + // SecretKeySecret is the secret selector to the bucket's secret key + optional k8s.io.api.core.v1.SecretKeySelector secretKeySecret = 4; + + // CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist + optional bool createBucketIfNotPresent = 5; + + // SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm + optional string securityToken = 6; + + // LifecycleRule specifies how to manage bucket's lifecycle + optional OSSLifecycleRule lifecycleRule = 7; + + // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. 
+ optional bool useSDKCreds = 8; +} + +// OSSLifecycleRule specifies how to manage bucket's lifecycle +message OSSLifecycleRule { + // MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type + optional int32 markInfrequentAccessAfterDays = 1; + + // MarkDeletionAfterDays is the number of days before we delete objects in the bucket + optional int32 markDeletionAfterDays = 2; +} + +// +kubebuilder:validation:Type=object +message Object { + optional bytes value = 1; +} + +// Outputs hold parameters, artifacts, and results from a step +message Outputs { + // Parameters holds the list of output parameters produced by a step + // +patchStrategy=merge + // +patchMergeKey=name + repeated Parameter parameters = 1; + + // Artifacts holds the list of output artifacts produced by a step + // +patchStrategy=merge + // +patchMergeKey=name + repeated Artifact artifacts = 2; + + // Result holds the result (stdout) of a script template + optional string result = 3; + + // ExitCode holds the exit code of a script template + optional string exitCode = 4; +} + +// +kubebuilder:validation:Type=array +message ParallelSteps { + repeated WorkflowStep steps = 1; +} + +// Parameter indicate a passed string parameter to a service template with an optional default value +message Parameter { + // Name is the parameter name + optional string name = 1; + + // Default is the default value to use for an input parameter if a value was not supplied + optional string default = 2; + + // Value is the literal value to use for the parameter. + // If specified in the context of an input parameter, the value takes precedence over any passed values + optional string value = 3; + + // ValueFrom is the source for the output parameter's value + optional ValueFrom valueFrom = 4; + + // GlobalName exports an output parameter to the global scope, making it available as + // '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters + optional string globalName = 5; + + // Enum holds a list of string values to choose from, for the actual value of the parameter + repeated string enum = 6; + + // Description is the parameter description + optional string description = 7; +} + +// Plugin is an Object with exactly one key +message Plugin { + optional Object object = 1; +} + +// PodGC describes how to delete completed pods as they complete +message PodGC { + // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods + optional string strategy = 1; + + // LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 2; + + // DeleteDelayDuration specifies the duration before pods in the GC queue get deleted. 
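+ //
+ // Illustrative only (not generated content), a hedged sketch of a matching
+ // workflow spec fragment:
+ //
+ //   podGC:
+ //     strategy: OnPodSuccess
+ //     deleteDelayDuration: 30s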
+ optional string deleteDelayDuration = 3; +} + +// Prometheus is a prometheus metric to be emitted +message Prometheus { + // Name is the name of the metric + optional string name = 1; + + // Labels is a list of metric labels + repeated MetricLabel labels = 2; + + // Help is a string that describes the metric + optional string help = 3; + + // When is a conditional statement that decides when to emit the metric + optional string when = 4; + + // Gauge is a gauge metric + optional Gauge gauge = 5; + + // Histogram is a histogram metric + optional Histogram histogram = 6; + + // Counter is a counter metric + optional Counter counter = 7; +} + +// RawArtifact allows raw string content to be placed as an artifact in a container +message RawArtifact { + // Data is the string contents of the artifact + optional string data = 1; +} + +// ResourceTemplate is a template subtype to manipulate kubernetes resources +message ResourceTemplate { + // Action is the action to perform to the resource. + // Must be one of: get, create, apply, delete, replace, patch + optional string action = 1; + + // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" + // Must be one of: strategic, merge, json + optional string mergeStrategy = 2; + + // Manifest contains the kubernetes manifest + optional string manifest = 3; + + // ManifestFrom is the source for a single kubernetes manifest + optional ManifestFrom manifestFrom = 8; + + // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. + optional bool setOwnerReference = 4; + + // SuccessCondition is a label selector expression which describes the conditions + // of the k8s resource in which it is acceptable to proceed to the following step + optional string successCondition = 5; + + // FailureCondition is a label selector expression which describes the conditions + // of the k8s resource in which the step was considered failed + optional string failureCondition = 6; + + // Flags is a set of additional options passed to kubectl before submitting a resource + // I.e. to disable resource validation: + // flags: [ + // "--validate=false" # disable resource validation + // ] + repeated string flags = 7; +} + +// RetryAffinity prevents running steps on the same host. +message RetryAffinity { + optional RetryNodeAntiAffinity nodeAntiAffinity = 1; +} + +// RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. +// In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". +message RetryNodeAntiAffinity { +} + +// RetryStrategy provides controls on how to retry a workflow step +message RetryStrategy { + // Limit is the maximum number of retry attempts when retrying a container. It does not include the original + // container; the maximum number of total attempts will be `limit + 1`. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString limit = 1; + + // RetryPolicy is a policy of NodePhase statuses that will be retried + optional string retryPolicy = 2; + + // Backoff is a backoff strategy + optional Backoff backoff = 3; + + // Affinity prevents running workflow's step on the same host + optional RetryAffinity affinity = 4; + + // Expression is a condition expression for when a node will be retried. 
If it evaluates to false, the node will not + // be retried and the retry strategy will be ignored + optional string expression = 5; +} + +// S3Artifact is the location of an S3 artifact +message S3Artifact { + optional S3Bucket s3Bucket = 1; + + // Key is the key in the bucket where the artifact resides + optional string key = 2; +} + +// S3ArtifactRepository defines the controller configuration for an S3 artifact repository +message S3ArtifactRepository { + optional S3Bucket s3Bucket = 1; + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + optional string keyFormat = 2; + + // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. + // DEPRECATED. Use KeyFormat instead + optional string keyPrefix = 3; +} + +// S3Bucket contains the access information required for interfacing with an S3 bucket +message S3Bucket { + // Endpoint is the hostname of the bucket endpoint + optional string endpoint = 1; + + // Bucket is the name of the bucket + optional string bucket = 2; + + // Region contains the optional bucket region + optional string region = 3; + + // Insecure will connect to the service with TLS + optional bool insecure = 4; + + // AccessKeySecret is the secret selector to the bucket's access key + optional k8s.io.api.core.v1.SecretKeySelector accessKeySecret = 5; + + // SecretKeySecret is the secret selector to the bucket's secret key + optional k8s.io.api.core.v1.SecretKeySelector secretKeySecret = 6; + + // SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant + optional k8s.io.api.core.v1.SecretKeySelector sessionTokenSecret = 12; + + // RoleARN is the Amazon Resource Name (ARN) of the role to assume. + optional string roleARN = 7; + + // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. + optional bool useSDKCreds = 8; + + // CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is. + optional CreateS3BucketOptions createBucketIfNotPresent = 9; + + optional S3EncryptionOptions encryptionOptions = 10; + + // CASecret specifies the secret that contains the CA, used to verify the TLS connection + optional k8s.io.api.core.v1.SecretKeySelector caSecret = 11; +} + +// S3EncryptionOptions used to determine encryption options during s3 operations +message S3EncryptionOptions { + // KMSKeyId tells the driver to encrypt the object using the specified KMS Key. + optional string kmsKeyId = 1; + + // KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information + optional string kmsEncryptionContext = 2; + + // EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used + optional bool enableEncryption = 3; + + // ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret. 
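+ //
+ // Illustrative only (not generated content), a hedged sketch of enabling
+ // server-side encryption on an S3 artifact:
+ //
+ //   s3:
+ //     encryptionOptions:
+ //       enableEncryption: true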
+ optional k8s.io.api.core.v1.SecretKeySelector serverSideCustomerKeySecret = 4; +} + +// ScriptTemplate is a template subtype to enable scripting through code steps +message ScriptTemplate { + optional k8s.io.api.core.v1.Container container = 1; + + // Source contains the source code of the script to execute + optional string source = 2; +} + +message SemaphoreHolding { + // Semaphore stores the semaphore name. + optional string semaphore = 1; + + // Holders stores the list of current holder names in the workflow. + // +listType=atomic + repeated string holders = 2; +} + +// SemaphoreRef is a reference of Semaphore +message SemaphoreRef { + // ConfigMapKeyRef is configmap selector for Semaphore configuration + optional k8s.io.api.core.v1.ConfigMapKeySelector configMapKeyRef = 1; + + // Namespace is the namespace of the configmap, default: [namespace of workflow] + optional string namespace = 2; +} + +message SemaphoreStatus { + // Holding stores the list of resource acquired synchronization lock for workflows. + repeated SemaphoreHolding holding = 1; + + // Waiting indicates the list of current synchronization lock holders. + repeated SemaphoreHolding waiting = 2; +} + +// Sequence expands a workflow step into numeric range +message Sequence { + // Count is number of elements in the sequence (default: 0). Not to be used with end + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString count = 1; + + // Number at which to start the sequence (default: 0) + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString start = 2; + + // Number at which to end the sequence (default: 0). Not to be used with Count + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString end = 3; + + // Format is a printf format string to format the value in the sequence + optional string format = 4; +} + +// StopStrategy defines if the CronWorkflow should stop scheduling based on an expression. v3.6 and after +message StopStrategy { + // v3.6 and after: Expression is an expression that stops scheduling workflows when true. Use the variables + // `cronworkflow`.`failed` or `cronworkflow`.`succeeded` to access the number of failed or successful child workflows. + optional string expression = 1; +} + +message Submit { + // WorkflowTemplateRef the workflow template to submit + optional WorkflowTemplateRef workflowTemplateRef = 1; + + // Metadata optional means to customize select fields of the workflow metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // Arguments extracted from the event and then set as arguments to the workflow created. + optional Arguments arguments = 2; +} + +// SubmitOpts are workflow submission options +message SubmitOpts { + // Name overrides metadata.name + optional string name = 1; + + // GenerateName overrides metadata.generateName + optional string generateName = 2; + + // Entrypoint overrides spec.entrypoint + optional string entrypoint = 4; + + // Parameters passes input parameters to workflow + repeated string parameters = 5; + + // ServiceAccount runs all pods in the workflow using specified ServiceAccount. + optional string serviceAccount = 7; + + // DryRun validates the workflow on the client-side without creating it. 
This option is not supported in API
+ optional bool dryRun = 8;
+
+ // ServerDryRun validates the workflow on the server-side without creating it
+ optional bool serverDryRun = 9;
+
+ // Labels adds to metadata.labels
+ optional string labels = 10;
+
+ // OwnerReference creates a metadata.ownerReference
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReference = 11;
+
+ // Annotations adds to metadata.annotations
+ optional string annotations = 12;
+
+ // Set the podPriorityClassName of the workflow
+ optional string podPriorityClassName = 13;
+
+ // Priority is used if controller is configured to process limited number of workflows in parallel, higher priority workflows
+ // are processed first.
+ optional int32 priority = 14;
+}
+
+// SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.
+message SuppliedValueFrom {
+}
+
+// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time
+message SuspendTemplate {
+ // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds.
+ // Could also be a Duration, e.g.: "2m", "6h"
+ optional string duration = 1;
+}
+
+// Synchronization holds synchronization lock configuration
+message Synchronization {
+ // Semaphore holds the Semaphore configuration - deprecated, use semaphores instead
+ optional SemaphoreRef semaphore = 1;
+
+ // Mutex holds the Mutex lock details - deprecated, use mutexes instead
+ optional Mutex mutex = 2;
+
+ // v3.6 and after: Semaphores holds the list of Semaphores configuration
+ repeated SemaphoreRef semaphores = 3;
+
+ // v3.6 and after: Mutexes holds the list of Mutex lock details
+ repeated Mutex mutexes = 4;
+}
+
+// SynchronizationStatus stores the status of semaphore and mutex.
+message SynchronizationStatus {
+ // Semaphore stores this workflow's Semaphore holder details
+ optional SemaphoreStatus semaphore = 1;
+
+ // Mutex stores this workflow's mutex holder details
+ optional MutexStatus mutex = 2;
+}
+
+// TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed
+message TTLStrategy {
+ // SecondsAfterCompletion is the number of seconds to live after completion
+ optional int32 secondsAfterCompletion = 1;
+
+ // SecondsAfterSuccess is the number of seconds to live after success
+ optional int32 secondsAfterSuccess = 2;
+
+ // SecondsAfterFailure is the number of seconds to live after failure
+ optional int32 secondsAfterFailure = 3;
+}
+
+// TarStrategy will tar and gzip the file or directory when saving
+message TarStrategy {
+ // CompressionLevel specifies the gzip compression level to use for the artifact.
+ // Defaults to gzip.DefaultCompression.
+ optional int32 compressionLevel = 1;
+}
+
+// Template is a reusable and composable unit of execution in a workflow
+message Template {
+ // Name is the name of the template
+ optional string name = 1;
+
+ // Inputs describe what input parameters and artifacts are supplied to this template
+ optional Inputs inputs = 5;
+
+ // Outputs describe the parameters and artifacts that this template produces
+ optional Outputs outputs = 6;
+
+ // NodeSelector is a selector to schedule this step of the workflow to be
+ // run on the selected node(s). Overrides the selector set at the workflow level.
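+ //
+ // Illustrative only (not generated content):
+ //
+ //   nodeSelector:
+ //     kubernetes.io/os: linux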
+ map<string, string> nodeSelector = 7;
+
+ // Affinity sets the pod's scheduling constraints
+ // Overrides the affinity set at the workflow level (if any)
+ optional k8s.io.api.core.v1.Affinity affinity = 8;
+
+ // Metadata sets the pod's metadata, i.e. annotations and labels
+ optional Metadata metadata = 9;
+
+ // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness
+ optional bool daemon = 10;
+
+ // Steps define a series of sequential/parallel workflow steps
+ repeated ParallelSteps steps = 11;
+
+ // Container is the main container image to run in the pod
+ optional k8s.io.api.core.v1.Container container = 12;
+
+ // ContainerSet groups multiple containers within a single pod.
+ optional ContainerSetTemplate containerSet = 40;
+
+ // Script runs a portion of code against an interpreter
+ optional ScriptTemplate script = 13;
+
+ // Resource template subtype which can run k8s resources
+ optional ResourceTemplate resource = 14;
+
+ // DAG template subtype which runs a DAG
+ optional DAGTemplate dag = 15;
+
+ // Suspend template subtype which can suspend a workflow when reaching the step
+ optional SuspendTemplate suspend = 16;
+
+ // Data is a data template
+ optional Data data = 39;
+
+ // HTTP makes an HTTP request
+ optional HTTP http = 42;
+
+ // Plugin is a plugin template
+ optional Plugin plugin = 43;
+
+ // Volumes is a list of volumes that can be mounted by containers in a template.
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated k8s.io.api.core.v1.Volume volumes = 17;
+
+ // InitContainers is a list of containers which run before the main container.
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated UserContainer initContainers = 18;
+
+ // Sidecars is a list of containers which run alongside the main container
+ // Sidecars are automatically killed when the main container completes
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated UserContainer sidecars = 19;
+
+ // Location in which all files related to the step will be stored (logs, artifacts, etc...).
+ // Can be overridden by individual items in Outputs. If omitted, will use the default
+ // artifact repository location configured in the controller, appended with the
+ // <workflowname>/<nodename> in the key.
+ optional ArtifactLocation archiveLocation = 20;
+
+ // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+ // before the system actively tries to terminate the pod; value must be positive integer
+ // This field is only applicable to container and script templates.
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString activeDeadlineSeconds = 21;
+
+ // RetryStrategy describes how to retry a template when it fails
+ optional RetryStrategy retryStrategy = 22;
+
+ // Parallelism limits the max total parallel pods that can execute at the same time within the
+ // boundaries of this template invocation. If additional steps/dag templates are invoked, the
+ // pods created by those templates will not be counted towards this total.
+ optional int64 parallelism = 23;
+
+ // FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this
+ // template is expanded with `withItems`, etc.
+ optional bool failFast = 41;
+
+ // Tolerations to apply to workflow pods.
+ // +patchStrategy=merge
+ // +patchMergeKey=key
+ repeated k8s.io.api.core.v1.Toleration tolerations = 24;
+
+ // If specified, the pod will be dispatched by specified scheduler.
+ // Or it will be dispatched by workflow scope scheduler if specified. + // If neither specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 25; + + // PriorityClassName to apply to workflow pods. + optional string priorityClassName = 26; + + // Priority to apply to workflow pods. + optional int32 priority = 27; + + // ServiceAccountName to apply to workflow pods + optional string serviceAccountName = 28; + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. + // ServiceAccountName of ExecutorConfig must be specified if this value is false. + optional bool automountServiceAccountToken = 32; + + // Executor holds configurations of the executor container. + optional ExecutorConfig executor = 33; + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod spec + // +patchStrategy=merge + // +patchMergeKey=ip + repeated k8s.io.api.core.v1.HostAlias hostAliases = 29; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 30; + + // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of + // container fields which are not strings (e.g. resource limits). + optional string podSpecPatch = 31; + + // Metrics are a list of metrics emitted from this template + optional Metrics metrics = 35; + + // Synchronization holds synchronization lock configuration for this template + optional Synchronization synchronization = 36; + + // Memoize allows templates to use outputs generated from already executed templates + optional Memoize memoize = 37; + + // Timeout allows to set the total node execution timeout duration counting from the node's start time. + // This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates. + optional string timeout = 38; +} + +// TemplateRef is a reference of template resource. +message TemplateRef { + // Name is the resource name of the template. + optional string name = 1; + + // Template is the name of referred template in the resource. + optional string template = 2; + + // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate). + optional bool clusterScope = 4; +} + +message TransformationStep { + // Expression defines an expr expression to apply + optional string expression = 1; +} + +// UserContainer is a container specified by a user. +message UserContainer { + optional k8s.io.api.core.v1.Container container = 1; + + // MirrorVolumeMounts will mount the same volumes specified in the main container + // to the container (including artifacts), at the same mountPaths. 
This enables + // dind daemon to partially see the same filesystem as the main container in + // order to use features such as docker volume binding + optional bool mirrorVolumeMounts = 2; +} + +// ValueFrom describes a location in which to obtain the value to a parameter +message ValueFrom { + // Path in the container to retrieve an output parameter value from in container templates + optional string path = 1; + + // JSONPath of a resource to retrieve an output parameter value from in resource templates + optional string jsonPath = 2; + + // JQFilter expression against the resource object in resource templates + optional string jqFilter = 3; + + // Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` + optional string event = 7; + + // Parameter reference to a step or dag task in which to retrieve an output parameter value from + // (e.g. '{{steps.mystep.outputs.myparam}}') + optional string parameter = 4; + + // Supplied value to be filled in directly, either through the CLI, API, etc. + optional SuppliedValueFrom supplied = 6; + + // ConfigMapKeyRef is configmap selector for input parameter configuration + optional k8s.io.api.core.v1.ConfigMapKeySelector configMapKeyRef = 9; + + // Default specifies a value to be used if retrieving the value from the specified source fails + optional string default = 5; + + // Expression, if defined, is evaluated to specify the value for the parameter + optional string expression = 8; +} + +message Version { + optional string version = 1; + + optional string buildDate = 2; + + optional string gitCommit = 3; + + optional string gitTag = 4; + + optional string gitTreeState = 5; + + optional string goVersion = 6; + + optional string compiler = 7; + + optional string platform = 8; +} + +// VolumeClaimGC describes how to delete volumes from completed Workflows +message VolumeClaimGC { + // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess" + optional string strategy = 1; +} + +// Workflow is the definition of a workflow resource +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=wf +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the workflow" +// +kubebuilder:printcolumn:name="Age",type="date",format="date-time",JSONPath=".status.startedAt",description="When the workflow was started" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Human readable message indicating details about why the workflow is in this condition." 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message Workflow {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional WorkflowSpec spec = 2;
+
+ optional WorkflowStatus status = 3;
+}
+
+// WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion
+// +genclient
+// +kubebuilder:resource:shortName=wfat
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+message WorkflowArtifactGCTask {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional ArtifactGCSpec spec = 2;
+
+ optional ArtifactGCStatus status = 3;
+}
+
+// WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowArtifactGCTaskList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated WorkflowArtifactGCTask items = 2;
+}
+
+// WorkflowEventBinding is the definition of an event resource
+// +genclient
+// +genclient:noStatus
+// +kubebuilder:resource:shortName=wfeb
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowEventBinding {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional WorkflowEventBindingSpec spec = 2;
+}
+
+// WorkflowEventBindingList is list of event resources
+// +kubebuilder:resource:shortName=wfebs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowEventBindingList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated WorkflowEventBinding items = 2;
+}
+
+message WorkflowEventBindingSpec {
+ // Event is the event to bind to
+ optional Event event = 1;
+
+ // Submit is the workflow template to submit
+ optional Submit submit = 2;
+}
+
+// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level
+message WorkflowLevelArtifactGC {
+ // ArtifactGC is an embedded struct
+ optional ArtifactGC artifactGC = 1;
+
+ // ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails
+ optional bool forceFinalizerRemoval = 2;
+
+ // PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.
+ optional string podSpecPatch = 3;
+}
+
+// WorkflowList is list of Workflow resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated Workflow items = 2;
+}
+
+message WorkflowMetadata {
+ map<string, string> labels = 1;
+
+ map<string, string> annotations = 2;
+
+ map<string, LabelValueFrom> labelsFrom = 3;
+}
+
+// WorkflowSpec is the specification of a Workflow.
+message WorkflowSpec {
+ // Templates is a list of workflow templates used in a workflow
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated Template templates = 1;
+
+ // Entrypoint is a template reference to the starting point of the workflow.
+ optional string entrypoint = 2;
+
+ // Arguments contain the parameters and artifacts sent to the workflow entrypoint
+ // Parameters are referencable globally using the 'workflow' variable prefix.
+ // e.g. {{workflow.parameters.myparam}}
+ optional Arguments arguments = 3;
+
+ // ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
+ optional string serviceAccountName = 4;
+
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.
+ // ServiceAccountName of ExecutorConfig must be specified if this value is false.
+ optional bool automountServiceAccountToken = 28;
+
+ // Executor holds configurations of executor containers of the workflow.
+ optional ExecutorConfig executor = 29;
+
+ // Volumes is a list of volumes that can be mounted by containers in a workflow.
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated k8s.io.api.core.v1.Volume volumes = 5;
+
+ // VolumeClaimTemplates is a list of claims that containers are allowed to reference.
+ // The Workflow controller will create the claims at the beginning of the workflow
+ // and delete the claims upon completion of the workflow
+ repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 6;
+
+ // Parallelism limits the max total parallel pods that can execute at the same time in a workflow
+ optional int64 parallelism = 7;
+
+ // ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.
+ optional ArtifactRepositoryRef artifactRepositoryRef = 8;
+
+ // Suspend will suspend the workflow and prevent execution of any future steps in the workflow
+ optional bool suspend = 9;
+
+ // NodeSelector is a selector which will result in all pods of the workflow
+ // to be scheduled on the selected node(s). This is able to be overridden by
+ // a nodeSelector specified in the template.
+ map<string, string> nodeSelector = 10;
+
+ // Affinity sets the scheduling constraints for all pods in the workflow.
+ // Can be overridden by an affinity specified in the template
+ optional k8s.io.api.core.v1.Affinity affinity = 11;
+
+ // Tolerations to apply to workflow pods.
+ // +patchStrategy=merge
+ // +patchMergeKey=key
+ repeated k8s.io.api.core.v1.Toleration tolerations = 12;
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 13;
+
+ // Host networking requested for this workflow pod. Defaults to false.
+ optional bool hostNetwork = 14;
+
+ // Set DNS policy for workflow pods.
+ // Defaults to "ClusterFirst".
+ // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ // To have DNS options set along with hostNetwork, you have to specify DNS policy
+ // explicitly to 'ClusterFirstWithHostNet'.
+ optional string dnsPolicy = 15;
+
+ // PodDNSConfig defines the DNS parameters of a pod in addition to
+ // those generated from DNSPolicy.
+ optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 16;
+
+ // OnExit is a template reference which is invoked at the end of the
+ // workflow, irrespective of the success, failure, or error of the
+ // primary workflow.
+ optional string onExit = 17;
+
+ // TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it
+ // Succeeded or Failed. 
If this struct is set, once the Workflow finishes, it will be + // deleted after the time to live expires. If this field is unset, + // the controller config map will hold the default values. + optional TTLStrategy ttlStrategy = 30; + + // Optional duration in seconds relative to the workflow start time which the workflow is + // allowed to run before the controller terminates the workflow. A value of zero is used to + // terminate a Running workflow + optional int64 activeDeadlineSeconds = 19; + + // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. + optional int32 priority = 20; + + // Set scheduler name for all pods. + // Will be overridden if container/script template's scheduler name is set. + // Default scheduler will be used if neither specified. + // +optional + optional string schedulerName = 21; + + // PodGC describes the strategy to use when deleting completed pods + optional PodGC podGC = 22; + + // PriorityClassName to apply to workflow pods. + optional string podPriorityClassName = 23; + + // Priority to apply to workflow pods. + // DEPRECATED: Use PodPriorityClassName instead. + optional int32 podPriority = 24; + + // +patchStrategy=merge + // +patchMergeKey=ip + repeated k8s.io.api.core.v1.HostAlias hostAliases = 25; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 26; + + // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of + // container fields which are not strings (e.g. resource limits). + optional string podSpecPatch = 27; + + // PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. + // Controller will automatically add the selector with workflow name, if selector is empty. + // Optional: Defaults to empty. + // +optional + optional k8s.io.api.policy.v1.PodDisruptionBudgetSpec podDisruptionBudget = 31; + + // Metrics are a list of metrics emitted from this Workflow + optional Metrics metrics = 32; + + // Shutdown will shutdown the workflow according to its ShutdownStrategy + optional string shutdown = 33; + + // WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution + optional WorkflowTemplateRef workflowTemplateRef = 34; + + // Synchronization holds synchronization lock configuration for this Workflow + optional Synchronization synchronization = 35; + + // VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows + optional VolumeClaimGC volumeClaimGC = 36; + + // RetryStrategy for all templates in the workflow. 
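+ //
+ // Illustrative only (not generated content), a hedged sketch:
+ //
+ //   retryStrategy:
+ //     limit: "3"
+ //     retryPolicy: OnFailure
+ //     backoff:
+ //       duration: "10s"
+ //       factor: "2"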
+ optional RetryStrategy retryStrategy = 37;
+
+ // PodMetadata defines additional metadata that should be applied to workflow pods
+ optional Metadata podMetadata = 38;
+
+ // TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level
+ optional Template templateDefaults = 39;
+
+ // ArchiveLogs indicates if the container logs should be archived
+ optional bool archiveLogs = 40;
+
+ // Hooks holds the lifecycle hook which is invoked at lifecycle of
+ // step, irrespective of the success, failure, or error status of the primary step
+ map<string, LifecycleHook> hooks = 41;
+
+ // WorkflowMetadata contains some metadata of the workflow to refer to
+ optional WorkflowMetadata workflowMetadata = 42;
+
+ // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts
+ // unless Artifact.ArtifactGC is specified, which overrides this)
+ optional WorkflowLevelArtifactGC artifactGC = 43;
+}
+
+// WorkflowStatus contains overall status information about a workflow
+message WorkflowStatus {
+ // Phase is a simple, high-level summary of where the workflow is in its lifecycle.
+ // Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded",
+ // "Failed" or "Error" once the workflow has completed.
+ optional string phase = 1;
+
+ // Time at which this workflow started
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 2;
+
+ // Time at which this workflow completed
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 3;
+
+ // EstimatedDuration in seconds.
+ optional int64 estimatedDuration = 16;
+
+ // Progress to completion
+ optional string progress = 17;
+
+ // A human readable message indicating details about why the workflow is in this condition.
+ optional string message = 4;
+
+ // Compressed and base64 decoded Nodes map
+ optional string compressedNodes = 5;
+
+ // Nodes is a mapping between a node ID and the node's status.
+ map<string, NodeStatus> nodes = 6;
+
+ // Whether or not node status has been offloaded to a database. If it exists, then Nodes and CompressedNodes will be empty.
+ // This will actually be populated with a hash of the offloaded data.
+ optional string offloadNodeStatusVersion = 10;
+
+ // StoredTemplates is a mapping between a template ref and the node's status.
+ map<string, Template> storedTemplates = 9;
+
+ // PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.
+ // The contents of this list are drained at the end of the workflow.
+ repeated k8s.io.api.core.v1.Volume persistentVolumeClaims = 7;
+
+ // Outputs captures output values and artifact locations produced by the workflow via global outputs
+ optional Outputs outputs = 8;
+
+ // Conditions is a list of conditions the Workflow may have
+ repeated Condition conditions = 13;
+
+ // ResourcesDuration is the total for the workflow
+ map<string, int64> resourcesDuration = 12;
+
+ // StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.
+ optional WorkflowSpec storedWorkflowTemplateSpec = 14;
+
+ // Synchronization stores the status of synchronization locks
+ optional SynchronizationStatus synchronization = 15;
+
+ // ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it every time we reconcile.
+ optional ArtifactRepositoryRefStatus artifactRepositoryRef = 18;
+
+ // ArtifactGCStatus maintains the status of Artifact Garbage Collection
+ optional ArtGCStatus artifactGCStatus = 19;
+
+ // TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.
+ map<string, bool> taskResultsCompletionStatus = 20;
+}
+
+// WorkflowStep is a reference to a template to execute in a series of steps
+message WorkflowStep {
+ // Name of the step
+ optional string name = 1;
+
+ // Template is the name of the template to execute as the step
+ optional string template = 2;
+
+ // Inline is the template. Template must be empty if this is declared (and vice-versa).
+ optional Template inline = 13;
+
+ // Arguments hold arguments to the template
+ optional Arguments arguments = 3;
+
+ // TemplateRef is the reference to the template resource to execute as the step.
+ optional TemplateRef templateRef = 4;
+
+ // WithItems expands a step into multiple parallel steps from the items in the list
+ repeated Item withItems = 5;
+
+ // WithParam expands a step into multiple parallel steps from the value in the parameter,
+ // which is expected to be a JSON list.
+ optional string withParam = 6;
+
+ // WithSequence expands a step into a numeric sequence
+ optional Sequence withSequence = 7;
+
+ // When is an expression in which the step should conditionally execute
+ optional string when = 8;
+
+ // ContinueOn makes argo proceed with the following step even if this step fails.
+ // Errors and Failed states can be specified
+ optional ContinueOn continueOn = 9;
+
+ // OnExit is a template reference which is invoked at the end of the
+ // template, irrespective of the success, failure, or error of the
+ // primary template.
+ // DEPRECATED: Use Hooks[exit].Template instead.
+ optional string onExit = 11;
+
+ // Hooks holds the lifecycle hook which is invoked at lifecycle of
+ // step, irrespective of the success, failure, or error status of the primary step
+ map<string, LifecycleHook> hooks = 12;
+}
+
+// WorkflowTaskResult is used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has
+// more capacity. This is an internal type. Users should never create this resource directly, much like you would
+// never create a ReplicaSet directly.
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTaskResult {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional NodeResult nodeResult = 2;
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTaskResultList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated WorkflowTaskResult items = 2;
+}
+
+// +genclient
+// +kubebuilder:resource:shortName=wfts
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+message WorkflowTaskSet {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional WorkflowTaskSetSpec spec = 2;
+
+ optional WorkflowTaskSetStatus status = 3;
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTaskSetList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated WorkflowTaskSet items = 2;
+}
+
+message WorkflowTaskSetSpec {
+ map<string, Template> tasks = 1;
+}
+
+message WorkflowTaskSetStatus {
+ map<string, NodeResult> nodes = 1;
+}
+
+// WorkflowTemplate is the definition of a workflow template resource
+// +genclient
+// +genclient:noStatus
+// +kubebuilder:resource:shortName=wftmpl
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTemplate {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ optional WorkflowSpec spec = 2;
+}
+
+// WorkflowTemplateList is list of WorkflowTemplate resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTemplateList {
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated WorkflowTemplate items = 2;
+}
+
+// WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
+message WorkflowTemplateRef {
+ // Name is the resource name of the workflow template.
+ optional string name = 1;
+
+ // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
+ optional bool clusterScope = 2;
+}
+
+// ZipStrategy will unzip zipped input artifacts
+message ZipStrategy {
+}
+
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json
new file mode 100644
index 00000000..4cf065b4
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json
@@ -0,0 +1,15 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "pkg/apis/workflow/v1alpha1/generated.proto",
+ "version": "version not set"
+ },
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "paths": {},
+ "definitions": {}
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go
new file mode 100644
index 00000000..79f9c3e2
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go
@@ -0,0 +1,62 @@
+package v1alpha1
+
+import (
+ "net/http"
+
+ v1 "k8s.io/api/core/v1"
+)
+
+type HTTPHeaderSource struct {
+ SecretKeyRef *v1.SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,1,opt,name=secretKeyRef"`
+}
+
+type HTTPHeaders []HTTPHeader
+
+// HTTPBodySource contains the source of the HTTP body.
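+//
+// Illustrative sketch only (not part of the upstream file), using the types
+// declared below in this file:
+//
+//   body := &HTTPBodySource{Bytes: []byte(`{"ping":"pong"}`)}
+//   req := HTTP{Method: "POST", URL: "https://example.invalid", BodyFrom: body}
+//   _ = req.GetBodyBytes() // returns the raw body bytes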
+type HTTPBodySource struct {
+ Bytes []byte `json:"bytes,omitempty" protobuf:"bytes,1,opt,name=bytes"`
+}
+
+func (h HTTPHeaders) ToHeader() http.Header {
+ outHeader := make(http.Header)
+ for _, header := range h {
+ // When this is used, header valueFrom should already be resolved
+ if header.ValueFrom != nil {
+ continue
+ }
+ outHeader[header.Name] = []string{header.Value}
+ }
+ return outHeader
+}
+
+type HTTPHeader struct {
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+ ValueFrom *HTTPHeaderSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
+}
+
+type HTTP struct {
+ // Method is the HTTP method for the HTTP Request
+ Method string `json:"method,omitempty" protobuf:"bytes,1,opt,name=method"`
+ // URL of the HTTP Request
+ URL string `json:"url" protobuf:"bytes,2,opt,name=url"`
+ // Headers are an optional list of headers to send with HTTP requests
+ Headers HTTPHeaders `json:"headers,omitempty" protobuf:"bytes,3,rep,name=headers"`
+ // TimeoutSeconds is the request timeout for the HTTP Request. Default is 30 seconds
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"bytes,4,opt,name=timeoutSeconds"`
+ // SuccessCondition is an expression; if it evaluates to true, the request is considered successful
+ SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,6,opt,name=successCondition"`
+ // Body is the content of the HTTP Request
+ Body string `json:"body,omitempty" protobuf:"bytes,5,opt,name=body"`
+ // BodyFrom is the content of the HTTP Request as Bytes
+ BodyFrom *HTTPBodySource `json:"bodyFrom,omitempty" protobuf:"bytes,8,opt,name=bodyFrom"`
+ // InsecureSkipVerify, if set to true, will skip TLS verification for the HTTP client
+ InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty" protobuf:"bytes,7,opt,name=insecureSkipVerify"`
+}
+
+func (h *HTTP) GetBodyBytes() []byte {
+ if h.BodyFrom != nil {
+ return h.BodyFrom.Bytes
+ }
+ return nil
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go
new file mode 100644
index 00000000..19390e43
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go
@@ -0,0 +1,25 @@
+package v1alpha1
+
+// A link to another app.
+// +patchStrategy=merge
+// +patchMergeKey=name
+type Link struct {
+ // The name of the link, e.g. "Workflow Logs" or "Pod Logs"
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat"
+ Scope string `json:"scope" protobuf:"bytes,2,opt,name=scope"`
+ // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}"
+ URL string `json:"url" protobuf:"bytes,3,opt,name=url"`
+}
+
+// Column is a custom column that will be exposed in the Workflow List View.
+// +patchStrategy=merge
+// +patchMergeKey=name
+type Column struct {
+ // The name of this column, e.g., "Workflow Completed".
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // The type of this column, "label" or "annotation".
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+ // The key of the label or annotation, e.g., "workflows.argoproj.io/completed".
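+ //
+ // Illustrative only (not part of the upstream file), a hedged sketch of a
+ // column entry as it might appear in configuration:
+ //
+ //   columns:
+ //   - name: Workflow Completed
+ //     type: label
+ //     key: workflows.argoproj.io/completed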
+ Key string `json:"key" protobuf:"bytes,3,opt,name=key"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go new file mode 100644 index 00000000..35e53c31 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go @@ -0,0 +1,119 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "strconv" + + jsonutil "github.com/argoproj/argo-workflows/v3/util/json" +) + +// Type represents the stored type of Item. +type Type int + +const ( + Number Type = iota + String + Bool + Map + List +) + +// Item expands a single workflow step into multiple parallel steps +// The value of Item can be a map, string, bool, or number +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +kubebuilder:validation:Type=object +type Item struct { + Value json.RawMessage `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.RawMessage"` +} + +func ParseItem(s string) (Item, error) { + item := Item{} + return item, json.Unmarshal([]byte(s), &item) +} + +func (i *Item) GetType() Type { + strValue := string(i.Value) + if _, err := strconv.Atoi(strValue); err == nil { + return Number + } + if _, err := strconv.ParseFloat(strValue, 64); err == nil { + return Number + } + if _, err := strconv.ParseBool(strValue); err == nil { + return Bool + } + var list []interface{} + if err := json.Unmarshal(i.Value, &list); err == nil { + return List + } + var object map[string]interface{} + if err := json.Unmarshal(i.Value, &object); err == nil { + return Map + } + return String +} + +func (i *Item) UnmarshalJSON(value []byte) error { + return i.Value.UnmarshalJSON(value) +} + +func (i *Item) String() string { + x, err := json.Marshal(i) // this produces a normalised string, e.g. 
white-space
+ if err != nil {
+ panic(err)
+ }
+ // this convenience to remove quotes from strings will cause many problems
+ if x[0] == '"' {
+ return jsonutil.Fix(string(x[1 : len(x)-1]))
+ }
+ return jsonutil.Fix(string(x))
+}
+
+func (i Item) Format(s fmt.State, _ rune) {
+ _, _ = fmt.Fprintf(s, "%s", i.String()) //nolint
+}
+
+func (i Item) MarshalJSON() ([]byte, error) {
+ return i.Value.MarshalJSON()
+}
+
+func (i *Item) DeepCopyInto(out *Item) {
+ inBytes, err := json.Marshal(i)
+ if err != nil {
+ panic(err)
+ }
+ err = json.Unmarshal(inBytes, out)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (i Item) OpenAPISchemaType() []string {
+ return nil
+}
+
+func (i Item) OpenAPISchemaFormat() string { return "" }
+
+// you MUST assert `GetType() == Map` before invocation as this does not return errors
+func (i *Item) GetMapVal() map[string]Item {
+ val := make(map[string]Item)
+ _ = json.Unmarshal(i.Value, &val)
+ return val
+}
+
+// you MUST assert `GetType() == List` before invocation as this does not return errors
+func (i *Item) GetListVal() []Item {
+ val := make([]Item, 0)
+ _ = json.Unmarshal(i.Value, &val)
+ return val
+}
+
+// you MUST assert `GetType() == String` before invocation as this does not return errors
+func (i *Item) GetStrVal() string {
+ val := ""
+ _ = json.Unmarshal(i.Value, &val)
+ return val
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go
new file mode 100644
index 00000000..6c38754b
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go
@@ -0,0 +1,11 @@
+package v1alpha1
+
+// LabelValues is a list of workflow label values
+type LabelValues struct {
+ Items []string `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"`
+}
+
+// LabelKeys is a list of keys
+type LabelKeys struct {
+ Items []string `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"`
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go
new file mode 100644
index 00000000..2d35e5ba
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go
@@ -0,0 +1,86 @@
+package v1alpha1
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+)
+
+// MustUnmarshal is a utility function to unmarshal either a file, byte array, or string of JSON or YAML into an object.
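+//
+// Illustrative example (assumes a workflow.yaml exists on disk):
+//
+//   wf := &Workflow{}
+//   MustUnmarshal(`{"metadata":{"name":"hello"}}`, wf) // JSON literal
+//   MustUnmarshal("@workflow.yaml", wf)                // read from file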
+// text - a byte array or string; if it starts with "@" it is assumed to be a file and read from disk, if it starts with "{" it is assumed to be JSON, otherwise it is assumed to be YAML
+// v - a pointer to an object
+func MustUnmarshal(text, v interface{}) {
+ switch x := text.(type) {
+ case string:
+ MustUnmarshal([]byte(x), v)
+ case []byte:
+ if len(x) == 0 {
+ panic("no text to unmarshal")
+ }
+ if x[0] == '@' {
+ filename := string(x[1:])
+ y, err := os.ReadFile(filepath.Clean(filename))
+ if err != nil {
+ panic(fmt.Errorf("failed to read file %s: %w", filename, err))
+ }
+ MustUnmarshal(y, v)
+ } else if x[0] == '{' {
+ if err := json.Unmarshal(x, v); err != nil {
+ panic(fmt.Errorf("failed to unmarshal JSON %q: %w", string(x), err))
+ }
+ } else {
+ if err := yaml.UnmarshalStrict(x, v); err != nil {
+ panic(fmt.Errorf("failed to unmarshal YAML %q: %w", string(x), err))
+ }
+ }
+ default:
+ panic(fmt.Errorf("cannot unmarshal type %T", text))
+ }
+}
+
+func MustMarshallJSON(v interface{}) string {
+ data, err := json.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+func MustUnmarshalClusterWorkflowTemplate(text interface{}) *ClusterWorkflowTemplate {
+ x := &ClusterWorkflowTemplate{}
+ MustUnmarshal(text, &x)
+ return x
+}
+
+func MustUnmarshalCronWorkflow(text interface{}) *CronWorkflow {
+ x := &CronWorkflow{}
+ MustUnmarshal(text, &x)
+ return x
+}
+
+func MustUnmarshalTemplate(text interface{}) *Template {
+ x := &Template{}
+ MustUnmarshal(text, &x)
+ return x
+}
+
+func MustUnmarshalWorkflow(text interface{}) *Workflow {
+ x := &Workflow{}
+ MustUnmarshal(text, &x)
+ return x
+}
+
+func MustUnmarshalWorkflowTemplate(text interface{}) *WorkflowTemplate {
+ x := &WorkflowTemplate{}
+ MustUnmarshal(text, &x)
+ return x
+}
+
+func MustUnmarshalWorkflowArtifactGCTask(text interface{}) *WorkflowArtifactGCTask {
+ x := &WorkflowArtifactGCTask{}
+ MustUnmarshal(text, &x)
+ return x
+}
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go
new file mode 100644
index 00000000..a21ee90c
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go
@@ -0,0 +1,24 @@
+package v1alpha1
+
+import (
+ "encoding/json"
+)
+
+// +kubebuilder:validation:Type=object
+type Object struct {
+ Value json.RawMessage `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.RawMessage"`
+}
+
+func (i *Object) UnmarshalJSON(value []byte) error {
+ return i.Value.UnmarshalJSON(value)
+}
+
+func (i Object) MarshalJSON() ([]byte, error) {
+ return i.Value.MarshalJSON()
+}
+
+func (i Object) OpenAPISchemaType() []string {
+ return []string{"object"}
+}
+
+func (i Object) OpenAPISchemaFormat() string { return "" }
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go
new file mode 100644
index 00000000..a89ff071
--- /dev/null
+++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go
@@ -0,0 +1,8669 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+// This file was autogenerated by openapi-gen. Do not edit it manually!
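+//
+// Illustrative usage sketch (not generated content): GetOpenAPIDefinitions
+// below returns a map keyed by fully-qualified type name and takes a callback
+// that turns a type path into a $ref, e.g.
+//
+//   defs := GetOpenAPIDefinitions(func(path string) spec.Ref {
+//       return spec.MustCreateRef(path)
+//   })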
+ +package v1alpha1 + +import ( + common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" +) + +func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + return map[string]common.OpenAPIDefinition{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount": schema_pkg_apis_workflow_v1alpha1_Amount(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy": schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments": schema_pkg_apis_workflow_v1alpha1_Arguments(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact": schema_pkg_apis_workflow_v1alpha1_Artifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC": schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation": schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths": schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult": schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchQuery": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchResult": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryAuth": schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact": schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository": 
schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureBlobContainer": schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff": schema_pkg_apis_workflow_v1alpha1_Backoff(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth": schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache": schema_pkg_apis_workflow_v1alpha1_Cache(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth": schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Column": schema_pkg_apis_workflow_v1alpha1_Column(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition": schema_pkg_apis_workflow_v1alpha1_Condition(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode": schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy": schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate": schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn": schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter": schema_pkg_apis_workflow_v1alpha1_Counter(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions": schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow": schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowList": schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec": schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus": schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data": schema_pkg_apis_workflow_v1alpha1_Data(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource": schema_pkg_apis_workflow_v1alpha1_DataSource(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event": schema_pkg_apis_workflow_v1alpha1_Event(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig": schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref), + 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact": schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSBucket": schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge": schema_pkg_apis_workflow_v1alpha1_Gauge(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact": schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSConfig": schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSKrbConfig": schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP": schema_pkg_apis_workflow_v1alpha1_HTTP(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth": schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource": schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader": schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource": schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header": schema_pkg_apis_workflow_v1alpha1_Header(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram": schema_pkg_apis_workflow_v1alpha1_Histogram(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item": schema_pkg_apis_workflow_v1alpha1_Item(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelKeys": schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom": schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValues": schema_pkg_apis_workflow_v1alpha1_LabelValues(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook": schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Link": schema_pkg_apis_workflow_v1alpha1_Link(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom": schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus": schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize": 
schema_pkg_apis_workflow_v1alpha1_Memoize(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata": schema_pkg_apis_workflow_v1alpha1_Metadata(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel": schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics": schema_pkg_apis_workflow_v1alpha1_Metrics(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex": schema_pkg_apis_workflow_v1alpha1_Mutex(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding": schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus": schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag": schema_pkg_apis_workflow_v1alpha1_NodeFlag(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult": schema_pkg_apis_workflow_v1alpha1_NodeResult(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus": schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy": schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth": schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam": schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact": schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSBucket": schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule": schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Object": schema_pkg_apis_workflow_v1alpha1_Object(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs": schema_pkg_apis_workflow_v1alpha1_Outputs(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps": schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter": schema_pkg_apis_workflow_v1alpha1_Parameter(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin": schema_pkg_apis_workflow_v1alpha1_Plugin(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC": schema_pkg_apis_workflow_v1alpha1_PodGC(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus": schema_pkg_apis_workflow_v1alpha1_Prometheus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact": schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate": schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref), + 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity": schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity": schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy": schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact": schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Bucket": schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions": schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate": schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding": schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef": schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus": schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence": schema_pkg_apis_workflow_v1alpha1_Sequence(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.StopStrategy": schema_pkg_apis_workflow_v1alpha1_StopStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit": schema_pkg_apis_workflow_v1alpha1_Submit(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SubmitOpts": schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom": schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate": schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization": schema_pkg_apis_workflow_v1alpha1_Synchronization(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy": schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy": schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template": schema_pkg_apis_workflow_v1alpha1_Template(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef": schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep": schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer": schema_pkg_apis_workflow_v1alpha1_UserContainer(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom": 
schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Version": schema_pkg_apis_workflow_v1alpha1_Version(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC": schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow": schema_pkg_apis_workflow_v1alpha1_Workflow(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTaskList": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingList": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC": schema_pkg_apis_workflow_v1alpha1_WorkflowLevelArtifactGC(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowList": schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata": schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStep": schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResultList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref), + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy": 
schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref), + } +} + +func schema_pkg_apis_workflow_v1alpha1_Amount(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Amount represent a numeric amount.", + Type: Amount{}.OpenAPISchemaType(), + Format: Amount{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArchiveStrategy describes how to archive files/directory when saving artifacts", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "tar": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy"), + }, + }, + "none": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy"), + }, + }, + "zip": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Arguments(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Arguments to a template", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Parameters is the list of parameters to pass to the template or workflow", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), + }, + }, + }, + }, + }, + "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Artifacts is the list of artifacts to pass to the template or workflow", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtGCStatus maintains state related to ArtifactGC", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "strategiesProcessed": { + SchemaProps: 
spec.SchemaProps{ + Description: "have Pods been started to perform this strategy? (enables us not to re-process what we've already done)", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + "podsRecouped": { + SchemaProps: spec.SchemaProps{ + Description: "have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + "notSpecified": { + SchemaProps: spec.SchemaProps{ + Description: "if this is true, we already checked to see if we need to do it and we don't", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Artifact indicates an artifact to place at a specified path", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the artifact. must be unique within a template's inputs/outputs.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the container path to the artifact", + Type: []string{"string"}, + Format: "", + }, + }, + "mode": { + SchemaProps: spec.SchemaProps{ + Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "from": { + SchemaProps: spec.SchemaProps{ + Description: "From allows an artifact to reference an artifact from a previous step", + Type: []string{"string"}, + Format: "", + }, + }, + "archiveLogs": { + SchemaProps: spec.SchemaProps{ + Description: "ArchiveLogs indicates if the container logs should be archived", + Type: []string{"boolean"}, + Format: "", + }, + }, + "s3": { + SchemaProps: spec.SchemaProps{ + Description: "S3 contains S3 artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), + }, + }, + "git": { + SchemaProps: spec.SchemaProps{ + Description: "Git contains git artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), + }, + }, + "http": { + SchemaProps: spec.SchemaProps{ + Description: "HTTP contains HTTP artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), + }, + }, + "artifactory": { + SchemaProps: spec.SchemaProps{ + Description: "Artifactory contains artifactory artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), + }, + }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS contains HDFS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), + }, + }, + "raw": { + SchemaProps: spec.SchemaProps{ + Description: "Raw contains raw artifact location details", + Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), + }, + }, + "oss": { + SchemaProps: spec.SchemaProps{ + Description: "OSS contains OSS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), + }, + }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS contains GCS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), + }, + }, + "azure": { + SchemaProps: spec.SchemaProps{ + Description: "Azure contains Azure Storage artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), + }, + }, + "globalName": { + SchemaProps: spec.SchemaProps{ + Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", + Type: []string{"string"}, + Format: "", + }, + }, + "archive": { + SchemaProps: spec.SchemaProps{ + Description: "Archive controls how the artifact will be saved to the artifact repository.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", + Type: []string{"boolean"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", + Type: []string{"string"}, + Format: "", + }, + }, + "recurseMode": { + SchemaProps: spec.SchemaProps{ + Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", + Type: []string{"boolean"}, + Format: "", + }, + }, + "fromExpression": { + SchemaProps: spec.SchemaProps{ + Description: "FromExpression, if defined, is evaluated to specify the value for the artifact", + Type: []string{"string"}, + Format: "", + }, + }, + "artifactGC": { + SchemaProps: spec.SchemaProps{ + Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), + }, + }, + "deleted": { + SchemaProps: spec.SchemaProps{ + Description: "Has this been deleted?", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref common.ReferenceCallback) 
common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "strategy": { + SchemaProps: spec.SchemaProps{ + Description: "Strategy is the strategy to use.", + Type: []string{"string"}, + Format: "", + }, + }, + "podMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactGCSpec specifies the Artifacts that need to be deleted", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "artifactsByNode": { + SchemaProps: spec.SchemaProps{ + Description: "ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactGCStatus describes the result of the deletion", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "artifactResultsByNode": { + SchemaProps: spec.SchemaProps{ + Description: "ArtifactResultsByNode maps Node name to result", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). 
It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "archiveLogs": { + SchemaProps: spec.SchemaProps{ + Description: "ArchiveLogs indicates if the container logs should be archived", + Type: []string{"boolean"}, + Format: "", + }, + }, + "s3": { + SchemaProps: spec.SchemaProps{ + Description: "S3 contains S3 artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), + }, + }, + "git": { + SchemaProps: spec.SchemaProps{ + Description: "Git contains git artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), + }, + }, + "http": { + SchemaProps: spec.SchemaProps{ + Description: "HTTP contains HTTP artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), + }, + }, + "artifactory": { + SchemaProps: spec.SchemaProps{ + Description: "Artifactory contains artifactory artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), + }, + }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS contains HDFS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), + }, + }, + "raw": { + SchemaProps: spec.SchemaProps{ + Description: "Raw contains raw artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), + }, + }, + "oss": { + SchemaProps: spec.SchemaProps{ + Description: "OSS contains OSS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), + }, + }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS contains GCS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), + }, + }, + "azure": { + SchemaProps: spec.SchemaProps{ + Description: "Azure contains Azure Storage artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "archiveLocation": { + SchemaProps: 
spec.SchemaProps{ + Description: "ArchiveLocation is the template-level Artifact location specification", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"), + }, + }, + "artifacts": { + SchemaProps: spec.SchemaProps{ + Description: "Artifacts maps artifact name to Artifact description", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactPaths expands a step from a collection of artifacts", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the artifact. must be unique within a template's inputs/outputs.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the container path to the artifact", + Type: []string{"string"}, + Format: "", + }, + }, + "mode": { + SchemaProps: spec.SchemaProps{ + Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "from": { + SchemaProps: spec.SchemaProps{ + Description: "From allows an artifact to reference an artifact from a previous step", + Type: []string{"string"}, + Format: "", + }, + }, + "archiveLogs": { + SchemaProps: spec.SchemaProps{ + Description: "ArchiveLogs indicates if the container logs should be archived", + Type: []string{"boolean"}, + Format: "", + }, + }, + "s3": { + SchemaProps: spec.SchemaProps{ + Description: "S3 contains S3 artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), + }, + }, + "git": { + SchemaProps: spec.SchemaProps{ + Description: "Git contains git artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), + }, + }, + "http": { + SchemaProps: spec.SchemaProps{ + Description: "HTTP contains HTTP artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), + }, + }, + "artifactory": { + SchemaProps: spec.SchemaProps{ + Description: "Artifactory contains artifactory artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), + }, + }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS contains HDFS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), + }, + }, + "raw": { + SchemaProps: spec.SchemaProps{ + Description: "Raw contains raw artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), + }, + }, + "oss": { + SchemaProps: spec.SchemaProps{ + Description: "OSS contains OSS artifact location details", + Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), + }, + }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS contains GCS artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), + }, + }, + "azure": { + SchemaProps: spec.SchemaProps{ + Description: "Azure contains Azure Storage artifact location details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), + }, + }, + "globalName": { + SchemaProps: spec.SchemaProps{ + Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", + Type: []string{"string"}, + Format: "", + }, + }, + "archive": { + SchemaProps: spec.SchemaProps{ + Description: "Archive controls how the artifact will be saved to the artifact repository.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", + Type: []string{"boolean"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", + Type: []string{"string"}, + Format: "", + }, + }, + "recurseMode": { + SchemaProps: spec.SchemaProps{ + Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", + Type: []string{"boolean"}, + Format: "", + }, + }, + "fromExpression": { + SchemaProps: spec.SchemaProps{ + Description: "FromExpression, if defined, is evaluated to specify the value for the artifact", + Type: []string{"string"}, + Format: "", + }, + }, + "artifactGC": { + SchemaProps: spec.SchemaProps{ + Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), + }, + }, + "deleted": { + SchemaProps: spec.SchemaProps{ + Description: "Has this been deleted?", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactRepository represents an artifact repository in which a controller will 
store its artifacts", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "archiveLogs": { + SchemaProps: spec.SchemaProps{ + Description: "ArchiveLogs enables log archiving", + Type: []string{"boolean"}, + Format: "", + }, + }, + "s3": { + SchemaProps: spec.SchemaProps{ + Description: "S3 stores artifact in a S3-compliant object store", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"), + }, + }, + "artifactory": { + SchemaProps: spec.SchemaProps{ + Description: "Artifactory stores artifacts to JFrog Artifactory", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository"), + }, + }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS stores artifacts in HDFS", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository"), + }, + }, + "oss": { + SchemaProps: spec.SchemaProps{ + Description: "OSS stores artifact in a OSS-compliant object store", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository"), + }, + }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS stores artifact in a GCS object store", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository"), + }, + }, + "azure": { + SchemaProps: spec.SchemaProps{ + Description: "Azure stores artifact in an Azure Storage account", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the config map. Defaults to \"artifact-repositories\".", + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The config map key. Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the config map. Defaults to \"artifact-repositories\".", + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The config map key. 
Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).", + Type: []string{"string"}, + Format: "", + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "If this ref represents the default artifact repository, rather than a config map.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "artifactRepository": { + SchemaProps: spec.SchemaProps{ + Description: "The repository the workflow will use. This maybe empty before v3.1.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactResult describes the result of attempting to delete a given Artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the Artifact", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "success": { + SchemaProps: spec.SchemaProps{ + Description: "Success describes whether the deletion succeeded", + Type: []string{"boolean"}, + Format: "", + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Error is an optional error message which should be set if Success==false", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactResultNodeStatus describes the result of the deletion on a given node", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "artifactResults": { + SchemaProps: spec.SchemaProps{ + Description: "ArtifactResults maps Artifact name to result of the deletion", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "artifactGCStrategies": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + "artifactName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "templateName": { 
+ SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "nodeId": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "deleted": { + SchemaProps: spec.SchemaProps{ + Type: []string{"boolean"}, + Format: "", + }, + }, + "nodeTypes": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Artifact": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + "NodeID": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"Artifact", "NodeID"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactoryArtifact is the location of an artifactory artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL of the artifact", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "usernameSecret": { + SchemaProps: spec.SchemaProps{ + Description: "UsernameSecret is the secret selector to the repository username", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "passwordSecret": { + SchemaProps: spec.SchemaProps{ + Description: "PasswordSecret is the secret selector to the repository password", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + Required: []string{"url"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "usernameSecret": { + SchemaProps: spec.SchemaProps{ + Description: "UsernameSecret is the secret selector to the repository username", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "passwordSecret": { + SchemaProps: spec.SchemaProps{ + Description: "PasswordSecret is the secret selector to the repository password", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "repoURL": { + SchemaProps: spec.SchemaProps{ + Description: "RepoURL is the url for artifactory repo.", + Type: []string{"string"}, + Format: "", + }, + }, + "keyFormat": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, 
+ Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "usernameSecret": { + SchemaProps: spec.SchemaProps{ + Description: "UsernameSecret is the secret selector to the repository username", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "passwordSecret": { + SchemaProps: spec.SchemaProps{ + Description: "PasswordSecret is the secret selector to the repository password", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureArtifact is the location of a an Azure Storage artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the service url associated with an account. It is most likely \"https://.blob.core.windows.net\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container is the container where resources will be stored", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "accountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "blob": { + SchemaProps: spec.SchemaProps{ + Description: "Blob is the blob name (i.e., path) in the container where the artifact resides", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"endpoint", "container", "blob"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the service url associated with an account. 
It is most likely \"https://.blob.core.windows.net\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container is the container where resources will be stored", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "accountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "blobNameFormat": { + SchemaProps: spec.SchemaProps{ + Description: "BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"endpoint", "container"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the service url associated with an account. It is most likely \"https://.blob.core.windows.net\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container is the container where resources will be stored", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "accountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"endpoint", "container"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Backoff(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Backoff is a backoff strategy to use within retryStrategy", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "duration": { + SchemaProps: spec.SchemaProps{ + Description: "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")", + Type: []string{"string"}, + Format: "", + }, + }, + "factor": { + SchemaProps: spec.SchemaProps{ + Description: "Factor is a factor to multiply the base duration after each failed retry", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "maxDuration": { + SchemaProps: spec.SchemaProps{ + Description: "MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. 
It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. However, when the workflow fails, the pod's deadline is then overridden by maxDuration. This ensures that the workflow does not exceed the specified maximum duration when retries are involved.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BasicAuth describes the secret selectors required for basic authentication", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "usernameSecret": { + SchemaProps: spec.SchemaProps{ + Description: "UsernameSecret is the secret selector to the repository username", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "passwordSecret": { + SchemaProps: spec.SchemaProps{ + Description: "PasswordSecret is the secret selector to the repository password", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Cache(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Cache is the configuration for the type of cache to be used", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap sets a ConfigMap-based cache", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + }, + Required: []string{"configMap"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClientCertAuth holds necessary information for client authentication via certificates", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clientCertSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "clientKeySecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), + }, + }, + }, + Required: []string{"metadata", "spec"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate"), + }, + }, + }, + }, + }, + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Column(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Column is a custom column that will be exposed in the Workflow List View.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The name of this column, e.g., \"Workflow Completed\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "The type of this column, \"label\" or \"annotation\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The key of the label or annotation, e.g., \"workflows.argoproj.io/completed\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "type", "key"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the type of condition", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is the status of the condition", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Message is the condition message", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "name", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "resizePolicy": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resources resize policy for the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerResizePolicy"), + }, + }, + }, + }, + }, + "restartPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. 
Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "mountPath", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "devicePath", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dependencies": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerSetRetryStrategy provides controls on how to retry a container set", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "duration": { + SchemaProps: spec.SchemaProps{ + Description: "Duration is the time between each retry, examples values are \"300ms\", \"1s\" or \"5m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", + Type: []string{"string"}, + Format: "", + }, + }, + "retries": { + SchemaProps: spec.SchemaProps{ + Description: "Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + }, + Required: []string{"retries"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "containers": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode"), + }, + }, + }, + }, + }, + "volumeMounts": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "retryStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "RetryStrategy describes how to retry container nodes if the container set fails. 
Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy"),
+ },
+ },
+ },
+ Required: []string{"containers"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy", "k8s.io/api/core/v1.VolumeMount"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can specify whether the workflow should continue when the pod errors, fails, or both.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "error": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "failed": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Counter(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Counter is a Counter prometheus metric",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "value": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Value is the value of the metric",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"value"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "CreateS3BucketOptions are options used to determine the automatic bucket-creation process",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "objectLocking": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectLocking enables object locking",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "CronWorkflow is the definition of a scheduled workflow resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus"), + }, + }, + }, + Required: []string{"metadata", "spec"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CronWorkflowList is list of CronWorkflow resources", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow"), + }, + }, + }, + }, + }, + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CronWorkflowSpec is the specification of a CronWorkflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "workflowSpec": { + SchemaProps: spec.SchemaProps{ + Description: "WorkflowSpec is the spec of the workflow to be run", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), + }, + }, + "schedule": { + SchemaProps: spec.SchemaProps{ + Description: "Schedule is a schedule to run the Workflow in Cron format. Deprecated, use Schedules", + Type: []string{"string"}, + Format: "", + }, + }, + "concurrencyPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "ConcurrencyPolicy is the K8s-style concurrency policy that will be used", + Type: []string{"string"}, + Format: "", + }, + }, + "suspend": { + SchemaProps: spec.SchemaProps{ + Description: "Suspend is a flag that will stop new CronWorkflows from running if set to true", + Type: []string{"boolean"}, + Format: "", + }, + }, + "startingDeadlineSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "successfulJobsHistoryLimit": { + SchemaProps: spec.SchemaProps{ + Description: "SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failedJobsHistoryLimit": { + SchemaProps: spec.SchemaProps{ + Description: "FailedJobsHistoryLimit is the number of failed jobs to be kept at a time", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "timezone": { + SchemaProps: spec.SchemaProps{ + Description: "Timezone is the timezone against which the cron schedule will be calculated, e.g. \"Asia/Tokyo\". 
Default is machine's local time.", + Type: []string{"string"}, + Format: "", + }, + }, + "workflowMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "WorkflowMetadata contains some metadata of the workflow to be run", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "stopStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.StopStrategy"), + }, + }, + "schedules": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Schedules is a list of schedules to run the Workflow in Cron format", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "when": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: When is an expression that determines if a run should be scheduled.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"workflowSpec"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.StopStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CronWorkflowStatus is the status of a CronWorkflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "active": { + SchemaProps: spec.SchemaProps{ + Description: "Active is a list of active workflows stemming from this CronWorkflow", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + }, + }, + "lastScheduledTime": { + SchemaProps: spec.SchemaProps{ + Description: "LastScheduleTime is the last time the CronWorkflow was scheduled", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Conditions is a list of conditions the CronWorkflow may have", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"), + }, + }, + }, + }, + }, + "succeeded": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Succeeded counts how many times child workflows succeeded", + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + "failed": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Failed counts how many times child workflows failed", + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Phase is an enum of Active or Stopped. 
It changes to Stopped when stopStrategy.expression is true", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"active", "lastScheduledTime", "conditions", "succeeded", "failed", "phase"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_DAGTask(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DAGTask represents a node in the graph during DAG execution", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the target", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Name of template to execute", + Type: []string{"string"}, + Format: "", + }, + }, + "inline": { + SchemaProps: spec.SchemaProps{ + Description: "Inline is the template. Template must be empty if this is declared (and vice-versa).", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), + }, + }, + "arguments": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments are the parameter and artifact arguments to the template", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), + }, + }, + "templateRef": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateRef is the reference to the template resource to execute.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), + }, + }, + "dependencies": { + SchemaProps: spec.SchemaProps{ + Description: "Dependencies are name of other targets which this depends on", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "withItems": { + SchemaProps: spec.SchemaProps{ + Description: "WithItems expands a task into multiple parallel tasks from the items in the list", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"), + }, + }, + }, + }, + }, + "withParam": { + SchemaProps: spec.SchemaProps{ + Description: "WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.", + Type: []string{"string"}, + Format: "", + }, + }, + "withSequence": { + SchemaProps: spec.SchemaProps{ + Description: "WithSequence expands a task into a numeric sequence", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"), + }, + }, + "when": { + SchemaProps: spec.SchemaProps{ + Description: "When is an expression in which the task should conditionally execute", + Type: []string{"string"}, + Format: "", + }, + }, + "continueOn": { + SchemaProps: spec.SchemaProps{ + Description: "ContinueOn makes argo to proceed with the following step even if this step fails. 
Errors and Failed states can be specified",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"),
+ },
+ },
+ "onExit": {
+ SchemaProps: spec.SchemaProps{
+ Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. DEPRECATED: Use Hooks[exit].Template instead.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "depends": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Depends are names of other targets which this depends on",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hooks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Hooks hold the lifecycle hooks which are invoked at each lifecycle stage of the task, irrespective of the success, failure, or error status of the primary task",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"},
+ }
+}
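+// DAGTemplate below wires the DAGTask entries above into a graph; its
+// failFast flag (default true) stops scheduling new nodes once one node
+// fails, then waits for the running nodes to finish before failing the DAG.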
+
+func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "DAGTemplate is a template subtype for directed acyclic graph templates",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "target": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Target is one or more names of targets to execute in a DAG",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tasks": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Tasks are a list of DAG tasks",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"),
+ },
+ },
+ },
+ },
+ },
+ "failFast": {
+ SchemaProps: spec.SchemaProps{
+ Description: "This flag is for DAG logic. The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps as soon as it detects that one of the DAG nodes has failed. It then waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag defaults to true; if set to false, it allows a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"tasks"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Data(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Data is a data template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "source": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Source sources external data into a data template",
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource"),
+ },
+ },
+ "transformation": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Transformation applies a set of transformations",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"source", "transformation"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_DataSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "DataSource sources external data into a data template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "artifactPaths": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArtifactPaths is a data transformation that collects a list of artifact paths",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "selector": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Selector (https://github.com/expr-lang/expr) that must match the event. E.g.
`payload.message == \"test\"`", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"selector"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ExecutorConfig holds configurations of an executor container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName specifies the service account name of the executor container.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GCSArtifact is the location of a GCS artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "serviceAccountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the path in the bucket where the artifact resides", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GCSArtifactRepository defines the controller configuration for a GCS artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "serviceAccountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "keyFormat": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GCSBucket contains the access information for interfacring with a GCS bucket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "serviceAccountKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + 
}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Gauge(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Gauge is a Gauge prometheus metric", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the value to be used in the operation with the metric's current value. If no operation is set, value is the value of the metric", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "realtime": { + SchemaProps: spec.SchemaProps{ + Description: "Realtime emits this metric in real time if applicable", + Type: []string{"boolean"}, + Format: "", + }, + }, + "operation": { + SchemaProps: spec.SchemaProps{ + Description: "Operation defines the operation to apply with value and the metrics' current value", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"value", "realtime"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GitArtifact is the location of an git artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "repo": { + SchemaProps: spec.SchemaProps{ + Description: "Repo is the git repository", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "revision": { + SchemaProps: spec.SchemaProps{ + Description: "Revision is the git commit, tag, branch to checkout", + Type: []string{"string"}, + Format: "", + }, + }, + "depth": { + SchemaProps: spec.SchemaProps{ + Description: "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "fetch": { + SchemaProps: spec.SchemaProps{ + Description: "Fetch specifies a number of refs that should be fetched before checkout", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "usernameSecret": { + SchemaProps: spec.SchemaProps{ + Description: "UsernameSecret is the secret selector to the repository username", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "passwordSecret": { + SchemaProps: spec.SchemaProps{ + Description: "PasswordSecret is the secret selector to the repository password", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sshPrivateKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SSHPrivateKeySecret is the secret selector to the repository ssh private key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "insecureIgnoreHostKey": { + SchemaProps: spec.SchemaProps{ + Description: "InsecureIgnoreHostKey disables SSH strict host key checking during git clone", + Type: []string{"boolean"}, + Format: "", + }, + }, + "disableSubmodules": { + SchemaProps: spec.SchemaProps{ + Description: "DisableSubmodules disables submodules during git clone", + Type: []string{"boolean"}, + Format: "", + }, + }, + "singleBranch": { + SchemaProps: spec.SchemaProps{ + Description: "SingleBranch enables single branch clone, using the `branch` parameter", + Type: []string{"boolean"}, + Format: "", + }, + }, + 
"branch": { + SchemaProps: spec.SchemaProps{ + Description: "Branch is the branch to fetch when `SingleBranch` is enabled", + Type: []string{"string"}, + Format: "", + }, + }, + "insecureSkipTLS": { + SchemaProps: spec.SchemaProps{ + Description: "InsecureSkipTLS disables server certificate verification resulting in insecure HTTPS connections", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"repo"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSArtifact is the location of an HDFS artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is accessible addresses of HDFS name nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hdfsUser": { + SchemaProps: spec.SchemaProps{ + Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "dataTransferProtection": { + SchemaProps: spec.SchemaProps{ + Description: "DataTransferProtection is the protection level for HDFS data transfer. 
It corresponds to the dfs.data.transfer.protection configuration in HDFS.", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is a file path in HDFS", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "force": { + SchemaProps: spec.SchemaProps{ + Description: "Force copies a file forcibly even if it exists", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is accessible addresses of HDFS name nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hdfsUser": { + SchemaProps: spec.SchemaProps{ + Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "dataTransferProtection": { + SchemaProps: spec.SchemaProps{ + Description: "DataTransferProtection is the protection level for HDFS data transfer. It corresponds to the dfs.data.transfer.protection configuration in HDFS.", + Type: []string{"string"}, + Format: "", + }, + }, + "pathFormat": { + SchemaProps: spec.SchemaProps{ + Description: "PathFormat is defines the format of path to store a file. 
Can reference workflow variables", + Type: []string{"string"}, + Format: "", + }, + }, + "force": { + SchemaProps: spec.SchemaProps{ + Description: "Force copies a file forcibly even if it exists", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSConfig is configurations for HDFS", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is accessible addresses of HDFS name nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hdfsUser": { + SchemaProps: spec.SchemaProps{ + Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "dataTransferProtection": { + SchemaProps: spec.SchemaProps{ + Description: "DataTransferProtection is the protection level for HDFS data transfer. 
It corresponds to the dfs.data.transfer.protection configuration in HDFS.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSKrbConfig is auth configurations for Kerberos", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTP(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "method": { + SchemaProps: spec.SchemaProps{ + Description: "Method is HTTP methods for HTTP Request", + Type: []string{"string"}, + Format: "", + }, + }, + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL of the HTTP Request", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "headers": { + SchemaProps: spec.SchemaProps{ + Description: "Headers are an optional list of headers to send with HTTP requests", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"), + }, + }, + }, + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "TimeoutSeconds is request timeout for HTTP Request. 
Default is 30 seconds", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "successCondition": { + SchemaProps: spec.SchemaProps{ + Description: "SuccessCondition is an expression if evaluated to true is considered successful", + Type: []string{"string"}, + Format: "", + }, + }, + "body": { + SchemaProps: spec.SchemaProps{ + Description: "Body is content of the HTTP Request", + Type: []string{"string"}, + Format: "", + }, + }, + "bodyFrom": { + SchemaProps: spec.SchemaProps{ + Description: "BodyFrom is content of the HTTP Request as Bytes", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource"), + }, + }, + "insecureSkipVerify": { + SchemaProps: spec.SchemaProps{ + Description: "InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"url"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL of the artifact", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "headers": { + SchemaProps: spec.SchemaProps{ + Description: "Headers are an optional list of headers to send with HTTP requests for artifacts", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"), + }, + }, + }, + }, + }, + "auth": { + SchemaProps: spec.SchemaProps{ + Description: "Auth contains information for client authentication", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth"), + }, + }, + }, + Required: []string{"url"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clientCert": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth"), + }, + }, + "oauth2": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"), + }, + }, + "basicAuth": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HTTPBodySource contains the source of the HTTP body.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "bytes": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "valueFrom": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretKeyRef": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Header(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Header indicate a key-value request header to be used when fetching artifacts over HTTP", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the header name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the literal value to use for the header", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Histogram(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Histogram is a Histogram prometheus metric", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the value of the metric", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "buckets": { + SchemaProps: spec.SchemaProps{ + Description: "Buckets is a list of bucket divisors for the histogram", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"), + }, + }, + }, + }, + }, + }, + Required: []string{"value", "buckets"}, + }, + }, + Dependencies: []string{ 
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Inputs(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Parameters are a list of parameters passed as inputs", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), + }, + }, + }, + }, + }, + "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Artifact are a list of artifacts passed as inputs", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Item(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number", + Type: Item{}.OpenAPISchemaType(), + Format: Item{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LabelKeys is list of keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "expression": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"expression"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_LabelValues(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Labels is list of workflow labels", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "items": 
{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the name of the template to execute by the hook", + Type: []string{"string"}, + Format: "", + }, + }, + "arguments": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments hold arguments to the template", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), + }, + }, + "templateRef": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateRef is the reference to the template resource to execute by the hook", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), + }, + }, + "expression": { + SchemaProps: spec.SchemaProps{ + Description: "Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Link(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A link to another app.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the link, E.g. \"Workflow Logs\" or \"Pod Logs\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "scope": { + SchemaProps: spec.SchemaProps{ + Description: "\"workflow\", \"pod\", \"pod-logs\", \"event-source-logs\", \"sensor-logs\", \"workflow-list\" or \"chat\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "url": { + SchemaProps: spec.SchemaProps{ + Description: "The URL. Can contain \"${metadata.namespace}\", \"${metadata.name}\", \"${status.startedAt}\", \"${status.finishedAt}\" or any other element in workflow yaml, e.g. 
\"${workflow.metadata.annotations.userDefinedKey}\"", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "scope", "url"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "artifact": { + SchemaProps: spec.SchemaProps{ + Description: "Artifact contains the artifact to use", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + }, + Required: []string{"artifact"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MemoizationStatus is the status of this memoized node", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hit": { + SchemaProps: spec.SchemaProps{ + Description: "Hit indicates whether this node was created from a cache entry", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the name of the key used for this node's cache", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "cacheName": { + SchemaProps: spec.SchemaProps{ + Description: "Cache is the name of the cache that was used", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"hit", "key", "cacheName"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Memoize(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Memoization enables caching for the Outputs of the template", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the key to use as the caching key", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "cache": { + SchemaProps: spec.SchemaProps{ + Description: "Cache sets and configures the kind of cache", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"), + }, + }, + "maxAge": { + SchemaProps: spec.SchemaProps{ + Description: "MaxAge is the maximum age (e.g. \"180s\", \"24h\") of an entry that is still considered valid. 
If an entry is older than the MaxAge, it will be ignored.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"key", "cache", "maxAge"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Metadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Pod metadata",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricLabel is a single label for a prometheus metric",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "key": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "value": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"key", "value"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Metrics(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Metrics are a list of metrics emitted from a Workflow/Template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "prometheus": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Prometheus is a list of prometheus metrics to be emitted",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"prometheus"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Mutex(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Mutex holds Mutex configuration",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "name of the mutex",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace is the namespace of the mutex, default: [namespace of workflow]",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "MutexHolding
describes the mutex and the object which is holding it.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "mutex": { + SchemaProps: spec.SchemaProps{ + Description: "Reference for the mutex e.g: ${namespace}/mutex/${mutexName}", + Type: []string{"string"}, + Format: "", + }, + }, + "holder": { + SchemaProps: spec.SchemaProps{ + Description: "Holder is a reference to the object which holds the Mutex. Holding Scenario:\n 1. Current workflow's NodeID which is holding the lock.\n e.g: ${NodeID}\nWaiting Scenario:\n 1. Current workflow or other workflow NodeID which is holding the lock.\n e.g: ${WorkflowName}/${NodeID}", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "holding": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), + }, + }, + }, + }, + }, + "waiting": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Waiting is a list of mutexes and their respective objects this workflow is waiting for.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_NodeFlag(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hooked": { + SchemaProps: spec.SchemaProps{ + Description: "Hooked tracks whether or not this node was triggered by hook or onExit", + Type: []string{"boolean"}, + Format: "", + }, + }, + "retried": { + SchemaProps: spec.SchemaProps{ + Description: "Retried tracks whether or not this node was retried by retryStrategy", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_NodeResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "progress": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeStatus contains status information about an individual node in the workflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "id": { + SchemaProps: spec.SchemaProps{ + Description: "ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the ID deterministic", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is unique name in the node tree used to generate the node ID", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a human readable representation of the node. Unique within a template boundary", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type indicates type of node", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "templateName": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", + Type: []string{"string"}, + Format: "", + }, + }, + "templateRef": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), + }, + }, + "templateScope": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateScope is the template scope in which the template of this node was retrieved.", + Type: []string{"string"}, + Format: "", + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine. 
Will be one of these values \"Pending\", \"Running\" before the node is completed, or \"Succeeded\", \"Skipped\", \"Failed\", \"Error\", or \"Omitted\" as a final state.", + Type: []string{"string"}, + Format: "", + }, + }, + "boundaryID": { + SchemaProps: spec.SchemaProps{ + Description: "BoundaryID indicates the node ID of the associated template root node in which this node belongs to", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about why the node is in this condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this node started", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "finishedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this node completed", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "estimatedDuration": { + SchemaProps: spec.SchemaProps{ + Description: "EstimatedDuration in seconds.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "progress": { + SchemaProps: spec.SchemaProps{ + Description: "Progress to completion", + Type: []string{"string"}, + Format: "", + }, + }, + "resourcesDuration": { + SchemaProps: spec.SchemaProps{ + Description: "ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + "podIP": { + SchemaProps: spec.SchemaProps{ + Description: "PodIP captures the IP of the pod for daemoned steps", + Type: []string{"string"}, + Format: "", + }, + }, + "daemoned": { + SchemaProps: spec.SchemaProps{ + Description: "Daemoned tracks whether or not this node was daemoned and need to be terminated", + Type: []string{"boolean"}, + Format: "", + }, + }, + "nodeFlag": { + SchemaProps: spec.SchemaProps{ + Description: "NodeFlag tracks some history of node. e.g.) hooked, retried, etc.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag"), + }, + }, + "inputs": { + SchemaProps: spec.SchemaProps{ + Description: "Inputs captures input parameter values and artifact locations supplied to this template invocation", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Description: "Outputs captures output parameter values and artifact locations produced by this template invocation", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "children": { + SchemaProps: spec.SchemaProps{ + Description: "Children is a list of child node IDs", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "outboundNodes": { + SchemaProps: spec.SchemaProps{ + Description: "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as \"outbound\". 
Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the \"outbound\" node. In the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hostNodeName": { + SchemaProps: spec.SchemaProps{ + Description: "HostNodeName name of the Kubernetes node on which the Pod is running, if applicable", + Type: []string{"string"}, + Format: "", + }, + }, + "memoizationStatus": { + SchemaProps: spec.SchemaProps{ + Description: "MemoizationStatus holds information about cached nodes", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus"), + }, + }, + "synchronizationStatus": { + SchemaProps: spec.SchemaProps{ + Description: "SynchronizationStatus is the synchronization status of the node", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus"), + }, + }, + }, + Required: []string{"id", "name", "type"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeSynchronizationStatus stores the status of a node", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Waiting is the name of the lock that this node is waiting for", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. 
Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.", + Type: []string{"object"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OAuth2Auth holds all information for client authentication via OAuth2 tokens", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clientIDSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "clientSecretSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "tokenURLSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "scopes": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "endpointParams": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EndpointParam is for requesting optional fields that should be sent in the oauth request", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the header name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the literal value to use for the header", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OSSArtifact is the location of an Alibaba Cloud OSS artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: 
"CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", + Type: []string{"boolean"}, + Format: "", + }, + }, + "securityToken": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", + Type: []string{"string"}, + Format: "", + }, + }, + "lifecycleRule": { + SchemaProps: spec.SchemaProps{ + Description: "LifecycleRule specifies how to manage bucket's lifecycle", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the path in the bucket where the artifact resides", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OSSArtifactRepository defines the controller configuration for an OSS artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", + Type: []string{"boolean"}, + Format: "", + }, + }, + "securityToken": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityToken is the user's temporary security token. 
For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", + Type: []string{"string"}, + Format: "", + }, + }, + "lifecycleRule": { + SchemaProps: spec.SchemaProps{ + Description: "LifecycleRule specifies how to manage bucket's lifecycle", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "keyFormat": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", + Type: []string{"boolean"}, + Format: "", + }, + }, + "securityToken": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityToken is the user's temporary security token. 
For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", + Type: []string{"string"}, + Format: "", + }, + }, + "lifecycleRule": { + SchemaProps: spec.SchemaProps{ + Description: "LifecycleRule specifies how to manage bucket's lifecycle", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OSSLifecycleRule specifies how to manage bucket's lifecycle", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "markInfrequentAccessAfterDays": { + SchemaProps: spec.SchemaProps{ + Description: "MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "markDeletionAfterDays": { + SchemaProps: spec.SchemaProps{ + Description: "MarkDeletionAfterDays is the number of days before we delete objects in the bucket", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Object(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: Object{}.OpenAPISchemaType(), + Format: Object{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Outputs hold parameters, artifacts, and results from a step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Parameters holds the list of output parameters produced by a step", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), + }, + }, + }, + }, + }, + "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Artifacts holds the list of output artifacts produced by a step", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), + }, + }, + }, + }, + }, + "result": { + SchemaProps: spec.SchemaProps{ + Description: "Result holds the result (stdout) of a script template", + Type: []string{"string"}, + Format: 
"", + }, + }, + "exitCode": { + SchemaProps: spec.SchemaProps{ + Description: "ExitCode holds the exit code of a script template", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: ParallelSteps{}.OpenAPISchemaType(), + Format: ParallelSteps{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Parameter(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Parameter indicate a passed string parameter to a service template with an optional default value", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the parameter name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "Default is the default value to use for an input parameter if a value was not supplied", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the literal value to use for the parameter. If specified in the context of an input parameter, the value takes precedence over any passed values", + Type: []string{"string"}, + Format: "", + }, + }, + "valueFrom": { + SchemaProps: spec.SchemaProps{ + Description: "ValueFrom is the source for the output parameter's value", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"), + }, + }, + "globalName": { + SchemaProps: spec.SchemaProps{ + Description: "GlobalName exports an output parameter to the global scope, making it available as '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters", + Type: []string{"string"}, + Format: "", + }, + }, + "enum": { + SchemaProps: spec.SchemaProps{ + Description: "Enum holds a list of string values to choose from, for the actual value of the parameter", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is the parameter description", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Plugin is an Object with exactly one key", + Type: []string{"object"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_PodGC(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodGC describes how to delete completed pods as they complete", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "strategy": 
{ + SchemaProps: spec.SchemaProps{ + Description: "Strategy is the strategy to use. One of \"OnPodCompletion\", \"OnPodSuccess\", \"OnWorkflowCompletion\", \"OnWorkflowSuccess\". If unset, does not delete Pods", + Type: []string{"string"}, + Format: "", + }, + }, + "labelSelector": { + SchemaProps: spec.SchemaProps{ + Description: "LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "deleteDelayDuration": { + SchemaProps: spec.SchemaProps{ + Description: "DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Prometheus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Prometheus is a prometheus metric to be emitted", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the metric", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "labels": { + SchemaProps: spec.SchemaProps{ + Description: "Labels is a list of metric labels", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"), + }, + }, + }, + }, + }, + "help": { + SchemaProps: spec.SchemaProps{ + Description: "Help is a string that describes the metric", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "when": { + SchemaProps: spec.SchemaProps{ + Description: "When is a conditional statement that decides when to emit the metric", + Type: []string{"string"}, + Format: "", + }, + }, + "gauge": { + SchemaProps: spec.SchemaProps{ + Description: "Gauge is a gauge metric", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge"), + }, + }, + "histogram": { + SchemaProps: spec.SchemaProps{ + Description: "Histogram is a histogram metric", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram"), + }, + }, + "counter": { + SchemaProps: spec.SchemaProps{ + Description: "Counter is a counter metric", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter"), + }, + }, + }, + Required: []string{"name", "help"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RawArtifact allows raw string content to be placed as an artifact in a container", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is the string contents of the artifact", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + 
Required: []string{"data"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceTemplate is a template subtype to manipulate kubernetes resources", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "action": { + SchemaProps: spec.SchemaProps{ + Description: "Action is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "mergeStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json", + Type: []string{"string"}, + Format: "", + }, + }, + "manifest": { + SchemaProps: spec.SchemaProps{ + Description: "Manifest contains the kubernetes manifest", + Type: []string{"string"}, + Format: "", + }, + }, + "manifestFrom": { + SchemaProps: spec.SchemaProps{ + Description: "ManifestFrom is the source for a single kubernetes manifest", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"), + }, + }, + "setOwnerReference": { + SchemaProps: spec.SchemaProps{ + Description: "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "successCondition": { + SchemaProps: spec.SchemaProps{ + Description: "SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step", + Type: []string{"string"}, + Format: "", + }, + }, + "failureCondition": { + SchemaProps: spec.SchemaProps{ + Description: "FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed", + Type: []string{"string"}, + Format: "", + }, + }, + "flags": { + SchemaProps: spec.SchemaProps{ + Description: "Flags is a set of additional options passed to kubectl before submitting a resource I.e. 
to disable resource validation: flags: [\n\t\"--validate=false\" # disable resource validation\n]", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"action"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RetryAffinity prevents running steps on the same host.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeAntiAffinity": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. In order to prevent running steps on the same host, it uses \"kubernetes.io/hostname\".", + Type: []string{"object"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RetryStrategy provides controls on how to retry a workflow step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "limit": { + SchemaProps: spec.SchemaProps{ + Description: "Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "retryPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "RetryPolicy is a policy of NodePhase statuses that will be retried", + Type: []string{"string"}, + Format: "", + }, + }, + "backoff": { + SchemaProps: spec.SchemaProps{ + Description: "Backoff is a backoff strategy", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff"), + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "Affinity prevents running workflow's step on the same host", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity"), + }, + }, + "expression": { + SchemaProps: spec.SchemaProps{ + Description: "Expression is a condition expression for when a node will be retried. 
If it evaluates to false, the node will not be retried and the retry strategy will be ignored", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "S3Artifact is the location of an S3 artifact", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "region": { + SchemaProps: spec.SchemaProps{ + Description: "Region contains the optional bucket region", + Type: []string{"string"}, + Format: "", + }, + }, + "insecure": { + SchemaProps: spec.SchemaProps{ + Description: "Insecure will connect to the service with TLS", + Type: []string{"boolean"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sessionTokenSecret": { + SchemaProps: spec.SchemaProps{ + Description: "SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), + }, + }, + "encryptionOptions": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), + }, + }, + "caSecret": { + SchemaProps: spec.SchemaProps{ + Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the key in the bucket where the artifact resides", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "S3ArtifactRepository defines the controller configuration for an S3 artifact repository", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "region": { + SchemaProps: spec.SchemaProps{ + Description: "Region contains the optional bucket region", + Type: []string{"string"}, + Format: "", + }, + }, + "insecure": { + SchemaProps: spec.SchemaProps{ + Description: "Insecure will connect to the service with TLS", + Type: []string{"boolean"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sessionTokenSecret": { + SchemaProps: spec.SchemaProps{ + Description: "SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), + }, + }, + "encryptionOptions": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), + }, + }, + "caSecret": { + SchemaProps: spec.SchemaProps{ + Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "keyFormat": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", + Type: []string{"string"}, + Format: "", + }, + }, + "keyPrefix": { + SchemaProps: spec.SchemaProps{ + Description: "KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. DEPRECATED. Use KeyFormat instead", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "S3Bucket contains the access information required for interfacing with an S3 bucket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint is the hostname of the bucket endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "bucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bucket is the name of the bucket", + Type: []string{"string"}, + Format: "", + }, + }, + "region": { + SchemaProps: spec.SchemaProps{ + Description: "Region contains the optional bucket region", + Type: []string{"string"}, + Format: "", + }, + }, + "insecure": { + SchemaProps: spec.SchemaProps{ + Description: "Insecure will connect to the service with TLS", + Type: []string{"boolean"}, + Format: "", + }, + }, + "accessKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "AccessKeySecret is the secret selector to the bucket's access key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "secretKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySecret is the secret selector to the bucket's secret key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sessionTokenSecret": { + SchemaProps: spec.SchemaProps{ + Description: "SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, + "useSDKCreds": { + SchemaProps: spec.SchemaProps{ + Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "createBucketIfNotPresent": { + SchemaProps: spec.SchemaProps{ + Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it 
doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), + }, + }, + "encryptionOptions": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), + }, + }, + "caSecret": { + SchemaProps: spec.SchemaProps{ + Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "S3EncryptionOptions used to determine encryption options during s3 operations", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kmsKeyId": { + SchemaProps: spec.SchemaProps{ + Description: "KMSKeyId tells the driver to encrypt the object using the specified KMS Key.", + Type: []string{"string"}, + Format: "", + }, + }, + "kmsEncryptionContext": { + SchemaProps: spec.SchemaProps{ + Description: "KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information", + Type: []string{"string"}, + Format: "", + }, + }, + "enableEncryption": { + SchemaProps: spec.SchemaProps{ + Description: "EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used", + Type: []string{"boolean"}, + Format: "", + }, + }, + "serverSideCustomerKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScriptTemplate is a template subtype to enable scripting through code steps", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "name", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "resizePolicy": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resources resize policy for the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerResizePolicy"), + }, + }, + }, + }, + }, + "restartPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. 
Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "mountPath", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "devicePath", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "source": { + SchemaProps: spec.SchemaProps{ + Description: "Source contains the source code of the script to execute", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "source"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "semaphore": { + SchemaProps: spec.SchemaProps{ + Description: "Semaphore stores the semaphore name.", + Type: []string{"string"}, + Format: "", + }, + }, + "holders": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Holders stores the list of current holder names in the workflow.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SemaphoreRef is a reference of Semaphore", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMapKeyRef": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMapKeyRef is configmap selector for Semaphore configuration", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace is the namespace of the configmap, default: [namespace of workflow]", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "holding": { + SchemaProps: spec.SchemaProps{ + Description: "Holding stores the list of resource acquired synchronization lock for workflows.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), + }, + }, + }, + }, + }, + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Waiting indicates the list of current synchronization lock holders.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), + }, + }, + }, + }, + }, + }, + }, + }, + 
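+ // Illustrative sketch (editor's note, not generator output): in a
+ // Workflow's status this surfaces as, e.g.,
+ //
+ //   synchronization:
+ //     semaphore:
+ //       holding:
+ //         - semaphore: default/ConfigMap/my-semaphores/workflow   # hypothetical lock name
+ //           holders:
+ //             - my-workflow
+ //
+ // with "waiting" populated analogously while the lock is contended.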
Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Sequence(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Sequence expands a workflow step into a numeric range",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "count": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Count is the number of elements in the sequence (default: 0). Not to be used with end",
+ Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
+ },
+ },
+ "start": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number at which to start the sequence (default: 0)",
+ Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
+ },
+ },
+ "end": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number at which to end the sequence (default: 0). Not to be used with Count",
+ Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
+ },
+ },
+ "format": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Format is a printf format string to format the value in the sequence",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/util/intstr.IntOrString"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_StopStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "StopStrategy defines if the CronWorkflow should stop scheduling based on an expression. v3.6 and after",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "expression": {
+ SchemaProps: spec.SchemaProps{
+ Description: "v3.6 and after: Expression is an expression that stops scheduling workflows when true.
Use the variables `cronworkflow`.`failed` or `cronworkflow`.`succeeded` to access the number of failed or successful child workflows.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"expression"}, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Submit(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "workflowTemplateRef": { + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTemplateRef the workflow template to submit", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata optional means to customize select fields of the workflow metadata", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "arguments": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments extracted from the event and then set as arguments to the workflow created.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), + }, + }, + }, + Required: []string{"workflowTemplateRef"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SubmitOpts are workflow submission options", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name overrides metadata.name", + Type: []string{"string"}, + Format: "", + }, + }, + "generateName": { + SchemaProps: spec.SchemaProps{ + Description: "GenerateName overrides metadata.generateName", + Type: []string{"string"}, + Format: "", + }, + }, + "entryPoint": { + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint overrides spec.entrypoint", + Type: []string{"string"}, + Format: "", + }, + }, + "parameters": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters passes input parameters to workflow", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "serviceAccount": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccount runs all pods in the workflow using specified ServiceAccount.", + Type: []string{"string"}, + Format: "", + }, + }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "DryRun validates the workflow on the client-side without creating it. 
This option is not supported in the API",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "serverDryRun": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServerDryRun validates the workflow on the server-side without creating it",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Labels adds to metadata.labels",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ownerReference": {
+ SchemaProps: spec.SchemaProps{
+ Description: "OwnerReference creates a metadata.ownerReference",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations adds to metadata.annotations",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "podPriorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Set the podPriorityClassName of the workflow",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Priority is used if the controller is configured to process a limited number of workflows in parallel; higher-priority workflows are processed first.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.",
+ Type: []string{"object"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "duration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Duration is the number of seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds.
Could also be a Duration, e.g.: \"2m\", \"6h\"", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Synchronization(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Synchronization holds synchronization lock configuration", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "semaphore": { + SchemaProps: spec.SchemaProps{ + Description: "Semaphore holds the Semaphore configuration - deprecated, use semaphores instead", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"), + }, + }, + "mutex": { + SchemaProps: spec.SchemaProps{ + Description: "Mutex holds the Mutex lock details - deprecated, use mutexes instead", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex"), + }, + }, + "semaphores": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Semaphores holds the list of Semaphores configuration", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"), + }, + }, + }, + }, + }, + "mutexes": { + SchemaProps: spec.SchemaProps{ + Description: "v3.6 and after: Mutexes holds the list of Mutex lock details", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SynchronizationStatus stores the status of semaphore and mutex.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "semaphore": { + SchemaProps: spec.SchemaProps{ + Description: "Semaphore stores this workflow's Semaphore holder details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"), + }, + }, + "mutex": { + SchemaProps: spec.SchemaProps{ + Description: "Mutex stores this workflow's mutex holder details", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secondsAfterCompletion": { + SchemaProps: spec.SchemaProps{ + Description: "SecondsAfterCompletion is the number of seconds to live after completion", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "secondsAfterSuccess": { + SchemaProps: 
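+ // Illustrative sketch (editor's note, not generator output): e.g.
+ //
+ //   ttlStrategy:
+ //     secondsAfterSuccess: 300
+ //
+ // would delete a succeeded Workflow five minutes after completion; the
+ // value is an assumption chosen for the example.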
spec.SchemaProps{ + Description: "SecondsAfterSuccess is the number of seconds to live after success", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "secondsAfterFailure": { + SchemaProps: spec.SchemaProps{ + Description: "SecondsAfterFailure is the number of seconds to live after failure", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TarStrategy will tar and gzip the file or directory when saving", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "compressionLevel": { + SchemaProps: spec.SchemaProps{ + Description: "CompressionLevel specifies the gzip compression level to use for the artifact. Defaults to gzip.DefaultCompression.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Template is a reusable and composable unit of execution in a workflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the template", + Type: []string{"string"}, + Format: "", + }, + }, + "inputs": { + SchemaProps: spec.SchemaProps{ + Description: "Inputs describe what inputs parameters and artifacts are supplied to this template", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Description: "Outputs describe the parameters and artifacts that this template produces", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metdata sets the pods's metadata, i.e. 
annotations and labels", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), + }, + }, + "daemon": { + SchemaProps: spec.SchemaProps{ + Description: "Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness", + Type: []string{"boolean"}, + Format: "", + }, + }, + "steps": { + SchemaProps: spec.SchemaProps{ + Description: "Steps define a series of sequential/parallel workflow steps", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps"), + }, + }, + }, + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container is the main container image to run in the pod", + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + "containerSet": { + SchemaProps: spec.SchemaProps{ + Description: "ContainerSet groups multiple containers within a single pod.", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script runs a portion of code against an interpreter", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate"), + }, + }, + "resource": { + SchemaProps: spec.SchemaProps{ + Description: "Resource template subtype which can run k8s resources", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate"), + }, + }, + "dag": { + SchemaProps: spec.SchemaProps{ + Description: "DAG template subtype which runs a DAG", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate"), + }, + }, + "suspend": { + SchemaProps: spec.SchemaProps{ + Description: "Suspend template subtype which can suspend a workflow when reaching the step", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate"), + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is a data template", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data"), + }, + }, + "http": { + SchemaProps: spec.SchemaProps{ + Description: "HTTP makes a HTTP request", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP"), + }, + }, + "plugin": { + SchemaProps: spec.SchemaProps{ + Description: "Plugin is a plugin template", + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin"), + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes is a list of volumes that can be mounted by containers in a template.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "InitContainers is a list of containers which run before the main container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ 
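+ // Illustrative sketch (editor's note, not generator output): an init
+ // container in a template is declared like a regular container, e.g.
+ //
+ //   initContainers:
+ //     - name: warm-cache                    # hypothetical name
+ //       image: alpine:3.19                  # hypothetical image
+ //       command: [sh, -c, "echo prefetch"]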
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"),
+ },
+ },
+ },
+ },
+ },
+ "sidecars": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"),
+ },
+ },
+ },
+ },
+ },
+ "archiveLocation": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"),
+ },
+ },
+ "activeDeadlineSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.",
+ Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
+ },
+ },
+ "retryStrategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RetryStrategy describes how to retry a template when it fails",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"),
+ },
+ },
+ "parallelism": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "failFast": {
+ SchemaProps: spec.SchemaProps{
+ Description: "FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with `withItems`, etc.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "tolerations": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "key",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Tolerations to apply to workflow pods.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PriorityClassName to apply to workflow pods.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Priority to apply to workflow pods.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName to apply to workflow pods",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "automountServiceAccountToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "executor": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Executor holds configurations of the executor container.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"),
+ },
+ },
+ "hostAliases": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "ip",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.HostAlias"),
+ },
+ },
+ },
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "podSpecPatch": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metrics": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Metrics are a list of metrics emitted from this template",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"),
+ },
+ },
+ "synchronization": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Synchronization holds synchronization lock configuration for this template",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"),
+ },
+ },
+ "memoize": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Memoize allows templates to use outputs generated from already executed templates",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize"),
+ },
+ },
+ "timeout": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Timeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TemplateRef is a reference of template resource.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name is the resource name of the template.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "template": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Template is the name of referred template in the resource.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "clusterScope": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "expression": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Expression defines an expr expression to apply",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"expression"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "UserContainer is a container specified by a user.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "image": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "command": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "args": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "workingDir": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ports": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "containerPort",
+ "protocol",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "containerPort",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ContainerPort"),
+ },
+ },
+ },
+ },
+ },
+ "envFrom": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
+ },
+ },
+ },
+ },
+ },
+ "env": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "name",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "resizePolicy": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Resources resize policy for the container.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ContainerResizePolicy"),
+ },
+ },
+ },
+ },
+ },
+ "restartPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "volumeMounts": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "mountPath",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "mountPath",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.VolumeMount"),
+ },
+ },
+ },
+ },
+ },
+ "volumeDevices": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "devicePath",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "devicePath",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "volumeDevices is the list of block devices to be used by the container.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.VolumeDevice"),
+ },
+ },
+ },
+ },
+ },
+ "livenessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "readinessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "startupProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "lifecycle": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ Ref: ref("k8s.io/api/core/v1.Lifecycle"),
+ },
+ },
+ "terminationMessagePath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "terminationMessagePolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "imagePullPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.SecurityContext"),
+ },
+ },
+ "stdin": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "stdinOnce": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "tty": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "mirrorVolumeMounts": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ValueFrom describes a location in which to obtain the value to a parameter",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "path": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Path in the container to retrieve an output parameter value from in container templates",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "jsonPath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "JSONPath of a resource to retrieve an output parameter value from in resource templates",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "jqFilter": {
+ SchemaProps: spec.SchemaProps{
+ Description: "JQFilter expression against the resource object in resource templates",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "event": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "parameter": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Parameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. '{{steps.mystep.outputs.myparam}}')",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "supplied": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Supplied value to be filled in directly, either through the CLI, API, etc.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom"),
+ },
+ },
+ "configMapKeyRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ConfigMapKeyRef is configmap selector for input parameter configuration",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"),
+ },
+ },
+ "default": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Default specifies a value to be used if retrieving the value from the specified source fails",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "expression": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Expression, if defined, is evaluated to specify the value for the parameter",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom", "k8s.io/api/core/v1.ConfigMapKeySelector"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Version(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "version": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "buildDate": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "gitCommit": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "gitTag": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "gitTreeState": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "goVersion": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "compiler": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "platform": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"version", "buildDate", "gitCommit", "gitTag", "gitTreeState", "goVersion", "compiler", "platform"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "VolumeClaimGC describes how to delete volumes from completed Workflows",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "strategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Strategy is the strategy to use. One of \"OnWorkflowCompletion\", \"OnWorkflowSuccess\". Defaults to \"OnWorkflowSuccess\"",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_Workflow(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Workflow is the definition of a workflow resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"),
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus"),
+ },
+ },
+ },
+ Required: []string{"metadata", "spec"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec"),
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus"),
+ },
+ },
+ },
+ Required: []string{"metadata", "spec"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
+ },
+ },
+ "items": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"metadata", "items"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowEventBinding is the definition of an event resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec"),
+ },
+ },
+ },
+ Required: []string{"metadata", "spec"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowEventBindingList is list of event resources",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
+ },
+ },
+ "items": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"metadata", "items"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "event": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Event is the event to bind to",
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event"),
+ },
+ },
+ "submit": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Submit is the workflow template to submit",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"),
+ },
+ },
+ },
+ Required: []string{"event"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowLevelArtifactGC(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "strategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Strategy is the strategy to use.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "podMetadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"),
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "forceFinalizerRemoval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "podSpecPatch": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowList is list of Workflow resources",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
+ },
+ },
+ "items": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"metadata", "items"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "labelsFrom": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowSpec is the specification of a Workflow.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "templates": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Templates is a list of workflow templates used in a workflow",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"),
+ },
+ },
+ },
+ },
+ },
+ "entrypoint": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Entrypoint is a template reference to the starting point of the workflow.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "arguments": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{workflow.parameters.myparam}}",
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"),
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "automountServiceAccountToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "executor": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Executor holds configurations of executor containers of the workflow.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"),
+ },
+ },
+ "volumes": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Volumes is a list of volumes that can be mounted by containers in a workflow.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Volume"),
+ },
+ },
+ },
+ },
+ },
+ "volumeClaimTemplates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"),
+ },
+ },
+ },
+ },
+ },
+ "parallelism": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Parallelism limits the max total parallel pods that can execute at the same time in a workflow",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "artifactRepositoryRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef"),
+ },
+ },
+ "suspend": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Suspend will suspend the workflow and prevent execution of any future steps in the workflow",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "nodeSelector": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Affinity sets the scheduling constraints for all pods in the workflow. Can be overridden by an affinity specified in the template",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "tolerations": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "key",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Tolerations to apply to workflow pods.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "imagePullSecrets": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ "hostNetwork": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Host networking requested for this workflow pod. Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "dnsPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Set DNS policy for workflow pods. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "dnsConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
+ Ref: ref("k8s.io/api/core/v1.PodDNSConfig"),
+ },
+ },
+ "onExit": {
+ SchemaProps: spec.SchemaProps{
+ Description: "OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary workflow.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ttlStrategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy"),
+ },
+ },
+ "activeDeadlineSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the workflow. A value of zero is used to terminate a Running workflow",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "podGC": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodGC describes the strategy to use when deleting completed pods",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC"),
+ },
+ },
+ "podPriorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PriorityClassName to apply to workflow pods.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "podPriority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Priority to apply to workflow pods. DEPRECATED: Use PodPriorityClassName instead.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "hostAliases": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "ip",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.HostAlias"),
+ },
+ },
+ },
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "podSpecPatch": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "podDisruptionBudget": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.",
+ Ref: ref("k8s.io/api/policy/v1.PodDisruptionBudgetSpec"),
+ },
+ },
+ "metrics": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Metrics are a list of metrics emitted from this Workflow",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"),
+ },
+ },
+ "shutdown": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Shutdown will shutdown the workflow according to its ShutdownStrategy",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "workflowTemplateRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"),
+ },
+ },
+ "synchronization": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Synchronization holds synchronization lock configuration for this Workflow",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"),
+ },
+ },
+ "volumeClaimGC": {
+ SchemaProps: spec.SchemaProps{
+ Description: "VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC"),
+ },
+ },
+ "retryStrategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RetryStrategy for all templates in the workflow.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"),
+ },
+ },
+ "podMetadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodMetadata defines additional metadata that should be applied to workflow pods",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"),
+ },
+ },
+ "templateDefaults": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"),
+ },
+ },
+ "archiveLogs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArchiveLogs indicates if the container logs should be archived",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "hooks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"),
+ },
+ },
+ },
+ },
+ },
+ "workflowMetadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowMetadata contains some metadata of the workflow to refer to",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata"),
+ },
+ },
+ "artifactGC": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/api/policy/v1.PodDisruptionBudgetSpec"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowStatus contains overall status information about a workflow",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "phase": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Phase a simple, high-level summary of where the workflow is in its lifecycle. Will be \"\" (Unknown), \"Pending\", or \"Running\" before the workflow is completed, and \"Succeeded\", \"Failed\" or \"Error\" once the workflow has completed.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "startedAt": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Time at which this workflow started",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ "finishedAt": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Time at which this workflow completed",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ "estimatedDuration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EstimatedDuration in seconds.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "progress": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Progress to completion",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "message": {
+ SchemaProps: spec.SchemaProps{
+ Description: "A human readable message indicating details about why the workflow is in this condition.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "compressedNodes": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compressed and base64 decoded Nodes map",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "nodes": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Nodes is a mapping between a node ID and the node's status.",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus"),
+ },
+ },
+ },
+ },
+ },
+ "offloadNodeStatusVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "storedTemplates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "StoredTemplates is a mapping between a template ref and the node's status.",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"),
+ },
+ },
+ },
+ },
+ },
+ "persistentVolumeClaims": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. The contents of this list are drained at the end of the workflow.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Volume"),
+ },
+ },
+ },
+ },
+ },
+ "outputs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Outputs captures output values and artifact locations produced by the workflow via global outputs",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"),
+ },
+ },
+ "conditions": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Conditions is a list of conditions the Workflow may have",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"),
+ },
+ },
+ },
+ },
+ },
+ "resourcesDuration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ResourcesDuration is the total for the workflow",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: 0,
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ },
+ },
+ },
+ "storedWorkflowTemplateSpec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"),
+ },
+ },
+ "synchronization": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Synchronization stores the status of synchronization locks",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus"),
+ },
+ },
+ "artifactRepositoryRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus"),
+ },
+ },
+ "artifactGCStatus": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ArtifactGCStatus maintains the status of Artifact Garbage Collection",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus"),
+ },
+ },
+ "taskResultsCompletionStatus": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: false,
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
+func schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "WorkflowStep is a reference to a template to execute in a series of step",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the step",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "template": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Template is the name of the template to execute as the step",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "inline": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Inline is the template. Template must be empty if this is declared (and vice-versa).",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"),
+ },
+ },
+ "arguments": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Arguments hold arguments to the template",
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"),
+ },
+ },
+ "templateRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TemplateRef is the reference to the template resource to execute as the step.",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"),
+ },
+ },
+ "withItems": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WithItems expands a step into multiple parallel steps from the items in the list",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"),
+ },
+ },
+ },
+ },
+ },
+ "withParam": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "withSequence": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WithSequence expands a step into a numeric sequence",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"),
+ },
+ },
+ "when": {
+ SchemaProps: spec.SchemaProps{
+ Description: "When is an expression in which the step should conditionally execute",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "continueOn": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified",
+ Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"),
+ },
+ },
+ "onExit": {
+ SchemaProps: spec.SchemaProps{
+ Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. 
DEPRECATED: Use Hooks[exit].Template instead.", + Type: []string{"string"}, + Format: "", + }, + }, + "hooks": { + SchemaProps: spec.SchemaProps{ + Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has more capacity. This is an internal type. Users should never create this resource directly, much like you would never create a ReplicaSet directly.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "progress": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"metadata"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult"), + }, + }, + }, + }, + }, + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus"), + }, + }, + }, + Required: []string{"metadata", "spec"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet"), + }, + }, + }, + }, + }, + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "tasks": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodes": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTemplate is the definition of a workflow template resource", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), + }, + }, + }, + Required: []string{"metadata", "spec"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTemplateList is list of WorkflowTemplate resources", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate"), + }, + }, + }, + }, + }, + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTemplateRef is a reference to a WorkflowTemplate resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the resource name of the workflow template.", + Type: []string{"string"}, + Format: "", + }, + }, + "clusterScope": { + SchemaProps: spec.SchemaProps{ + Description: "ClusterScope indicates the referred template is cluster scoped (i.e. 
a ClusterWorkflowTemplate).", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ZipStrategy will unzip zipped input artifacts", + Type: []string{"object"}, + }, + }, + } +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go new file mode 100644 index 00000000..a505a2aa --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go @@ -0,0 +1,29 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" +) + +// Plugin is an Object with exactly one key +type Plugin struct { + Object `json:",inline" protobuf:"bytes,1,opt,name=object"` +} + +// UnmarshalJSON unmarshalls the Plugin from JSON, and also validates that it is a map exactly one key +func (p *Plugin) UnmarshalJSON(value []byte) error { + if err := p.Object.UnmarshalJSON(value); err != nil { + return err + } + // by validating the structure in UnmarshallJSON, we prevent bad data entering the system at the point of + // parsing, which means we do not need validate + m := map[string]interface{}{} + if err := json.Unmarshal(p.Object.Value, &m); err != nil { + return err + } + numKeys := len(m) + if numKeys != 1 { + return fmt.Errorf("expected exactly one key, got %d", numKeys) + } + return nil +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go new file mode 100644 index 00000000..14a57f98 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go @@ -0,0 +1,54 @@ +package v1alpha1 + +import ( + "fmt" + "strconv" + "strings" +) + +// Progress in N/M format. N is number of task complete. M is number of tasks. 
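+//
+// A minimal usage sketch (illustrative only, using just the helpers defined
+// in this file): per-node progress values compose with Add and are finished
+// with Complete.
+//
+//	p, _ := ParseProgress("1/2")
+//	q, _ := ParseProgress("3/4")
+//	total := p.Add(q)        // "4/6"
+//	done := total.Complete() // "6/6"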
+type Progress string + +const ( + ProgressUndefined = Progress("") + ProgressZero = Progress("0/0") // zero value (not the same as "no progress) + ProgressDefault = Progress("0/1") +) + +func NewProgress(n, m int64) (Progress, bool) { + return ParseProgress(fmt.Sprintf("%v/%v", n, m)) +} + +func ParseProgress(s string) (Progress, bool) { + v := Progress(s) + return v, v.IsValid() +} + +func (in Progress) parts() []string { + return strings.SplitN(string(in), "/", 2) +} + +func (in Progress) N() int64 { + return parseInt64(in.parts()[0]) +} + +func (in Progress) M() int64 { + return parseInt64(in.parts()[1]) +} + +func (in Progress) Add(x Progress) Progress { + return Progress(fmt.Sprintf("%v/%v", in.N()+x.N(), in.M()+x.M())) +} + +func (in Progress) Complete() Progress { + return Progress(fmt.Sprintf("%v/%v", in.M(), in.M())) +} + +func (in Progress) IsValid() bool { + return in != "" && in.N() >= 0 && in.N() <= in.M() && in.M() > 0 +} + +func parseInt64(s string) int64 { + v, _ := strconv.ParseInt(s, 10, 64) + return v +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go new file mode 100644 index 00000000..b4d6738f --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go @@ -0,0 +1,56 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{Group: workflow.Group, Version: "v1alpha1"} + WorkflowSchemaGroupVersionKind = schema.GroupVersionKind{Group: workflow.Group, Version: "v1alpha1", Kind: workflow.WorkflowKind} +) + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
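+//
+// Typical use goes through the package-level AddToScheme rather than calling
+// this directly (a minimal sketch, assuming only k8s.io/apimachinery):
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}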
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Workflow{}, + &WorkflowList{}, + &WorkflowEventBinding{}, + &WorkflowEventBindingList{}, + &WorkflowTemplate{}, + &WorkflowTemplateList{}, + &CronWorkflow{}, + &CronWorkflowList{}, + &ClusterWorkflowTemplate{}, + &ClusterWorkflowTemplateList{}, + &WorkflowTaskSet{}, + &WorkflowTaskSetList{}, + &WorkflowArtifactGCTask{}, + &WorkflowArtifactGCTaskList{}, + &WorkflowTaskResult{}, + &WorkflowTaskResultList{}, + &WorkflowArtifactGCTask{}, + &WorkflowArtifactGCTaskList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go new file mode 100644 index 00000000..6f19052a --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go @@ -0,0 +1,23 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has +// more capacity. This is an internal type. Users should never create this resource directly, much like you would +// never create a ReplicaSet directly. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowTaskResult struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + NodeResult `json:",inline" protobuf:"bytes,2,opt,name=nodeResult"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowTaskResultList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []WorkflowTaskResult `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go new file mode 100644 index 00000000..b756aea7 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go @@ -0,0 +1,42 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:resource:shortName=wfts +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +type WorkflowTaskSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Spec WorkflowTaskSetSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + Status WorkflowTaskSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +type WorkflowTaskSetSpec struct { + Tasks map[string]Template `json:"tasks,omitempty" protobuf:"bytes,1,rep,name=tasks"` +} + +type WorkflowTaskSetStatus struct { + Nodes map[string]NodeResult `json:"nodes,omitempty" protobuf:"bytes,1,rep,name=nodes"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowTaskSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []WorkflowTaskSet `json:"items" protobuf:"bytes,2,opt,name=items"` +} + +type NodeResult struct { + Phase NodePhase `json:"phase,omitempty" 
protobuf:"bytes,1,opt,name=phase,casttype=NodePhase"` + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,3,opt,name=outputs"` + Progress Progress `json:"progress,omitempty" protobuf:"bytes,4,opt,name=progress,casttype=Progress"` +} + +func (in NodeResult) Fulfilled() bool { + return in.Phase.Fulfilled() +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go new file mode 100644 index 00000000..c0f348df --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go @@ -0,0 +1,20 @@ +package v1alpha1 + +import ( + "fmt" + "strconv" + "time" +) + +func ParseStringToDuration(durationString string) (time.Duration, error) { + var duration time.Duration + // If no units are attached, treat as seconds + if val, err := strconv.Atoi(durationString); err == nil { + duration = time.Duration(val) * time.Second + } else if parsed, err := time.ParseDuration(durationString); err == nil { + duration = parsed + } else { + return 0, fmt.Errorf("unable to parse %s as a duration: %w", durationString, err) + } + return duration, nil +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go new file mode 100644 index 00000000..912725d4 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go @@ -0,0 +1,118 @@ +package v1alpha1 + +import ( + "fmt" + "regexp" + "sort" + "strings" + + apivalidation "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + workflowFieldNameFmt string = "[a-zA-Z0-9][-a-zA-Z0-9]*" + workflowFieldNameErrMsg string = "name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character" + workflowFieldMaxLength int = 128 +) + +var ( + paramOrArtifactNameRegex = regexp.MustCompile(`^[-a-zA-Z0-9_]+[-a-zA-Z0-9_]*$`) + workflowFieldNameRegex = regexp.MustCompile("^" + workflowFieldNameFmt + "$") +) + +func isValidParamOrArtifactName(p string) []string { + var errs []string + if !paramOrArtifactNameRegex.MatchString(p) { + return append(errs, "Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-' e.g. my_param_1, MY-PARAM-1") + } + return errs +} + +// isValidWorkflowFieldName : workflow field name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character +func isValidWorkflowFieldName(name string) []string { + var errs []string + if len(name) > workflowFieldMaxLength { + errs = append(errs, apivalidation.MaxLenError(workflowFieldMaxLength)) + } + if !workflowFieldNameRegex.MatchString(name) { + msg := workflowFieldNameErrMsg + " (e.g. 
My-name1-2, 123-NAME)" + errs = append(errs, msg) + } + return errs +} + +// validateWorkflowFieldNames accepts a slice of strings and +// verifies that the Name field of the structs are: +// * unique +// * non-empty +// * matches matches our regex requirements +func validateWorkflowFieldNames(names []string, isParamOrArtifact bool) error { + nameSet := make(map[string]bool) + + for i, name := range names { + if name == "" { + return fmt.Errorf("[%d].name is required", i) + } + var errs []string + if isParamOrArtifact { + errs = isValidParamOrArtifactName(name) + } else { + errs = isValidWorkflowFieldName(name) + } + if len(errs) != 0 { + return fmt.Errorf("[%d].name: '%s' is invalid: %s", i, name, strings.Join(errs, ";")) + } + _, ok := nameSet[name] + if ok { + return fmt.Errorf("[%d].name '%s' is not unique", i, name) + } + nameSet[name] = true + } + return nil +} + +// validateNoCycles validates that a dependency graph has no cycles by doing a Depth-First Search +// depGraph is an adjacency list, where key is a node name and value is a list of its dependencies' names +func validateNoCycles(depGraph map[string][]string) error { + visited := make(map[string]bool) + var noCyclesHelper func(currentName string, cycyle []string) error + noCyclesHelper = func(currentName string, cycle []string) error { + if _, ok := visited[currentName]; ok { + return nil + } + depNames, ok := depGraph[currentName] + if !ok { + return nil + } + for _, depName := range depNames { + for _, name := range cycle { + if depName == name { + return fmt.Errorf("dependency cycle detected: %s->%s", strings.Join(cycle, "->"), name) + } + } + cycle = append(cycle, depName) + err := noCyclesHelper(depName, cycle) + if err != nil { + return err + } + cycle = cycle[0 : len(cycle)-1] + } + visited[currentName] = true + return nil + } + names := make([]string, 0) + for name := range depGraph { + names = append(names, name) + } + // sort names here to make sure the error message has consistent ordering + // so that we can verify the error message in unit tests + sort.Strings(names) + + for _, name := range names { + err := noCyclesHelper(name, []string{}) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go new file mode 100644 index 00000000..78065548 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go @@ -0,0 +1,30 @@ +package v1alpha1 + +import ( + "errors" + "regexp" +) + +type Version struct { + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + BuildDate string `json:"buildDate" protobuf:"bytes,2,opt,name=buildDate"` + GitCommit string `json:"gitCommit" protobuf:"bytes,3,opt,name=gitCommit"` + GitTag string `json:"gitTag" protobuf:"bytes,4,opt,name=gitTag"` + GitTreeState string `json:"gitTreeState" protobuf:"bytes,5,opt,name=gitTreeState"` + GoVersion string `json:"goVersion" protobuf:"bytes,6,opt,name=goVersion"` + Compiler string `json:"compiler" protobuf:"bytes,7,opt,name=compiler"` + Platform string `json:"platform" protobuf:"bytes,8,opt,name=platform"` +} + +var verRe = regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)`) + +// MajorMinorPatch returns the major, minor and patch components +// of the version number, or error if this is not a release +// The error path is considered "normal" in a non-release build. 
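+//
+// For example (illustrative values):
+//
+//	maj, min, patch, err := Version{Version: "v3.4.8"}.MajorMinorPatch()
+//	// maj == "3", min == "4", patch == "8", err == nil
+//	_, _, _, err = Version{Version: "latest"}.MajorMinorPatch()
+//	// err != nil: not a formal release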
+func (v Version) MajorMinorPatch() (string, string, string, error) { + matches := verRe.FindStringSubmatch(v.Version) + if matches == nil || matches[1] == "0" { + return ``, ``, ``, errors.New("Not a formal release") + } + return matches[1], matches[2], matches[3], nil +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go new file mode 100644 index 00000000..4027b10d --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go @@ -0,0 +1,22 @@ +package v1alpha1 + +// the workflow's phase +type WorkflowPhase string + +const ( + WorkflowUnknown WorkflowPhase = "" + WorkflowPending WorkflowPhase = "Pending" // pending some set-up - rarely used + WorkflowRunning WorkflowPhase = "Running" // any node has started; pods might not be running yet, the workflow maybe suspended too + WorkflowSucceeded WorkflowPhase = "Succeeded" + WorkflowFailed WorkflowPhase = "Failed" // it maybe that the workflow was terminated + WorkflowError WorkflowPhase = "Error" +) + +func (p WorkflowPhase) Completed() bool { + switch p { + case WorkflowSucceeded, WorkflowFailed, WorkflowError: + return true + default: + return false + } +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go new file mode 100644 index 00000000..1317fc18 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go @@ -0,0 +1,62 @@ +package v1alpha1 + +import ( + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// WorkflowTemplate is the definition of a workflow template resource +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=wftmpl +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +type WorkflowTemplates []WorkflowTemplate + +func (w WorkflowTemplates) Len() int { + return len(w) +} + +func (w WorkflowTemplates) Less(i, j int) bool { + return strings.Compare(w[j].ObjectMeta.Name, w[i].ObjectMeta.Name) > 0 +} + +func (w WorkflowTemplates) Swap(i, j int) { + w[i], w[j] = w[j], w[i] +} + +// WorkflowTemplateList is list of WorkflowTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items WorkflowTemplates `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +var _ TemplateHolder = &WorkflowTemplate{} + +// GetTemplateByName retrieves a defined template by its name +func (wftmpl *WorkflowTemplate) GetTemplateByName(name string) *Template { + for _, t := range wftmpl.Spec.Templates { + if t.Name == name { + return &t + } + } + return nil +} + +// GetResourceScope returns the template scope of workflow template. +func (wftmpl *WorkflowTemplate) GetResourceScope() ResourceScope { + return ResourceScopeNamespaced +} + +// GetWorkflowSpec returns the WorkflowSpec of workflow template. 
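+//
+// A minimal lookup sketch (illustrative), combining the accessors defined in
+// this file to resolve a template's entrypoint:
+//
+//	spec := wftmpl.GetWorkflowSpec()
+//	entry := wftmpl.GetTemplateByName(spec.Entrypoint)
+//	// entry is nil when the entrypoint names no defined template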
+func (wftmpl *WorkflowTemplate) GetWorkflowSpec() *WorkflowSpec { + return &wftmpl.Spec +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go new file mode 100644 index 00000000..27522228 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -0,0 +1,3947 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "hash/fnv" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "slices" + "sort" + "strings" + "time" + + apiv1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + + log "github.com/sirupsen/logrus" + + argoerrs "github.com/argoproj/argo-workflows/v3/errors" +) + +// TemplateType is the type of a template +type TemplateType string + +// Possible template types +const ( + TemplateTypeContainer TemplateType = "Container" + TemplateTypeContainerSet TemplateType = "ContainerSet" + TemplateTypeSteps TemplateType = "Steps" + TemplateTypeScript TemplateType = "Script" + TemplateTypeResource TemplateType = "Resource" + TemplateTypeDAG TemplateType = "DAG" + TemplateTypeSuspend TemplateType = "Suspend" + TemplateTypeData TemplateType = "Data" + TemplateTypeHTTP TemplateType = "HTTP" + TemplateTypePlugin TemplateType = "Plugin" + TemplateTypeUnknown TemplateType = "Unknown" +) + +// NodePhase is a label for the condition of a node at the current time. +type NodePhase string + +// Workflow and node statuses +const ( + // Node is waiting to run + NodePending NodePhase = "Pending" + // Node is running + NodeRunning NodePhase = "Running" + // Node finished with no errors + NodeSucceeded NodePhase = "Succeeded" + // Node was skipped + NodeSkipped NodePhase = "Skipped" + // Node or child of node exited with non-0 code + NodeFailed NodePhase = "Failed" + // Node had an error other than a non 0 exit code + NodeError NodePhase = "Error" + // Node was omitted because its `depends` condition was not met (only relevant in DAGs) + NodeOmitted NodePhase = "Omitted" +) + +// NodeType is the type of a node +type NodeType string + +// Node types +const ( + NodeTypePod NodeType = "Pod" + NodeTypeContainer NodeType = "Container" + NodeTypeSteps NodeType = "Steps" + NodeTypeStepGroup NodeType = "StepGroup" + NodeTypeDAG NodeType = "DAG" + NodeTypeTaskGroup NodeType = "TaskGroup" + NodeTypeRetry NodeType = "Retry" + NodeTypeSkipped NodeType = "Skipped" + NodeTypeSuspend NodeType = "Suspend" + NodeTypeHTTP NodeType = "HTTP" + NodeTypePlugin NodeType = "Plugin" +) + +// ArtifactGCStrategy is the strategy when to delete artifacts for GC. +type ArtifactGCStrategy string + +// ArtifactGCStrategy +const ( + ArtifactGCOnWorkflowCompletion ArtifactGCStrategy = "OnWorkflowCompletion" + ArtifactGCOnWorkflowDeletion ArtifactGCStrategy = "OnWorkflowDeletion" + ArtifactGCNever ArtifactGCStrategy = "Never" + ArtifactGCStrategyUndefined ArtifactGCStrategy = "" +) + +var AnyArtifactGCStrategy = map[ArtifactGCStrategy]bool{ + ArtifactGCOnWorkflowCompletion: true, + ArtifactGCOnWorkflowDeletion: true, +} + +// PodGCStrategy is the strategy when to delete completed pods for GC. 
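+//
+// For example (illustrative): PodGCStrategy("OnPodSuccess").IsValid() reports
+// true, while an unrecognized spelling such as "onPodSuccess" is rejected by
+// the IsValid check below.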
+type PodGCStrategy string + +func (s PodGCStrategy) IsValid() bool { + switch s { + case PodGCOnPodNone, + PodGCOnPodCompletion, + PodGCOnPodSuccess, + PodGCOnWorkflowCompletion, + PodGCOnWorkflowSuccess: + return true + } + return false +} + +// PodGCStrategy +const ( + PodGCOnPodNone PodGCStrategy = "" + PodGCOnPodCompletion PodGCStrategy = "OnPodCompletion" + PodGCOnPodSuccess PodGCStrategy = "OnPodSuccess" + PodGCOnWorkflowCompletion PodGCStrategy = "OnWorkflowCompletion" + PodGCOnWorkflowSuccess PodGCStrategy = "OnWorkflowSuccess" +) + +// VolumeClaimGCStrategy is the strategy to use when deleting volumes from completed workflows +type VolumeClaimGCStrategy string + +const ( + VolumeClaimGCOnCompletion VolumeClaimGCStrategy = "OnWorkflowCompletion" + VolumeClaimGCOnSuccess VolumeClaimGCStrategy = "OnWorkflowSuccess" +) + +type HoldingNameVersion int + +const ( + HoldingNameV1 HoldingNameVersion = 1 + HoldingNameV2 HoldingNameVersion = 2 +) + +// Workflow is the definition of a workflow resource +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=wf +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the workflow" +// +kubebuilder:printcolumn:name="Age",type="date",format="date-time",JSONPath=".status.startedAt",description="When the workflow was started" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Human readable message indicating details about why the workflow is in this condition." +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Workflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec "` + Status WorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// Workflows is a sort interface which sorts running jobs earlier before considering FinishedAt +type Workflows []Workflow + +func (w Workflows) Len() int { return len(w) } +func (w Workflows) Swap(i, j int) { w[i], w[j] = w[j], w[i] } +func (w Workflows) Less(i, j int) bool { + iStart := w[i].ObjectMeta.CreationTimestamp + iFinish := w[i].Status.FinishedAt + jStart := w[j].ObjectMeta.CreationTimestamp + jFinish := w[j].Status.FinishedAt + if iFinish.IsZero() && jFinish.IsZero() { + return !iStart.Before(&jStart) + } + if iFinish.IsZero() && !jFinish.IsZero() { + return true + } + if !iFinish.IsZero() && jFinish.IsZero() { + return false + } + return jFinish.Before(&iFinish) +} + +type WorkflowPredicate = func(wf Workflow) bool + +func (w Workflows) Filter(predicate WorkflowPredicate) Workflows { + var out Workflows + for _, wf := range w { + if predicate(wf) { + out = append(out, wf) + } + } + return out +} + +// GetTTLStrategy return TTLStrategy based on Order of precedence: +// 1. Workflow, 2. WorkflowTemplate, 3. 
Workflowdefault +func (w *Workflow) GetTTLStrategy() *TTLStrategy { + var ttlStrategy *TTLStrategy + // TTLStrategy from WorkflowTemplate + if w.Status.StoredWorkflowSpec != nil && w.Status.StoredWorkflowSpec.GetTTLStrategy() != nil { + ttlStrategy = w.Status.StoredWorkflowSpec.GetTTLStrategy() + } + // TTLStrategy from Workflow + if w.Spec.GetTTLStrategy() != nil { + ttlStrategy = w.Spec.GetTTLStrategy() + } + return ttlStrategy +} + +func (w *Workflow) GetExecSpec() *WorkflowSpec { + if w.Status.StoredWorkflowSpec != nil { + return w.Status.StoredWorkflowSpec + } + return &w.Spec +} + +// return the ultimate ArtifactGCStrategy for the Artifact +// (defined on the Workflow level but can be overridden on the Artifact level) +func (w *Workflow) GetArtifactGCStrategy(a *Artifact) ArtifactGCStrategy { + artifactStrategy := a.GetArtifactGC().GetStrategy() + wfStrategy := w.Spec.GetArtifactGC().GetStrategy() + strategy := wfStrategy + if artifactStrategy != ArtifactGCStrategyUndefined { + strategy = artifactStrategy + } + if strategy == ArtifactGCStrategyUndefined { + return ArtifactGCNever + } + return strategy +} + +var ( + WorkflowCreatedAfter = func(t time.Time) WorkflowPredicate { + return func(wf Workflow) bool { + return wf.ObjectMeta.CreationTimestamp.After(t) + } + } + WorkflowFinishedBefore = func(t time.Time) WorkflowPredicate { + return func(wf Workflow) bool { + return !wf.Status.FinishedAt.IsZero() && wf.Status.FinishedAt.Time.Before(t) + } + } + WorkflowRanBetween = func(startTime time.Time, endTime time.Time) WorkflowPredicate { + return func(wf Workflow) bool { + return wf.ObjectMeta.CreationTimestamp.After(startTime) && !wf.Status.FinishedAt.IsZero() && wf.Status.FinishedAt.Time.Before(endTime) + } + } +) + +// WorkflowList is list of Workflow resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items Workflows `json:"items" protobuf:"bytes,2,opt,name=items"` +} + +var _ TemplateHolder = &Workflow{} + +// TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed +type TTLStrategy struct { + // SecondsAfterCompletion is the number of seconds to live after completion + SecondsAfterCompletion *int32 `json:"secondsAfterCompletion,omitempty" protobuf:"bytes,1,opt,name=secondsAfterCompletion"` + // SecondsAfterSuccess is the number of seconds to live after success + SecondsAfterSuccess *int32 `json:"secondsAfterSuccess,omitempty" protobuf:"bytes,2,opt,name=secondsAfterSuccess"` + // SecondsAfterFailure is the number of seconds to live after failure + SecondsAfterFailure *int32 `json:"secondsAfterFailure,omitempty" protobuf:"bytes,3,opt,name=secondsAfterFailure"` +} + +// WorkflowSpec is the specification of a Workflow. +type WorkflowSpec struct { + // Templates is a list of workflow templates used in a workflow + // +patchStrategy=merge + // +patchMergeKey=name + Templates []Template `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=templates"` + + // Entrypoint is a template reference to the starting point of the workflow. + Entrypoint string `json:"entrypoint,omitempty" protobuf:"bytes,2,opt,name=entrypoint"` + + // Arguments contain the parameters and artifacts sent to the workflow entrypoint + // Parameters are referencable globally using the 'workflow' variable prefix. + // e.g. 
{{workflow.parameters.myparam}} + Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` + + // ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as. + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,4,opt,name=serviceAccountName"` + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. + // ServiceAccountName of ExecutorConfig must be specified if this value is false. + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,28,opt,name=automountServiceAccountToken"` + + // Executor holds configurations of executor containers of the workflow. + Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,29,opt,name=executor"` + + // Volumes is a list of volumes that can be mounted by containers in a workflow. + // +patchStrategy=merge + // +patchMergeKey=name + Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,5,opt,name=volumes"` + + // VolumeClaimTemplates is a list of claims that containers are allowed to reference. + // The Workflow controller will create the claims at the beginning of the workflow + // and delete the claims upon completion of the workflow + VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,6,opt,name=volumeClaimTemplates"` + + // Parallelism limits the max total parallel pods that can execute at the same time in a workflow + Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,7,opt,name=parallelism"` + + // ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config. + ArtifactRepositoryRef *ArtifactRepositoryRef `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,8,opt,name=artifactRepositoryRef"` + + // Suspend will suspend the workflow and prevent execution of any future steps in the workflow + Suspend *bool `json:"suspend,omitempty" protobuf:"bytes,9,opt,name=suspend"` + + // NodeSelector is a selector which will result in all pods of the workflow + // to be scheduled on the selected node(s). This is able to be overridden by + // a nodeSelector specified in the template. + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,10,opt,name=nodeSelector"` + + // Affinity sets the scheduling constraints for all pods in the workflow. + // Can be overridden by an affinity specified in the template + Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,11,opt,name=affinity"` + + // Tolerations to apply to workflow pods. + // +patchStrategy=merge + // +patchMergeKey=key + Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,12,opt,name=tolerations"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
+ // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +patchStrategy=merge + // +patchMergeKey=name + ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,13,opt,name=imagePullSecrets"` + + // Host networking requested for this workflow pod. Default to false. + HostNetwork *bool `json:"hostNetwork,omitempty" protobuf:"bytes,14,opt,name=hostNetwork"` + + // Set DNS policy for workflow pods. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,15,opt,name=dnsPolicy"` + + // PodDNSConfig defines the DNS parameters of a pod in addition to + // those generated from DNSPolicy. + DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,16,opt,name=dnsConfig"` + + // OnExit is a template reference which is invoked at the end of the + // workflow, irrespective of the success, failure, or error of the + // primary workflow. + OnExit string `json:"onExit,omitempty" protobuf:"bytes,17,opt,name=onExit"` + + // TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it + // Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be + // deleted after the time to live expires. If this field is unset, + // the controller config map will hold the default values. + TTLStrategy *TTLStrategy `json:"ttlStrategy,omitempty" protobuf:"bytes,30,opt,name=ttlStrategy"` + + // Optional duration in seconds relative to the workflow start time which the workflow is + // allowed to run before the controller terminates the workflow. A value of zero is used to + // terminate a Running workflow + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,19,opt,name=activeDeadlineSeconds"` + + // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,20,opt,name=priority"` + + // Set scheduler name for all pods. + // Will be overridden if container/script template's scheduler name is set. + // Default scheduler will be used if neither specified. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,21,opt,name=schedulerName"` + + // PodGC describes the strategy to use when deleting completed pods + PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"` + + // PriorityClassName to apply to workflow pods. + PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,23,opt,name=podPriorityClassName"` + + // Priority to apply to workflow pods. + // DEPRECATED: Use PodPriorityClassName instead. + PodPriority *int32 `json:"podPriority,omitempty" protobuf:"bytes,24,opt,name=podPriority"` + + // +patchStrategy=merge + // +patchMergeKey=ip + HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,25,opt,name=hostAliases"` + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. 
See type description for default values of each field. + // +optional + SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,26,opt,name=securityContext"` + + // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of + // container fields which are not strings (e.g. resource limits). + PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,27,opt,name=podSpecPatch"` + + // PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. + // Controller will automatically add the selector with workflow name, if selector is empty. + // Optional: Defaults to empty. + // +optional + PodDisruptionBudget *policyv1.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"` + + // Metrics are a list of metrics emitted from this Workflow + Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,32,opt,name=metrics"` + + // Shutdown will shutdown the workflow according to its ShutdownStrategy + Shutdown ShutdownStrategy `json:"shutdown,omitempty" protobuf:"bytes,33,opt,name=shutdown,casttype=ShutdownStrategy"` + + // WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution + WorkflowTemplateRef *WorkflowTemplateRef `json:"workflowTemplateRef,omitempty" protobuf:"bytes,34,opt,name=workflowTemplateRef"` + + // Synchronization holds synchronization lock configuration for this Workflow + Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,35,opt,name=synchronization,casttype=Synchronization"` + + // VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows + VolumeClaimGC *VolumeClaimGC `json:"volumeClaimGC,omitempty" protobuf:"bytes,36,opt,name=volumeClaimGC,casttype=VolumeClaimGC"` + + // RetryStrategy for all templates in the workflow. 
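+	// For example (illustrative), a workflow-level "retryStrategy: {limit: 3}"
+	// is inherited by every template that does not declare its own retryStrategy.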
+ RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,37,opt,name=retryStrategy"` + + // PodMetadata defines additional metadata that should be applied to workflow pods + PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,38,opt,name=podMetadata"` + + // TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level + TemplateDefaults *Template `json:"templateDefaults,omitempty" protobuf:"bytes,39,opt,name=templateDefaults"` + + // ArchiveLogs indicates if the container logs should be archived + ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,40,opt,name=archiveLogs"` + + // Hooks holds the lifecycle hook which is invoked at lifecycle of + // step, irrespective of the success, failure, or error status of the primary step + Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,41,opt,name=hooks"` + + // WorkflowMetadata contains some metadata of the workflow to refer to + WorkflowMetadata *WorkflowMetadata `json:"workflowMetadata,omitempty" protobuf:"bytes,42,opt,name=workflowMetadata"` + + // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts + // unless Artifact.ArtifactGC is specified, which overrides this) + ArtifactGC *WorkflowLevelArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,43,opt,name=artifactGC"` +} + +type LabelValueFrom struct { + Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"` +} + +type WorkflowMetadata struct { + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,1,rep,name=labels"` + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` + LabelsFrom map[string]LabelValueFrom `json:"labelsFrom,omitempty" protobuf:"bytes,3,rep,name=labelsFrom"` +} + +func (in *WorkflowMetadata) AsObjectMeta() *metav1.ObjectMeta { + return &metav1.ObjectMeta{Labels: in.Labels, Annotations: in.Annotations} +} + +func (wfs *WorkflowSpec) GetExitHook(args Arguments) *LifecycleHook { + if !wfs.HasExitHook() { + return nil + } + if wfs.OnExit != "" { + return &LifecycleHook{Template: wfs.OnExit, Arguments: args} + } + return wfs.Hooks.GetExitHook().WithArgs(args) +} + +func (wfs *WorkflowSpec) HasExitHook() bool { + return (wfs.Hooks != nil && wfs.Hooks.HasExitHook()) || wfs.OnExit != "" +} + +// GetVolumeClaimGC returns the VolumeClaimGC that was defined in the workflow spec. If none was provided, a default value is returned. +func (wfs WorkflowSpec) GetVolumeClaimGC() *VolumeClaimGC { + // If no volumeClaimGC strategy was provided, we default to the equivalent of "OnSuccess" + // to match the existing behavior for back-compat + if wfs.VolumeClaimGC == nil { + return &VolumeClaimGC{Strategy: VolumeClaimGCOnSuccess} + } + + return wfs.VolumeClaimGC +} + +// ArtifactGC returns the ArtifactGC that was defined in the workflow spec. If none was provided, a default value is returned. 
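+//
+// A minimal sketch of the default resolution (illustrative): with no
+// artifactGC configured, the returned strategy is ArtifactGCStrategyUndefined,
+// which Workflow.GetArtifactGCStrategy above resolves to ArtifactGCNever.
+//
+//	s := WorkflowSpec{}.GetArtifactGC().GetStrategy()
+//	// s == ArtifactGCStrategyUndefined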
+func (wfs WorkflowSpec) GetArtifactGC() *ArtifactGC { + if wfs.ArtifactGC == nil { + return &ArtifactGC{Strategy: ArtifactGCStrategyUndefined} + } + + return &wfs.ArtifactGC.ArtifactGC +} + +func (wfs WorkflowSpec) GetTTLStrategy() *TTLStrategy { + return wfs.TTLStrategy +} + +// GetSemaphoreKeys will return list of semaphore configmap keys which are configured in the workflow +// Example key format namespace/configmapname (argo/my-config) +// Return []string +func (wf *Workflow) GetSemaphoreKeys() []string { + keyMap := make(map[string]bool) + namespace := wf.Namespace + var templates []Template + if wf.Spec.WorkflowTemplateRef == nil { + templates = wf.Spec.Templates + if wf.Spec.Synchronization != nil { + for _, configMapRef := range wf.Spec.Synchronization.getSemaphoreConfigMapRefs() { + key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name) + keyMap[key] = true + } + } + } else if wf.Status.StoredWorkflowSpec != nil { + templates = wf.Status.StoredWorkflowSpec.Templates + if wf.Status.StoredWorkflowSpec.Synchronization != nil { + for _, configMapRef := range wf.Status.StoredWorkflowSpec.Synchronization.getSemaphoreConfigMapRefs() { + key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name) + keyMap[key] = true + } + } + } + + for _, tmpl := range templates { + if tmpl.Synchronization != nil { + for _, configMapRef := range tmpl.Synchronization.getSemaphoreConfigMapRefs() { + key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name) + keyMap[key] = true + } + } + } + var semaphoreKeys []string + for key := range keyMap { + semaphoreKeys = append(semaphoreKeys, key) + } + return semaphoreKeys +} + +type ShutdownStrategy string + +const ( + ShutdownStrategyTerminate ShutdownStrategy = "Terminate" + ShutdownStrategyStop ShutdownStrategy = "Stop" + ShutdownStrategyNone ShutdownStrategy = "" +) + +func (s ShutdownStrategy) Enabled() bool { + return s != ShutdownStrategyNone +} + +func (s ShutdownStrategy) ShouldExecute(isOnExitPod bool) bool { + switch s { + case ShutdownStrategyTerminate: + return false + case ShutdownStrategyStop: + return isOnExitPod + default: + return true + } +} + +// +kubebuilder:validation:Type=array +type ParallelSteps struct { + Steps []WorkflowStep `json:"-" protobuf:"bytes,1,rep,name=steps"` +} + +// WorkflowStep is an anonymous list inside of ParallelSteps (i.e. it does not have a key), so it needs its own +// custom Unmarshaller +func (p *ParallelSteps) UnmarshalJSON(value []byte) error { + // Since we are writing a custom unmarshaller, we have to enforce the "DisallowUnknownFields" requirement manually. 
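+ // A json.Decoder with DisallowUnknownFields() only rejects unknown keys when
+ // decoding directly into a struct; the first pass below decodes into a
+ // []map[string]interface{}, where every key is accepted, so the check is
+ // reimplemented by hand against the struct's json tags.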
+
+ // First, get a generic representation of the contents
+ var candidate []map[string]interface{}
+ err := json.Unmarshal(value, &candidate)
+ if err != nil {
+ return err
+ }
+
+ // Generate a list of all the available JSON fields of the WorkflowStep struct
+ availableFields := map[string]bool{}
+ reflectType := reflect.TypeOf(WorkflowStep{})
+ for i := 0; i < reflectType.NumField(); i++ {
+ cleanString := strings.ReplaceAll(reflectType.Field(i).Tag.Get("json"), ",omitempty", "")
+ availableFields[cleanString] = true
+ }
+
+ // Enforce that no unknown fields are present
+ for _, step := range candidate {
+ for key := range step {
+ if _, ok := availableFields[key]; !ok {
+ return fmt.Errorf(`json: unknown field "%s"`, key)
+ }
+ }
+ }
+
+ // Finally, attempt to fully unmarshal the struct
+ err = json.Unmarshal(value, &p.Steps)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (p ParallelSteps) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.Steps)
+}
+
+func (b ParallelSteps) OpenAPISchemaType() []string {
+ return []string{"array"}
+}
+
+func (b ParallelSteps) OpenAPISchemaFormat() string { return "" }
+
+func (wfs *WorkflowSpec) HasPodSpecPatch() bool {
+ return wfs.PodSpecPatch != ""
+}
+
+// Template is a reusable and composable unit of execution in a workflow
+type Template struct {
+ // Name is the name of the template
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // Inputs describe what input parameters and artifacts are supplied to this template
+ Inputs Inputs `json:"inputs,omitempty" protobuf:"bytes,5,opt,name=inputs"`
+
+ // Outputs describe the parameters and artifacts that this template produces
+ Outputs Outputs `json:"outputs,omitempty" protobuf:"bytes,6,opt,name=outputs"`
+
+ // NodeSelector is a selector to schedule this step of the workflow to be
+ // run on the selected node(s). Overrides the selector set at the workflow level.
+ NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,opt,name=nodeSelector"`
+
+ // Affinity sets the pod's scheduling constraints
+ // Overrides the affinity set at the workflow level (if any)
+ Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,8,opt,name=affinity"`
+
+ // Metadata sets the pod's metadata, i.e. annotations and labels
+ Metadata Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"`
+
+ // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness
+ Daemon *bool `json:"daemon,omitempty" protobuf:"bytes,10,opt,name=daemon"`
+
+ // Steps define a series of sequential/parallel workflow steps
+ Steps []ParallelSteps `json:"steps,omitempty" protobuf:"bytes,11,opt,name=steps"`
+
+ // Container is the main container image to run in the pod
+ Container *apiv1.Container `json:"container,omitempty" protobuf:"bytes,12,opt,name=container"`
+
+ // ContainerSet groups multiple containers within a single pod.
+ ContainerSet *ContainerSetTemplate `json:"containerSet,omitempty" protobuf:"bytes,40,opt,name=containerSet"`
+
+ // Script runs a portion of code against an interpreter
+ Script *ScriptTemplate `json:"script,omitempty" protobuf:"bytes,13,opt,name=script"`
+
+ // Resource template subtype which can run k8s resources
+ Resource *ResourceTemplate `json:"resource,omitempty" protobuf:"bytes,14,opt,name=resource"`
+
+ // DAG template subtype which runs a DAG
+ DAG *DAGTemplate `json:"dag,omitempty" protobuf:"bytes,15,opt,name=dag"`
+
+ // Suspend template subtype which can suspend a workflow when reaching the step
+ Suspend *SuspendTemplate `json:"suspend,omitempty" protobuf:"bytes,16,opt,name=suspend"`
+
+ // Data is a data template
+ Data *Data `json:"data,omitempty" protobuf:"bytes,39,opt,name=data"`
+
+ // HTTP makes an HTTP request
+ HTTP *HTTP `json:"http,omitempty" protobuf:"bytes,42,opt,name=http"`
+
+ // Plugin is a plugin template
+ Plugin *Plugin `json:"plugin,omitempty" protobuf:"bytes,43,opt,name=plugin"`
+
+ // Volumes is a list of volumes that can be mounted by containers in a template.
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,17,opt,name=volumes"`
+
+ // InitContainers is a list of containers which run before the main container.
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ InitContainers []UserContainer `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,18,opt,name=initContainers"`
+
+ // Sidecars is a list of containers which run alongside the main container
+ // Sidecars are automatically killed when the main container completes
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ Sidecars []UserContainer `json:"sidecars,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,19,opt,name=sidecars"`
+
+ // Location in which all files related to the step will be stored (logs, artifacts, etc...).
+ // Can be overridden by individual items in Outputs. If omitted, will use the default
+ // artifact repository location configured in the controller, appended with the
+ // <workflowname>/<nodename> in the key.
+ ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,20,opt,name=archiveLocation"`
+
+ // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+ // before the system actively tries to terminate the pod; value must be positive integer
+ // This field is only applicable to container and script templates.
+ ActiveDeadlineSeconds *intstr.IntOrString `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,21,opt,name=activeDeadlineSeconds"`
+
+ // RetryStrategy describes how to retry a template when it fails
+ RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,22,opt,name=retryStrategy"`
+
+ // Parallelism limits the max total parallel pods that can execute at the same time within the
+ // boundaries of this template invocation. If additional steps/dag templates are invoked, the
+ // pods created by those templates will not be counted towards this total.
+ Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,23,opt,name=parallelism"`
+
+ // FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this
+ // template is expanded with `withItems`, etc.
+ FailFast *bool `json:"failFast,omitempty" protobuf:"varint,41,opt,name=failFast"` + + // Tolerations to apply to workflow pods. + // +patchStrategy=merge + // +patchMergeKey=key + Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,24,opt,name=tolerations"` + + // If specified, the pod will be dispatched by specified scheduler. + // Or it will be dispatched by workflow scope scheduler if specified. + // If neither specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,25,opt,name=schedulerName"` + + // PriorityClassName to apply to workflow pods. + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,26,opt,name=priorityClassName"` + + // Priority to apply to workflow pods. + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,27,opt,name=priority"` + + // ServiceAccountName to apply to workflow pods + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,28,opt,name=serviceAccountName"` + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. + // ServiceAccountName of ExecutorConfig must be specified if this value is false. + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,32,opt,name=automountServiceAccountToken"` + + // Executor holds configurations of the executor container. + Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,33,opt,name=executor"` + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod spec + // +patchStrategy=merge + // +patchMergeKey=ip + HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,29,opt,name=hostAliases"` + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,30,opt,name=securityContext"` + + // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of + // container fields which are not strings (e.g. resource limits). + PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,31,opt,name=podSpecPatch"` + + // Metrics are a list of metrics emitted from this template + Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,35,opt,name=metrics"` + + // Synchronization holds synchronization lock configuration for this template + Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,36,opt,name=synchronization,casttype=Synchronization"` + + // Memoize allows templates to use outputs generated from already executed templates + Memoize *Memoize `json:"memoize,omitempty" protobuf:"bytes,37,opt,name=memoize"` + + // Timeout allows to set the total node execution timeout duration counting from the node's start time. + // This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates. + Timeout string `json:"timeout,omitempty" protobuf:"bytes,38,opt,name=timeout"` +} + +// SetType will set the template object based on template type. 
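+//
+// Illustrative usage (hypothetical values, not part of this patch):
+//
+//	tmpl.Container = &apiv1.Container{Image: "alpine:3.19"}
+//	tmpl.SetType(TemplateTypeContainer) // keeps Container, nils Steps, DAG, Script, etc.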
+func (tmpl *Template) SetType(tmplType TemplateType) {
+ switch tmplType {
+ case TemplateTypeSteps:
+ tmpl.setTemplateObjs(tmpl.Steps, nil, nil, nil, nil, nil, nil)
+ case TemplateTypeDAG:
+ tmpl.setTemplateObjs(nil, tmpl.DAG, nil, nil, nil, nil, nil)
+ case TemplateTypeContainer:
+ tmpl.setTemplateObjs(nil, nil, tmpl.Container, nil, nil, nil, nil)
+ case TemplateTypeScript:
+ tmpl.setTemplateObjs(nil, nil, nil, tmpl.Script, nil, nil, nil)
+ case TemplateTypeResource:
+ tmpl.setTemplateObjs(nil, nil, nil, nil, tmpl.Resource, nil, nil)
+ case TemplateTypeData:
+ tmpl.setTemplateObjs(nil, nil, nil, nil, nil, tmpl.Data, nil)
+ case TemplateTypeSuspend:
+ tmpl.setTemplateObjs(nil, nil, nil, nil, nil, nil, tmpl.Suspend)
+ }
+}
+
+func (tmpl *Template) setTemplateObjs(steps []ParallelSteps, dag *DAGTemplate, container *apiv1.Container, script *ScriptTemplate, resource *ResourceTemplate, data *Data, suspend *SuspendTemplate) {
+ tmpl.Steps = steps
+ tmpl.DAG = dag
+ tmpl.Container = container
+ tmpl.Script = script
+ tmpl.Resource = resource
+ tmpl.Data = data
+ tmpl.Suspend = suspend
+}
+
+// GetBaseTemplate returns a base template content.
+func (tmpl *Template) GetBaseTemplate() *Template {
+ baseTemplate := tmpl.DeepCopy()
+ baseTemplate.Inputs = Inputs{}
+ return baseTemplate
+}
+
+func (tmpl *Template) HasPodSpecPatch() bool {
+ return tmpl.PodSpecPatch != ""
+}
+
+func (tmpl *Template) GetSidecarNames() []string {
+ var containerNames []string
+ for _, s := range tmpl.Sidecars {
+ containerNames = append(containerNames, s.Name)
+ }
+ return containerNames
+}
+
+func (tmpl *Template) IsFailFast() bool {
+ return tmpl.FailFast != nil && *tmpl.FailFast
+}
+
+func (tmpl *Template) HasParallelism() bool {
+ return tmpl.Parallelism != nil && *tmpl.Parallelism > 0
+}
+
+func (tmpl *Template) GetOutputs() *Outputs {
+ if tmpl != nil {
+ return &tmpl.Outputs
+ }
+ return nil
+}
+
+type Artifacts []Artifact
+
+func (a Artifacts) GetArtifactByName(name string) *Artifact {
+ for _, art := range a {
+ if art.Name == name {
+ return &art
+ }
+ }
+ return nil
+}
+
+// Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another
+type Inputs struct {
+ // Parameters are a list of parameters passed as inputs
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=parameters"`
+
+ // Artifacts are a list of artifacts passed as inputs
+ // +patchStrategy=merge
+ // +patchMergeKey=name
+ Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,opt,name=artifacts"`
+}
+
+func (in Inputs) IsEmpty() bool {
+ return len(in.Parameters) == 0 && len(in.Artifacts) == 0
+}
+
+// Pod metadata
+type Metadata struct {
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,opt,name=annotations"`
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,2,opt,name=labels"`
+}
+
+// Parameter indicates a passed string parameter to a service template with an optional default value
+type Parameter struct {
+ // Name is the parameter name
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Default is the default value to use for an input parameter if a value was not supplied
+ Default *AnyString `json:"default,omitempty" protobuf:"bytes,2,opt,name=default"`
+
+ // Value is the literal value to use for the parameter.
+ // If specified in the context of an input parameter, the value takes precedence over any passed values + Value *AnyString `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` + + // ValueFrom is the source for the output parameter's value + ValueFrom *ValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,4,opt,name=valueFrom"` + + // GlobalName exports an output parameter to the global scope, making it available as + // '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters + GlobalName string `json:"globalName,omitempty" protobuf:"bytes,5,opt,name=globalName"` + + // Enum holds a list of string values to choose from, for the actual value of the parameter + Enum []AnyString `json:"enum,omitempty" protobuf:"bytes,6,rep,name=enum"` + + // Description is the parameter description + Description *AnyString `json:"description,omitempty" protobuf:"bytes,7,opt,name=description"` +} + +// ValueFrom describes a location in which to obtain the value to a parameter +type ValueFrom struct { + // Path in the container to retrieve an output parameter value from in container templates + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // JSONPath of a resource to retrieve an output parameter value from in resource templates + JSONPath string `json:"jsonPath,omitempty" protobuf:"bytes,2,opt,name=jsonPath"` + + // JQFilter expression against the resource object in resource templates + JQFilter string `json:"jqFilter,omitempty" protobuf:"bytes,3,opt,name=jqFilter"` + + // Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` + Event string `json:"event,omitempty" protobuf:"bytes,7,opt,name=event"` + + // Parameter reference to a step or dag task in which to retrieve an output parameter value from + // (e.g. '{{steps.mystep.outputs.myparam}}') + Parameter string `json:"parameter,omitempty" protobuf:"bytes,4,opt,name=parameter"` + + // Supplied value to be filled in directly, either through the CLI, API, etc. + Supplied *SuppliedValueFrom `json:"supplied,omitempty" protobuf:"bytes,6,opt,name=supplied"` + + // ConfigMapKeyRef is configmap selector for input parameter configuration + ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,9,opt,name=configMapKeyRef"` + + // Default specifies a value to be used if retrieving the value from the specified source fails + Default *AnyString `json:"default,omitempty" protobuf:"bytes,5,opt,name=default"` + + // Expression, if defined, is evaluated to specify the value for the parameter + Expression string `json:"expression,omitempty" protobuf:"bytes,8,rep,name=expression"` +} + +func (p *Parameter) HasValue() bool { + return p.Value != nil || p.Default != nil || p.ValueFrom != nil +} + +func (p *Parameter) GetValue() string { + if p.Value != nil { + return p.Value.String() + } + if p.Default != nil { + return p.Default.String() + } + return "" +} + +// SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc. +type SuppliedValueFrom struct{} + +// Artifact indicates an artifact to place at a specified path +type Artifact struct { + // name of the artifact. must be unique within a template's inputs/outputs. 
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Path is the container path to the artifact
+ Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+
+ // mode bits to use on this file, must be a value between 0 and 0777
+ // set when loading input artifacts.
+ Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
+
+ // From allows an artifact to reference an artifact from a previous step
+ From string `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+ // ArtifactLocation contains the location of the artifact
+ ArtifactLocation `json:",inline" protobuf:"bytes,5,opt,name=artifactLocation"`
+
+ // GlobalName exports an output artifact to the global scope, making it available as
+ // '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
+ GlobalName string `json:"globalName,omitempty" protobuf:"bytes,6,opt,name=globalName"`
+
+ // Archive controls how the artifact will be saved to the artifact repository.
+ Archive *ArchiveStrategy `json:"archive,omitempty" protobuf:"bytes,7,opt,name=archive"`
+
+ // Make this artifact optional, in case it is not generated or does not exist
+ Optional bool `json:"optional,omitempty" protobuf:"varint,8,opt,name=optional"`
+
+ // SubPath allows an artifact to be sourced from a subpath within the specified source
+ SubPath string `json:"subPath,omitempty" protobuf:"bytes,9,opt,name=subPath"`
+
+ // If mode is set, apply the permission recursively into the artifact if it is a folder
+ RecurseMode bool `json:"recurseMode,omitempty" protobuf:"varint,10,opt,name=recurseMode"`
+
+ // FromExpression, if defined, is evaluated to specify the value for the artifact
+ FromExpression string `json:"fromExpression,omitempty" protobuf:"bytes,11,opt,name=fromExpression"`
+
+ // ArtifactGC describes the strategy to use when deleting an artifact from completed or deleted workflows
+ ArtifactGC *ArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,12,opt,name=artifactGC"`
+
+ // Has this been deleted?
+ Deleted bool `json:"deleted,omitempty" protobuf:"varint,13,opt,name=deleted"`
+}
+
+// GetArtifactGC returns the ArtifactGC that was defined by the artifact. If none was provided, a default value is returned.
+func (a *Artifact) GetArtifactGC() *ArtifactGC {
+ if a.ArtifactGC == nil {
+ return &ArtifactGC{Strategy: ArtifactGCStrategyUndefined}
+ }
+
+ return a.ArtifactGC
+}
+
+// CleanPath validates and cleans the artifact path.
+func (a *Artifact) CleanPath() error {
+ if a.Path == "" {
+ return argoerrs.InternalErrorf("Artifact '%s' did not specify a path", a.Name)
+ }
+
+ // ensure path is separated by filepath.Separator (aka os.PathSeparator).
+ // This ensures e.g. on windows /foo/bar is translated to \foo\bar first - otherwise the regexps below wouldn't trigger.
+ path := filepath.FromSlash(a.Path)
+
+ // Ensure that the artifact path does not use directory traversal to escape a
+ // "safe" sub-directory, assuming malicious user input is present. For example:
+ // inputs:
+ //   artifacts:
+ //     - name: a1
+ //       path: /tmp/safe/{{ inputs.parameters.user-input }}
+ //
+ // Any resolved path should always be within the /tmp/safe/ directory.
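+ //
+ // e.g. a hypothetical resolved path of /tmp/safe/../../etc/passwd escapes
+ // /tmp/safe/ and must be rejected by the checks below.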
+ safeDir := "" + slashDotDotRe := regexp.MustCompile(fmt.Sprintf(`%c..$`, os.PathSeparator)) + if runtime.GOOS == "windows" { + // windows PathSeparator is \ and needs escaping + slashDotDotRe = regexp.MustCompile(fmt.Sprintf(`\%c..$`, os.PathSeparator)) + } + + slashDotDotSlash := fmt.Sprintf(`%c..%c`, os.PathSeparator, os.PathSeparator) + if strings.Contains(path, slashDotDotSlash) { + safeDir = path[:strings.Index(path, slashDotDotSlash)] + } else if slashDotDotRe.FindStringIndex(path) != nil { + safeDir = path[:len(path)-3] + } + cleaned := filepath.Clean(path) + safeDirWithSlash := fmt.Sprintf(`%s%c`, safeDir, os.PathSeparator) + if len(safeDir) > 0 && (!strings.HasPrefix(cleaned, safeDirWithSlash) || len(cleaned) <= len(safeDirWithSlash)) { + return argoerrs.InternalErrorf("Artifact '%s' attempted to use a path containing '..'. Directory traversal is not permitted", a.Name) + } + a.Path = cleaned + return nil +} + +// PodGC describes how to delete completed pods as they complete +type PodGC struct { + // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods + Strategy PodGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=PodGCStrategy"` + // LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue. + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,2,opt,name=labelSelector"` + // DeleteDelayDuration specifies the duration before pods in the GC queue get deleted. + DeleteDelayDuration string `json:"deleteDelayDuration,omitempty" protobuf:"bytes,3,opt,name=deleteDelayDuration"` +} + +// GetLabelSelector gets the label selector from podGC. +func (podGC *PodGC) GetLabelSelector() (labels.Selector, error) { + if podGC == nil { + return labels.Nothing(), nil + } + if podGC.LabelSelector == nil { + return labels.Everything(), nil + } + return metav1.LabelSelectorAsSelector(podGC.LabelSelector) +} + +func (podGC *PodGC) GetStrategy() PodGCStrategy { + if podGC != nil { + return podGC.Strategy + } + return PodGCOnPodNone +} + +func (podGC *PodGC) GetDeleteDelayDuration() (time.Duration, error) { + if podGC == nil || podGC.DeleteDelayDuration == "" { + return -1, nil // negative return means the field was omitted + } + return ParseStringToDuration(podGC.DeleteDelayDuration) +} + +// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level +type WorkflowLevelArtifactGC struct { + // ArtifactGC is an embedded struct + ArtifactGC `json:",inline" protobuf:"bytes,1,opt,name=artifactGC"` + + // ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails + ForceFinalizerRemoval bool `json:"forceFinalizerRemoval,omitempty" protobuf:"bytes,2,opt,name=forceFinalizerRemoval"` + + // PodSpecPatch holds strategic merge patch to apply against the artgc pod spec. + PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,3,opt,name=podSpecPatch"` +} + +// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed +type ArtifactGC struct { + // Strategy is the strategy to use. 
+ // +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never
+ Strategy ArtifactGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=ArtifactGCStategy"`
+
+ // PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion
+ PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,2,opt,name=podMetadata"`
+
+ // ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
+ ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,3,opt,name=serviceAccountName"`
+}
+
+// GetStrategy returns the ArtifactGCStrategy to use for the workflow
+func (agc *ArtifactGC) GetStrategy() ArtifactGCStrategy {
+ if agc != nil {
+ return agc.Strategy
+ }
+ return ArtifactGCStrategyUndefined
+}
+
+// VolumeClaimGC describes how to delete volumes from completed Workflows
+type VolumeClaimGC struct {
+ // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess"
+ Strategy VolumeClaimGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=VolumeClaimGCStrategy"`
+}
+
+// GetStrategy returns the VolumeClaimGCStrategy to use for the workflow
+func (vgc VolumeClaimGC) GetStrategy() VolumeClaimGCStrategy {
+ if vgc.Strategy == "" {
+ return VolumeClaimGCOnSuccess
+ }
+
+ return vgc.Strategy
+}
+
+// ArchiveStrategy describes how to archive files/directory when saving artifacts
+type ArchiveStrategy struct {
+ Tar *TarStrategy `json:"tar,omitempty" protobuf:"bytes,1,opt,name=tar"`
+ None *NoneStrategy `json:"none,omitempty" protobuf:"bytes,2,opt,name=none"`
+ Zip *ZipStrategy `json:"zip,omitempty" protobuf:"bytes,3,opt,name=zip"`
+}
+
+// TarStrategy will tar and gzip the file or directory when saving
+type TarStrategy struct {
+ // CompressionLevel specifies the gzip compression level to use for the artifact.
+ // Defaults to gzip.DefaultCompression.
+ CompressionLevel *int32 `json:"compressionLevel,omitempty" protobuf:"varint,1,opt,name=compressionLevel"`
+}
+
+// ZipStrategy will unzip zipped input artifacts
+type ZipStrategy struct{}
+
+// NoneStrategy indicates to skip tar process and upload the files or directory tree as independent
+// files. Note that if the artifact is a directory, the artifact driver must support the ability to
+// save/load the directory appropriately.
+type NoneStrategy struct{}
+
+type ArtifactLocationType interface {
+ HasLocation() bool
+ GetKey() (string, error)
+ SetKey(key string) error
+}
+
+// ArtifactLocation describes a location for a single or multiple artifacts.
+// It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).
+// It is also used to describe the location of multiple artifacts such as the archive location
+// of a single workflow step, which the executor will use as a default location to store its files.
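+//
+// Illustrative only, assuming the S3Artifact type declared elsewhere in this
+// package: a location carrying just a key is not yet a full location, e.g.
+//
+//	loc := ArtifactLocation{S3: &S3Artifact{Key: "my-wf/my-node/main.log"}}
+//	_ = loc.HasLocation() // false until bucket/endpoint details are set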
+type ArtifactLocation struct { + // ArchiveLogs indicates if the container logs should be archived + ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"` + + // S3 contains S3 artifact location details + S3 *S3Artifact `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"` + + // Git contains git artifact location details + Git *GitArtifact `json:"git,omitempty" protobuf:"bytes,3,opt,name=git"` + + // HTTP contains HTTP artifact location details + HTTP *HTTPArtifact `json:"http,omitempty" protobuf:"bytes,4,opt,name=http"` + + // Artifactory contains artifactory artifact location details + Artifactory *ArtifactoryArtifact `json:"artifactory,omitempty" protobuf:"bytes,5,opt,name=artifactory"` + + // HDFS contains HDFS artifact location details + HDFS *HDFSArtifact `json:"hdfs,omitempty" protobuf:"bytes,6,opt,name=hdfs"` + + // Raw contains raw artifact location details + Raw *RawArtifact `json:"raw,omitempty" protobuf:"bytes,7,opt,name=raw"` + + // OSS contains OSS artifact location details + OSS *OSSArtifact `json:"oss,omitempty" protobuf:"bytes,8,opt,name=oss"` + + // GCS contains GCS artifact location details + GCS *GCSArtifact `json:"gcs,omitempty" protobuf:"bytes,9,opt,name=gcs"` + + // Azure contains Azure Storage artifact location details + Azure *AzureArtifact `json:"azure,omitempty" protobuf:"bytes,10,opt,name=azure"` +} + +func (a *ArtifactLocation) Get() (ArtifactLocationType, error) { + if a == nil { + return nil, fmt.Errorf("key unsupported: cannot get key for artifact location, because it is invalid") + } else if a.Artifactory != nil { + return a.Artifactory, nil + } else if a.Azure != nil { + return a.Azure, nil + } else if a.Git != nil { + return a.Git, nil + } else if a.GCS != nil { + return a.GCS, nil + } else if a.HDFS != nil { + return a.HDFS, nil + } else if a.HTTP != nil { + return a.HTTP, nil + } else if a.OSS != nil { + return a.OSS, nil + } else if a.Raw != nil { + return a.Raw, nil + } else if a.S3 != nil { + return a.S3, nil + } + return nil, fmt.Errorf("You need to configure artifact storage. More information on how to do this can be found in the docs: https://argo-workflows.readthedocs.io/en/latest/configure-artifact-repository/") +} + +// SetType sets the type of the artifact to type the argument. +// Any existing value is deleted. +func (a *ArtifactLocation) SetType(x ArtifactLocationType) error { + switch v := x.(type) { + case *ArtifactoryArtifact: + a.Artifactory = &ArtifactoryArtifact{} + case *AzureArtifact: + a.Azure = &AzureArtifact{} + case *GCSArtifact: + a.GCS = &GCSArtifact{} + case *HDFSArtifact: + a.HDFS = &HDFSArtifact{} + case *HTTPArtifact: + a.HTTP = &HTTPArtifact{} + case *OSSArtifact: + a.OSS = &OSSArtifact{} + case *RawArtifact: + a.Raw = &RawArtifact{} + case *S3Artifact: + a.S3 = &S3Artifact{} + default: + return fmt.Errorf("set type not supported for type: %v", reflect.TypeOf(v)) + } + return nil +} + +func (a *ArtifactLocation) HasLocationOrKey() bool { + return a.HasLocation() || a.HasKey() +} + +// HasKey returns whether or not an artifact has a key. They may or may not also HasLocation. 
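+//
+// Illustrative only (assumes the GCSArtifact type declared elsewhere in this
+// package):
+//
+//	loc := &ArtifactLocation{GCS: &GCSArtifact{Key: "foo/bar"}}
+//	_ = loc.HasKey() // true, even though HasLocation() is still false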
+func (a *ArtifactLocation) HasKey() bool {
+ key, _ := a.GetKey()
+ return key != ""
+}
+
+// SetKey sets the key to a new value; use path.Join to combine items
+func (a *ArtifactLocation) SetKey(key string) error {
+ v, err := a.Get()
+ if err != nil {
+ return err
+ }
+ return v.SetKey(key)
+}
+
+func (a *ArtifactLocation) AppendToKey(x string) error {
+ key, err := a.GetKey()
+ if err != nil {
+ return err
+ }
+ return a.SetKey(path.Join(key, x))
+}
+
+// Relocate copies all location info from the parameter, except the key.
+// But only if it does not have a location already.
+func (a *ArtifactLocation) Relocate(l *ArtifactLocation) error {
+ if a.HasLocation() {
+ return nil
+ }
+ if l == nil {
+ return fmt.Errorf("template artifact location not set")
+ }
+ key, err := a.GetKey()
+ if err != nil {
+ return err
+ }
+ *a = *l.DeepCopy()
+ return a.SetKey(key)
+}
+
+// HasLocation returns whether or not an artifact has a *full* location defined.
+// An artifact that has a location implicitly has a key (i.e. HasKey() == true).
+func (a *ArtifactLocation) HasLocation() bool {
+ v, err := a.Get()
+ return err == nil && v.HasLocation()
+}
+
+func (a *ArtifactLocation) IsArchiveLogs() bool {
+ return a != nil && a.ArchiveLogs != nil && *a.ArchiveLogs
+}
+
+func (a *ArtifactLocation) GetKey() (string, error) {
+ v, err := a.Get()
+ if err != nil {
+ return "", err
+ }
+ return v.GetKey()
+}
+
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ArtifactRepositoryRef struct {
+ // The name of the config map. Defaults to "artifact-repositories".
+ ConfigMap string `json:"configMap,omitempty" protobuf:"bytes,1,opt,name=configMap"`
+ // The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.
+ Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"`
+}
+
+func (r *ArtifactRepositoryRef) GetConfigMapOr(configMap string) string {
+ if r == nil || r.ConfigMap == "" {
+ return configMap
+ }
+ return r.ConfigMap
+}
+
+func (r *ArtifactRepositoryRef) GetKeyOr(key string) string {
+ if r == nil || r.Key == "" {
+ return key
+ }
+ return r.Key
+}
+
+func (r *ArtifactRepositoryRef) String() string {
+ if r == nil {
+ return "nil"
+ }
+ return fmt.Sprintf("%s#%s", r.ConfigMap, r.Key)
+}
+
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ArtifactRepositoryRefStatus struct {
+ ArtifactRepositoryRef `json:",inline" protobuf:"bytes,1,opt,name=artifactRepositoryRef"`
+ // The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+ // If this ref represents the default artifact repository, rather than a config map.
+ Default bool `json:"default,omitempty" protobuf:"varint,3,opt,name=default"`
+ // The repository the workflow will use. This may be empty before v3.1.
+ ArtifactRepository *ArtifactRepository `json:"artifactRepository,omitempty" protobuf:"bytes,4,opt,name=artifactRepository"` +} + +func (r *ArtifactRepositoryRefStatus) String() string { + if r == nil { + return "nil" + } + if r.Default { + return "default-artifact-repository" + } + return fmt.Sprintf("%s/%s", r.Namespace, r.ArtifactRepositoryRef.String()) +} + +type ArtifactSearchQuery struct { + ArtifactGCStrategies map[ArtifactGCStrategy]bool `json:"artifactGCStrategies,omitempty" protobuf:"bytes,1,rep,name=artifactGCStrategies,castkey=ArtifactGCStrategy"` + ArtifactName string `json:"artifactName,omitempty" protobuf:"bytes,2,rep,name=artifactName"` + TemplateName string `json:"templateName,omitempty" protobuf:"bytes,3,rep,name=templateName"` + NodeId string `json:"nodeId,omitempty" protobuf:"bytes,4,rep,name=nodeId"` + Deleted *bool `json:"deleted,omitempty" protobuf:"varint,5,opt,name=deleted"` + NodeTypes map[NodeType]bool `json:"nodeTypes,omitempty" protobuf:"bytes,6,opt,name=nodeTypes"` +} + +// ArtGCStatus maintains state related to ArtifactGC +type ArtGCStatus struct { + + // have Pods been started to perform this strategy? (enables us not to re-process what we've already done) + StrategiesProcessed map[ArtifactGCStrategy]bool `json:"strategiesProcessed,omitempty" protobuf:"bytes,1,opt,name=strategiesProcessed"` + + // have completed Pods been processed? (mapped by Pod name) + // used to prevent re-processing the Status of a Pod more than once + PodsRecouped map[string]bool `json:"podsRecouped,omitempty" protobuf:"bytes,2,opt,name=podsRecouped"` + + // if this is true, we already checked to see if we need to do it and we don't + NotSpecified bool `json:"notSpecified,omitempty" protobuf:"varint,3,opt,name=notSpecified"` +} + +func (gcStatus *ArtGCStatus) SetArtifactGCStrategyProcessed(strategy ArtifactGCStrategy, processed bool) { + if gcStatus.StrategiesProcessed == nil { + gcStatus.StrategiesProcessed = make(map[ArtifactGCStrategy]bool) + } + gcStatus.StrategiesProcessed[strategy] = processed +} + +func (gcStatus *ArtGCStatus) IsArtifactGCStrategyProcessed(strategy ArtifactGCStrategy) bool { + if gcStatus.StrategiesProcessed != nil { + processed := gcStatus.StrategiesProcessed[strategy] + return processed + } + return false +} + +func (gcStatus *ArtGCStatus) SetArtifactGCPodRecouped(podName string, recouped bool) { + if gcStatus.PodsRecouped == nil { + gcStatus.PodsRecouped = make(map[string]bool) + } + gcStatus.PodsRecouped[podName] = recouped +} + +func (gcStatus *ArtGCStatus) IsArtifactGCPodRecouped(podName string) bool { + if gcStatus.PodsRecouped != nil { + recouped := gcStatus.PodsRecouped[podName] + return recouped + } + return false +} +func (gcStatus *ArtGCStatus) AllArtifactGCPodsRecouped() bool { + if gcStatus.PodsRecouped == nil { + return false + } + for _, recouped := range gcStatus.PodsRecouped { + if !recouped { + return false + } + } + return true +} + +type ArtifactSearchResult struct { + Artifact `protobuf:"bytes,1,opt,name=artifact"` + NodeID string `protobuf:"bytes,2,opt,name=nodeID"` +} + +type ArtifactSearchResults []ArtifactSearchResult + +func (asr ArtifactSearchResults) GetArtifacts() []Artifact { + artifacts := make([]Artifact, len(asr)) + for i, result := range asr { + artifacts[i] = result.Artifact + } + return artifacts +} + +func NewArtifactSearchQuery() *ArtifactSearchQuery { + var q ArtifactSearchQuery + q.ArtifactGCStrategies = make(map[ArtifactGCStrategy]bool) + return &q +} + +func (q *ArtifactSearchQuery) anyArtifactGCStrategy() bool { + for 
_, val := range q.ArtifactGCStrategies { + if val { + return val + } + } + return false +} + +func (w *Workflow) SearchArtifacts(q *ArtifactSearchQuery) ArtifactSearchResults { + + var results ArtifactSearchResults + + for _, n := range w.Status.Nodes { + if q.TemplateName != "" && n.TemplateName != q.TemplateName { + continue + } + if q.NodeId != "" && n.ID != q.NodeId { + continue + } + if q.NodeTypes != nil && !q.NodeTypes[n.Type] { + continue + } + for _, a := range n.GetOutputs().GetArtifacts() { + match := true + if q.anyArtifactGCStrategy() { + // artifact strategy is either based on overall Workflow ArtifactGC Strategy, or + // if it's specified on the individual artifact level that takes priority + artifactStrategy := w.GetArtifactGCStrategy(&a) + if !q.ArtifactGCStrategies[artifactStrategy] { + match = false + } + } + if q.ArtifactName != "" && a.Name != q.ArtifactName { + match = false + } + if q.Deleted != nil && a.Deleted != *q.Deleted { + match = false + } + if match { + results = append(results, ArtifactSearchResult{Artifact: a, NodeID: n.ID}) + } + } + } + return results +} + +// Outputs hold parameters, artifacts, and results from a step +type Outputs struct { + // Parameters holds the list of output parameters produced by a step + // +patchStrategy=merge + // +patchMergeKey=name + Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"` + + // Artifacts holds the list of output artifacts produced by a step + // +patchStrategy=merge + // +patchMergeKey=name + Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"` + + // Result holds the result (stdout) of a script template + Result *string `json:"result,omitempty" protobuf:"bytes,3,opt,name=result"` + + // ExitCode holds the exit code of a script template + ExitCode *string `json:"exitCode,omitempty" protobuf:"bytes,4,opt,name=exitCode"` +} + +func (o *Outputs) GetArtifacts() Artifacts { + if o == nil { + return nil + } + return o.Artifacts +} + +// WorkflowStep is a reference to a template to execute in a series of step +type WorkflowStep struct { + // Name of the step + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // Template is the name of the template to execute as the step + Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` + + // Inline is the template. Template must be empty if this is declared (and vice-versa). + Inline *Template `json:"inline,omitempty" protobuf:"bytes,13,opt,name=inline"` + + // Arguments hold arguments to the template + Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` + + // TemplateRef is the reference to the template resource to execute as the step. + TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"` + + // WithItems expands a step into multiple parallel steps from the items in the list + WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,5,rep,name=withItems"` + + // WithParam expands a step into multiple parallel steps from the value in the parameter, + // which is expected to be a JSON list. 
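+ //
+ // e.g. a parameter whose value is the JSON list ["a","b","c"] would expand
+ // this step into three parallel steps, one per item.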
+ WithParam string `json:"withParam,omitempty" protobuf:"bytes,6,opt,name=withParam"`
+
+ // WithSequence expands a step into a numeric sequence
+ WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,7,opt,name=withSequence"`
+
+ // When is an expression that determines whether the step should execute
+ When string `json:"when,omitempty" protobuf:"bytes,8,opt,name=when"`
+
+ // ContinueOn makes Argo proceed with the following step even if this step fails.
+ // Errors and Failed states can be specified
+ ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,9,opt,name=continueOn"`
+
+ // OnExit is a template reference which is invoked at the end of the
+ // template, irrespective of the success, failure, or error of the
+ // primary template.
+ // DEPRECATED: Use Hooks[exit].Template instead.
+ OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"`
+
+ // Hooks holds the lifecycle hook which is invoked at lifecycle of
+ // step, irrespective of the success, failure, or error status of the primary step
+ Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,12,opt,name=hooks"`
+}
+
+func (step *WorkflowStep) GetName() string {
+ return step.Name
+}
+
+func (step *WorkflowStep) IsDAGTask() bool {
+ return false
+}
+func (step *WorkflowStep) IsWorkflowStep() bool {
+ return true
+}
+
+type LifecycleEvent string
+
+const (
+ ExitLifecycleEvent = "exit"
+)
+
+type LifecycleHooks map[LifecycleEvent]LifecycleHook
+
+func (lchs LifecycleHooks) GetExitHook() *LifecycleHook {
+ hook, ok := lchs[ExitLifecycleEvent]
+ if ok {
+ return &hook
+ }
+ return nil
+}
+
+func (lchs LifecycleHooks) HasExitHook() bool {
+ return lchs.GetExitHook() != nil
+}
+
+type LifecycleHook struct {
+ // Template is the name of the template to execute by the hook
+ Template string `json:"template,omitempty" protobuf:"bytes,1,opt,name=template"`
+ // Arguments hold arguments to the template
+ Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,2,opt,name=arguments"`
+ // TemplateRef is the reference to the template resource to execute by the hook
+ TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,3,opt,name=templateRef"`
+ // Expression is a condition expression for when a node will be retried.
If it evaluates to false, the node will not + // be retried and the retry strategy will be ignored + Expression string `json:"expression,omitempty" protobuf:"bytes,4,opt,name=expression"` +} + +func (lch *LifecycleHook) WithArgs(args Arguments) *LifecycleHook { + lch1 := lch.DeepCopy() + if lch1.Arguments.IsEmpty() { + lch1.Arguments = args + } + return lch1 +} + +var _ TemplateReferenceHolder = &WorkflowStep{} + +func (step *WorkflowStep) HasExitHook() bool { + return (step.Hooks != nil && step.Hooks.HasExitHook()) || step.OnExit != "" +} + +func (step *WorkflowStep) GetExitHook(args Arguments) *LifecycleHook { + if !step.HasExitHook() { + return nil + } + if step.OnExit != "" { + return &LifecycleHook{Template: step.OnExit, Arguments: args} + } + return step.Hooks.GetExitHook().WithArgs(args) +} + +func (step *WorkflowStep) GetTemplate() *Template { + return step.Inline +} + +func (step *WorkflowStep) GetTemplateName() string { + return step.Template +} + +func (step *WorkflowStep) GetTemplateRef() *TemplateRef { + return step.TemplateRef +} + +func (step *WorkflowStep) ShouldExpand() bool { + return len(step.WithItems) != 0 || step.WithParam != "" || step.WithSequence != nil +} + +// Sequence expands a workflow step into numeric range +type Sequence struct { + // Count is number of elements in the sequence (default: 0). Not to be used with end + Count *intstr.IntOrString `json:"count,omitempty" protobuf:"bytes,1,opt,name=count"` + + // Number at which to start the sequence (default: 0) + Start *intstr.IntOrString `json:"start,omitempty" protobuf:"bytes,2,opt,name=start"` + + // Number at which to end the sequence (default: 0). Not to be used with Count + End *intstr.IntOrString `json:"end,omitempty" protobuf:"bytes,3,opt,name=end"` + + // Format is a printf format string to format the value in the sequence + Format string `json:"format,omitempty" protobuf:"bytes,4,opt,name=format"` +} + +// TemplateRef is a reference of template resource. +type TemplateRef struct { + // Name is the resource name of the template. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Template is the name of referred template in the resource. + Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` + // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate). 
+ ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` +} + +// Synchronization holds synchronization lock configuration +type Synchronization struct { + // Semaphore holds the Semaphore configuration - deprecated, use semaphores instead + Semaphore *SemaphoreRef `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"` + // Mutex holds the Mutex lock details - deprecated, use mutexes instead + Mutex *Mutex `json:"mutex,omitempty" protobuf:"bytes,2,opt,name=mutex"` + // v3.6 and after: Semaphores holds the list of Semaphores configuration + Semaphores []*SemaphoreRef `json:"semaphores,omitempty" protobuf:"bytes,3,opt,name=semaphores"` + // v3.6 and after: Mutexes holds the list of Mutex lock details + Mutexes []*Mutex `json:"mutexes,omitempty" protobuf:"bytes,4,opt,name=mutexes"` +} + +func (s *Synchronization) getSemaphoreConfigMapRefs() []*apiv1.ConfigMapKeySelector { + selectors := make([]*apiv1.ConfigMapKeySelector, 0) + if s.Semaphore != nil && s.Semaphore.ConfigMapKeyRef != nil { + selectors = append(selectors, s.Semaphore.ConfigMapKeyRef) + } + + for _, semaphore := range s.Semaphores { + if semaphore.ConfigMapKeyRef != nil { + selectors = append(selectors, semaphore.ConfigMapKeyRef) + } + } + return selectors +} + +// SemaphoreRef is a reference of Semaphore +type SemaphoreRef struct { + // ConfigMapKeyRef is configmap selector for Semaphore configuration + ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,1,opt,name=configMapKeyRef"` + // Namespace is the namespace of the configmap, default: [namespace of workflow] + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` +} + +// Mutex holds Mutex configuration +type Mutex struct { + // name of the mutex + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Namespace is the namespace of the mutex, default: [namespace of workflow] + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` +} + +// WorkflowTemplateRef is a reference to a WorkflowTemplate resource. +type WorkflowTemplateRef struct { + // Name is the resource name of the workflow template. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate). 
+ ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,2,opt,name=clusterScope"` +} + +func (ref *WorkflowTemplateRef) ToTemplateRef(template string) *TemplateRef { + return &TemplateRef{ + Name: ref.Name, + ClusterScope: ref.ClusterScope, + Template: template, + } +} + +type ArgumentsProvider interface { + GetParameterByName(name string) *Parameter + GetArtifactByName(name string) *Artifact +} + +// Arguments to a template +type Arguments struct { + // Parameters is the list of parameters to pass to the template or workflow + // +patchStrategy=merge + // +patchMergeKey=name + Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"` + + // Artifacts is the list of artifacts to pass to the template or workflow + // +patchStrategy=merge + // +patchMergeKey=name + Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"` +} + +func (a Arguments) IsEmpty() bool { + return len(a.Parameters) == 0 && len(a.Artifacts) == 0 +} + +var _ ArgumentsProvider = &Arguments{} + +type Nodes map[string]NodeStatus + +func (n Nodes) FindByDisplayName(name string) *NodeStatus { + return n.Find(NodeWithDisplayName(name)) +} + +func (n Nodes) FindByName(name string) *NodeStatus { + return n.Find(NodeWithName(name)) +} + +func (in Nodes) Any(f func(NodeStatus) bool) bool { + return in.Find(f) != nil +} + +func (n Nodes) Find(f func(NodeStatus) bool) *NodeStatus { + for _, i := range n { + if f(i) { + return &i + } + } + return nil +} + +// Get a NodeStatus from the hashmap of Nodes. +// Return a nil along with an error if non existent. +func (n Nodes) Get(key string) (*NodeStatus, error) { + val, ok := n[key] + if !ok { + return nil, fmt.Errorf("key was not found for %s", key) + } + return &val, nil +} + +// Check if the Nodes map has a key entry +func (n Nodes) Has(key string) bool { + _, err := n.Get(key) + return err == nil +} + +// Get the Phase of a Node +func (n Nodes) GetPhase(key string) (*NodePhase, error) { + val, err := n.Get(key) + if err != nil { + return nil, err + } + return &val.Phase, nil +} + +// Set the status of a node by key +func (n Nodes) Set(key string, status NodeStatus) { + if status.Name == "" { + log.Warnf("Name was not set for key %s", key) + } + if status.ID == "" { + log.Warnf("ID was not set for key %s", key) + } + _, ok := n[key] + if ok { + log.Tracef("Changing NodeStatus for %s to %+v", key, status) + } + n[key] = status +} + +// Delete a node from the Nodes by key +func (n Nodes) Delete(key string) { + has := n.Has(key) + if !has { + log.Warnf("Trying to delete non existent key %s", key) + return + } + delete(n, key) +} + +// Get the name of a node by key +func (n Nodes) GetName(key string) (string, error) { + val, err := n.Get(key) + if err != nil { + return "", err + } + return val.Name, nil +} +func NodeWithName(name string) func(n NodeStatus) bool { + return func(n NodeStatus) bool { return n.Name == name } +} + +func NodeWithDisplayName(name string) func(n NodeStatus) bool { + return func(n NodeStatus) bool { return n.DisplayName == name } +} + +func FailedPodNode(n NodeStatus) bool { + return n.Type == NodeTypePod && n.Phase == NodeFailed +} + +func SucceededPodNode(n NodeStatus) bool { + return n.Type == NodeTypePod && n.Phase == NodeSucceeded +} + +// Children returns the children of the parent. 
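+//
+// Illustrative usage (hypothetical node ID):
+//
+//	for id, child := range wf.Status.Nodes.Children("my-wf-1234") {
+//		fmt.Println(id, child.Phase)
+//	}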
+func (s Nodes) Children(parentNodeId string) Nodes {
+ childNodes := make(Nodes)
+ parentNode, ok := s[parentNodeId]
+ if !ok {
+ return childNodes
+ }
+ for _, childID := range parentNode.Children {
+ if childNode, ok := s[childID]; ok {
+ childNodes[childID] = childNode
+ }
+ }
+ return childNodes
+}
+
+// NestedChildrenStatus takes in a nodeID and returns all of its children. This involves a breadth-first
+// traversal of the node tree, and is needed, for example, to mark all child nodes as failed.
+func (s Nodes) NestedChildrenStatus(parentNodeId string) ([]NodeStatus, error) {
+ parentNode, ok := s[parentNodeId]
+ if !ok {
+ return nil, fmt.Errorf("could not find %s in nodes when searching for nested children", parentNodeId)
+ }
+
+ children := []NodeStatus{}
+ toexplore := []NodeStatus{parentNode}
+
+ for len(toexplore) > 0 {
+ childNode := toexplore[0]
+ toexplore = toexplore[1:]
+ for _, nodeID := range childNode.Children {
+ toexplore = append(toexplore, s[nodeID])
+ }
+
+ if childNode.Name == parentNode.Name {
+ continue
+ }
+ children = append(children, childNode)
+ }
+
+ return children, nil
+}
+
+// Filter returns the subset of the nodes that match the predicate, e.g. only failed nodes
+func (s Nodes) Filter(predicate func(NodeStatus) bool) Nodes {
+ filteredNodes := make(Nodes)
+ for _, node := range s {
+ if predicate(node) {
+ filteredNodes[node.ID] = node
+ }
+ }
+ return filteredNodes
+}
+
+// Map maps the nodes to new values, e.g. `x.Hostname`
+func (s Nodes) Map(f func(x NodeStatus) interface{}) map[string]interface{} {
+ values := make(map[string]interface{})
+ for _, node := range s {
+ values[node.ID] = f(node)
+ }
+ return values
+}
+
+// UserContainer is a container specified by a user.
+type UserContainer struct {
+ apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"`
+
+ // MirrorVolumeMounts will mount the same volumes specified in the main container
+ // to the container (including artifacts), at the same mountPaths. This enables
+ // dind daemon to partially see the same filesystem as the main container in
+ // order to use features such as docker volume binding
+ MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty" protobuf:"varint,2,opt,name=mirrorVolumeMounts"`
+}
+
+// WorkflowStatus contains overall status information about a workflow
+type WorkflowStatus struct {
+ // Phase is a simple, high-level summary of where the workflow is in its lifecycle.
+ // Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded",
+ // "Failed" or "Error" once the workflow has completed.
+ Phase WorkflowPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=WorkflowPhase"`
+
+ // Time at which this workflow started
+ StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,2,opt,name=startedAt"`
+
+ // Time at which this workflow completed
+ FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,3,opt,name=finishedAt"`
+
+ // EstimatedDuration in seconds.
+ EstimatedDuration EstimatedDuration `json:"estimatedDuration,omitempty" protobuf:"varint,16,opt,name=estimatedDuration,casttype=EstimatedDuration"`
+
+ // Progress to completion
+ Progress Progress `json:"progress,omitempty" protobuf:"bytes,17,opt,name=progress,casttype=Progress"`
+
+ // A human readable message indicating details about why the workflow is in this condition.
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+ // Compressed and base64 encoded Nodes map
+ CompressedNodes string `json:"compressedNodes,omitempty" protobuf:"bytes,5,opt,name=compressedNodes"`
+
+ // Nodes is a mapping between a node ID and the node's status.
+ Nodes Nodes `json:"nodes,omitempty" protobuf:"bytes,6,rep,name=nodes"`
+
+ // Whether or not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty.
+ // This will actually be populated with a hash of the offloaded data.
+ OffloadNodeStatusVersion string `json:"offloadNodeStatusVersion,omitempty" protobuf:"bytes,10,rep,name=offloadNodeStatusVersion"`
+
+ // StoredTemplates is a mapping between a template ref and the node's status.
+ StoredTemplates map[string]Template `json:"storedTemplates,omitempty" protobuf:"bytes,9,rep,name=storedTemplates"`
+
+ // PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.
+ // The contents of this list are drained at the end of the workflow.
+ PersistentVolumeClaims []apiv1.Volume `json:"persistentVolumeClaims,omitempty" protobuf:"bytes,7,rep,name=persistentVolumeClaims"`
+
+ // Outputs captures output values and artifact locations produced by the workflow via global outputs
+ Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,8,opt,name=outputs"`
+
+ // Conditions is a list of conditions the Workflow may have
+ Conditions Conditions `json:"conditions,omitempty" protobuf:"bytes,13,rep,name=conditions"`
+
+ // ResourcesDuration is the total for the workflow
+ ResourcesDuration ResourcesDuration `json:"resourcesDuration,omitempty" protobuf:"bytes,12,opt,name=resourcesDuration"`
+
+ // StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.
+ StoredWorkflowSpec *WorkflowSpec `json:"storedWorkflowTemplateSpec,omitempty" protobuf:"bytes,14,opt,name=storedWorkflowTemplateSpec"`
+
+ // Synchronization stores the status of synchronization locks
+ Synchronization *SynchronizationStatus `json:"synchronization,omitempty" protobuf:"bytes,15,opt,name=synchronization"`
+
+ // ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it every time we reconcile.
+ ArtifactRepositoryRef *ArtifactRepositoryRefStatus `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,18,opt,name=artifactRepositoryRef"`
+
+ // ArtifactGCStatus maintains the status of Artifact Garbage Collection
+ ArtifactGCStatus *ArtGCStatus `json:"artifactGCStatus,omitempty" protobuf:"bytes,19,opt,name=artifactGCStatus"`
+
+ // TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.
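+ //
+ // e.g. a hypothetical value of {"node-a": true, "node-b": false} keeps the
+ // workflow from being archived until node-b's task result completes.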
+ TaskResultsCompletionStatus map[string]bool `json:"taskResultsCompletionStatus,omitempty" protobuf:"bytes,20,opt,name=taskResultsCompletionStatus"` +} + +func (ws *WorkflowStatus) MarkTaskResultIncomplete(name string) { + if ws.TaskResultsCompletionStatus == nil { + ws.TaskResultsCompletionStatus = make(map[string]bool) + } + ws.TaskResultsCompletionStatus[name] = false +} + +func (ws *WorkflowStatus) MarkTaskResultComplete(name string) { + if ws.TaskResultsCompletionStatus == nil { + ws.TaskResultsCompletionStatus = make(map[string]bool) + } + ws.TaskResultsCompletionStatus[name] = true +} + +func (ws *WorkflowStatus) TaskResultsInProgress() bool { + for _, value := range ws.TaskResultsCompletionStatus { + if !value { + return true + } + } + return false +} + +func (ws *WorkflowStatus) IsTaskResultIncomplete(name string) bool { + value, found := ws.TaskResultsCompletionStatus[name] + if found { + return !value + } + return false // workflows from older versions do not have this status, so assume completed if this is missing +} + +func (ws *WorkflowStatus) IsOffloadNodeStatus() bool { + return ws.OffloadNodeStatusVersion != "" +} + +func (ws *WorkflowStatus) GetOffloadNodeStatusVersion() string { + return ws.OffloadNodeStatusVersion +} + +func (ws *WorkflowStatus) GetStoredTemplates() []Template { + var out []Template + for _, t := range ws.StoredTemplates { + out = append(out, t) + } + return out +} + +func (wf *Workflow) GetOffloadNodeStatusVersion() string { + return wf.Status.GetOffloadNodeStatusVersion() +} + +type RetryPolicy string + +const ( + RetryPolicyAlways RetryPolicy = "Always" + RetryPolicyOnFailure RetryPolicy = "OnFailure" + RetryPolicyOnError RetryPolicy = "OnError" + RetryPolicyOnTransientError RetryPolicy = "OnTransientError" +) + +// Backoff is a backoff strategy to use within retryStrategy +type Backoff struct { + // Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h") + Duration string `json:"duration,omitempty" protobuf:"varint,1,opt,name=duration"` + // Factor is a factor to multiply the base duration after each failed retry + Factor *intstr.IntOrString `json:"factor,omitempty" protobuf:"varint,2,opt,name=factor"` + // MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. + // It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. + // However, when the workflow fails, the pod's deadline is then overridden by maxDuration. + // This ensures that the workflow does not exceed the specified maximum duration when retries are involved. + MaxDuration string `json:"maxDuration,omitempty" protobuf:"varint,3,opt,name=maxDuration"` +} + +// RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. +// In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". +type RetryNodeAntiAffinity struct{} + +// RetryAffinity prevents running steps on the same host. +type RetryAffinity struct { + NodeAntiAffinity *RetryNodeAntiAffinity `json:"nodeAntiAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAntiAffinity"` +} + +// RetryStrategy provides controls on how to retry a workflow step +type RetryStrategy struct { + // Limit is the maximum number of retry attempts when retrying a container. It does not include the original + // container; the maximum number of total attempts will be `limit + 1`. 
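+	//
+	// Example (illustrative sketch): a limit of 3 permits one original
+	// attempt plus up to three retries:
+	//	limit := intstr.FromInt(3)
+	//	rs := RetryStrategy{Limit: &limit, RetryPolicy: RetryPolicyOnFailure}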
+	Limit *intstr.IntOrString `json:"limit,omitempty" protobuf:"varint,1,opt,name=limit"`
+
+	// RetryPolicy is a policy of NodePhase statuses that will be retried
+	RetryPolicy RetryPolicy `json:"retryPolicy,omitempty" protobuf:"bytes,2,opt,name=retryPolicy,casttype=RetryPolicy"`
+
+	// Backoff is a backoff strategy
+	Backoff *Backoff `json:"backoff,omitempty" protobuf:"bytes,3,opt,name=backoff,casttype=Backoff"`
+
+	// Affinity prevents running workflow's step on the same host
+	Affinity *RetryAffinity `json:"affinity,omitempty" protobuf:"bytes,4,opt,name=affinity"`
+
+	// Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
+	// be retried and the retry strategy will be ignored
+	Expression string `json:"expression,omitempty" protobuf:"bytes,5,opt,name=expression"`
+}
+
+// RetryPolicyActual gets the active retry policy for a strategy.
+// If the policy is explicit, use that.
+// If an expression is given, use a policy of Always so the
+// expression is all that controls the retry for 'least surprise'.
+// Otherwise, if neither is given, default to retry OnFailure.
+func (s RetryStrategy) RetryPolicyActual() RetryPolicy {
+	if s.RetryPolicy != "" {
+		return s.RetryPolicy
+	}
+	if s.Expression == "" {
+		return RetryPolicyOnFailure
+	} else {
+		return RetryPolicyAlways
+	}
+}
+
+// ResourceDuration is the amount of requested resource * the duration that request was used.
+// This is represented as duration in seconds, so can be converted to and from
+// duration (with loss of precision).
+type ResourceDuration int64
+
+func NewResourceDuration(d time.Duration) ResourceDuration {
+	return ResourceDuration(d.Seconds())
+}
+
+func (in ResourceDuration) Duration() time.Duration {
+	return time.Duration(in) * time.Second
+}
+
+func (in ResourceDuration) String() string {
+	return in.Duration().String()
+}
+
+// ResourcesDuration contains each duration by resource requested,
+// e.g. 100m CPU * 1h, 1Gi memory * 1h
+type ResourcesDuration map[apiv1.ResourceName]ResourceDuration
+
+func (in ResourcesDuration) Add(o ResourcesDuration) ResourcesDuration {
+	res := ResourcesDuration{}
+	for n, d := range in {
+		res[n] += d
+	}
+	for n, d := range o {
+		res[n] += d
+	}
+	return res
+}
+
+func (in ResourcesDuration) String() string {
+	var parts []string
+	for n, d := range in {
+		parts = append(parts, fmt.Sprintf("%v*(%s %s)", d, ResourceQuantityDenominator(n).String(), n))
+	}
+	return strings.Join(parts, ",")
+}
+
+func (in ResourcesDuration) IsZero() bool {
+	return len(in) == 0
+}
+
+func ResourceQuantityDenominator(r apiv1.ResourceName) *resource.Quantity {
+	q, ok := map[apiv1.ResourceName]resource.Quantity{
+		apiv1.ResourceMemory:           resource.MustParse("100Mi"),
+		apiv1.ResourceStorage:          resource.MustParse("10Gi"),
+		apiv1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
+	}[r]
+	if !ok {
+		q = resource.MustParse("1")
+	}
+	return &q
+}
+
+type Conditions []Condition
+
+func (cs *Conditions) UpsertCondition(condition Condition) {
+	for index, wfCondition := range *cs {
+		if wfCondition.Type == condition.Type {
+			(*cs)[index] = condition
+			return
+		}
+	}
+	*cs = append(*cs, condition)
+}
+
+func (cs *Conditions) UpsertConditionMessage(condition Condition) {
+	for index, wfCondition := range *cs {
+		if wfCondition.Type == condition.Type {
+			(*cs)[index].Message += ", " + condition.Message
+			return
+		}
+	}
+	*cs = append(*cs, condition)
+}
+
+func (cs *Conditions) JoinConditions(conditions *Conditions) {
+	for _, condition := range *conditions {
+		cs.UpsertCondition(condition)
+	}
+}
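+
+// Example (sketch, not part of the original patch): per-resource durations
+// from two nodes combine with Add, summing seconds resource by resource:
+//
+//	a := ResourcesDuration{apiv1.ResourceCPU: NewResourceDuration(time.Hour)}
+//	b := ResourcesDuration{apiv1.ResourceCPU: NewResourceDuration(30 * time.Minute)}
+//	total := a.Add(b) // 5400 seconds of CPU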
+
+func (cs *Conditions) RemoveCondition(conditionType ConditionType) {
+	for index, wfCondition := range *cs {
+		if wfCondition.Type == conditionType {
+			*cs = append((*cs)[:index], (*cs)[index+1:]...)
+			return
+		}
+	}
+}
+
+func (cs *Conditions) DisplayString(fmtStr string, iconMap map[ConditionType]string) string {
+	if len(*cs) == 0 {
+		return fmt.Sprintf(fmtStr, "Conditions:", "None")
+	}
+	out := fmt.Sprintf(fmtStr, "Conditions:", "")
+	for _, condition := range *cs {
+		conditionMessage := condition.Message
+		if conditionMessage == "" {
+			conditionMessage = string(condition.Status)
+		}
+		conditionPrefix := fmt.Sprintf("%s %s", iconMap[condition.Type], string(condition.Type))
+		out += fmt.Sprintf(fmtStr, conditionPrefix, conditionMessage)
+	}
+	return out
+}
+
+type ConditionType string
+
+const (
+	// ConditionTypeCompleted signifies the workflow has completed
+	ConditionTypeCompleted ConditionType = "Completed"
+	// ConditionTypePodRunning any workflow pods are currently running
+	ConditionTypePodRunning ConditionType = "PodRunning"
+	// ConditionTypeSpecWarning is a warning on the current application spec
+	ConditionTypeSpecWarning ConditionType = "SpecWarning"
+	// ConditionTypeSpecError is an error on the current application spec
+	ConditionTypeSpecError ConditionType = "SpecError"
+	// ConditionTypeMetricsError is an error during metric emission
+	ConditionTypeMetricsError ConditionType = "MetricsError"
+	// ConditionTypeArtifactGCError is an error on artifact garbage collection
+	ConditionTypeArtifactGCError ConditionType = "ArtifactGCError"
+)
+
+type Condition struct {
+	// Type is the type of condition
+	Type ConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"`
+
+	// Status is the status of the condition
+	Status metav1.ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/apimachinery/pkg/apis/meta/v1.ConditionStatus"`
+
+	// Message is the condition message
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+}
+
+// NodeStatus contains status information about an individual node in the workflow
+type NodeStatus struct {
+	// ID is a unique identifier of a node within the workflow
+	// It is implemented as a hash of the node name, which makes the ID deterministic
+	ID string `json:"id" protobuf:"bytes,1,opt,name=id"`
+
+	// Name is unique name in the node tree used to generate the node ID
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// DisplayName is a human readable representation of the node. Unique within a template boundary
+	DisplayName string `json:"displayName,omitempty" protobuf:"bytes,3,opt,name=displayName"`
+
+	// Type indicates type of node
+	Type NodeType `json:"type" protobuf:"bytes,4,opt,name=type,casttype=NodeType"`
+
+	// TemplateName is the template name which this node corresponds to.
+	// Not applicable to virtual nodes (e.g. Retry, StepGroup)
+	TemplateName string `json:"templateName,omitempty" protobuf:"bytes,5,opt,name=templateName"`
+
+	// TemplateRef is the reference to the template resource which this node corresponds to.
+	// Not applicable to virtual nodes (e.g. Retry, StepGroup)
+	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,6,opt,name=templateRef"`
+
+	// TemplateScope is the template scope in which the template of this node was retrieved.
+	TemplateScope string `json:"templateScope,omitempty" protobuf:"bytes,20,opt,name=templateScope"`
+
+	// Phase is a simple, high-level summary of where the node is in its lifecycle.
+	// Can be used as a state machine.
+	// Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded",
+	// "Skipped", "Failed", "Error", or "Omitted" as a final state.
+	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,7,opt,name=phase,casttype=NodePhase"`
+
+	// BoundaryID indicates the node ID of the associated template root node to which this node belongs
+	BoundaryID string `json:"boundaryID,omitempty" protobuf:"bytes,8,opt,name=boundaryID"`
+
+	// A human readable message indicating details about why the node is in this condition.
+	Message string `json:"message,omitempty" protobuf:"bytes,9,opt,name=message"`
+
+	// Time at which this node started
+	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,10,opt,name=startedAt"`
+
+	// Time at which this node completed
+	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,11,opt,name=finishedAt"`
+
+	// EstimatedDuration in seconds.
+	EstimatedDuration EstimatedDuration `json:"estimatedDuration,omitempty" protobuf:"varint,24,opt,name=estimatedDuration,casttype=EstimatedDuration"`
+
+	// Progress to completion
+	Progress Progress `json:"progress,omitempty" protobuf:"bytes,26,opt,name=progress,casttype=Progress"`
+
+	// ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the node completes.
+	ResourcesDuration ResourcesDuration `json:"resourcesDuration,omitempty" protobuf:"bytes,21,opt,name=resourcesDuration"`
+
+	// PodIP captures the IP of the pod for daemoned steps
+	PodIP string `json:"podIP,omitempty" protobuf:"bytes,12,opt,name=podIP"`
+
+	// Daemoned tracks whether or not this node was daemoned and needs to be terminated
+	Daemoned *bool `json:"daemoned,omitempty" protobuf:"varint,13,opt,name=daemoned"`
+
+	// NodeFlag tracks some history of the node, e.g. hooked, retried, etc.
+	NodeFlag *NodeFlag `json:"nodeFlag,omitempty" protobuf:"bytes,27,opt,name=nodeFlag"`
+
+	// Inputs captures input parameter values and artifact locations supplied to this template invocation
+	Inputs *Inputs `json:"inputs,omitempty" protobuf:"bytes,14,opt,name=inputs"`
+
+	// Outputs captures output parameter values and artifact locations produced by this template invocation
+	Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,15,opt,name=outputs"`
+
+	// Children is a list of child node IDs
+	Children []string `json:"children,omitempty" protobuf:"bytes,16,rep,name=children"`
+
+	// OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation.
+	// For every invocation of a template, there are nodes which we considered as "outbound". Essentially,
+	// these are last nodes in the execution sequence to run, before the template is considered completed.
+	// These nodes are then connected as parents to a following step.
+	//
+	// In the case of single pod steps (i.e. container, script, resource templates), this list will be nil
+	// since the pod itself is already considered the "outbound" node.
+	// In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children).
+	// In the case of steps, outbound nodes are all the containers involved in the last step group.
+	// NOTE: since templates are composable, the list of outbound nodes are carried upwards when
+	// a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of
+	// a template will be a superset of the outbound nodes of its last children.
+	OutboundNodes []string `json:"outboundNodes,omitempty" protobuf:"bytes,17,rep,name=outboundNodes"`
+
+	// HostNodeName is the name of the Kubernetes node on which the Pod is running, if applicable
+	HostNodeName string `json:"hostNodeName,omitempty" protobuf:"bytes,22,rep,name=hostNodeName"`
+
+	// MemoizationStatus holds information about cached nodes
+	MemoizationStatus *MemoizationStatus `json:"memoizationStatus,omitempty" protobuf:"varint,23,opt,name=memoizationStatus"`
+
+	// SynchronizationStatus is the synchronization status of the node
+	SynchronizationStatus *NodeSynchronizationStatus `json:"synchronizationStatus,omitempty" protobuf:"bytes,25,opt,name=synchronizationStatus"`
+}
+
+func (n *NodeStatus) GetName() string {
+	return n.Name
+}
+
+func (n *NodeStatus) IsDAGTask() bool {
+	return false
+}
+
+func (n *NodeStatus) IsWorkflowStep() bool {
+	return false
+}
+
+// Fulfilled returns whether a phase is fulfilled, i.e. it completed execution or was skipped or omitted
+func (phase NodePhase) Fulfilled() bool {
+	return phase.Completed() || phase == NodeSkipped || phase == NodeOmitted
+}
+
+// Completed returns whether or not a phase completed. Notably, a skipped phase is not considered as having completed
+func (phase NodePhase) Completed() bool {
+	return phase.FailedOrError() || phase == NodeSucceeded
+}
+
+func (phase NodePhase) FailedOrError() bool {
+	return phase == NodeFailed || phase == NodeError
+}
+
+// Fulfilled returns whether or not the workflow has fulfilled its execution
+func (ws WorkflowStatus) Fulfilled() bool {
+	return ws.Phase.Completed()
+}
+
+// Successful returns whether or not the workflow has succeeded
+func (ws WorkflowStatus) Successful() bool {
+	return ws.Phase == WorkflowSucceeded
+}
+
+// Failed returns whether or not the workflow has failed
+func (ws WorkflowStatus) Failed() bool {
+	return ws.Phase == WorkflowFailed
+}
+
+func (ws WorkflowStatus) StartTime() *metav1.Time {
+	return &ws.StartedAt
+}
+
+func (ws WorkflowStatus) FinishTime() *metav1.Time {
+	return &ws.FinishedAt
+}
+
+// Fulfilled returns whether a node is fulfilled, i.e. it finished execution, was skipped, or was daemoned successfully
+func (n NodeStatus) Fulfilled() bool {
+	return n.Phase.Fulfilled() || n.IsDaemoned() && n.Phase != NodePending
+}
+
+// Completed returns whether a node completed. Notably, a skipped node is not considered as having completed
+func (n NodeStatus) Completed() bool {
+	return n.Phase.Completed()
+}
+
+func (in *WorkflowStatus) AnyActiveSuspendNode() bool {
+	return in.Nodes.Any(func(node NodeStatus) bool { return node.IsActiveSuspendNode() })
+}
+
+func (ws *WorkflowStatus) GetDuration() time.Duration {
+	if ws.FinishedAt.IsZero() {
+		return 0
+	}
+	return ws.FinishedAt.Time.Sub(ws.StartedAt.Time)
+}
+
+// Pending returns whether or not the node is in pending state
+func (n NodeStatus) Pending() bool {
+	return n.Phase == NodePending
+}
+
+// IsDaemoned returns whether or not the node is daemoned
+func (n NodeStatus) IsDaemoned() bool {
+	if n.Daemoned == nil || !*n.Daemoned {
+		return false
+	}
+	return true
+}
+
+// IsPartOfExitHandler returns whether the node is part of an exit handler.
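+//
+// Example (illustrative, not part of the original change): given a workflow
+// wf, this walks up the boundary chain looking for a ".onExit" ancestor:
+//	isExit := n.IsPartOfExitHandler(wf.Status.Nodes)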
+func (n *NodeStatus) IsPartOfExitHandler(nodes Nodes) bool {
+	currentNode := n
+	for !currentNode.IsExitNode() {
+		if currentNode.BoundaryID == "" {
+			return false
+		}
+		boundaryNode, err := nodes.Get(currentNode.BoundaryID)
+		if err != nil {
+			log.Panicf("was unable to obtain node for %s", currentNode.BoundaryID)
+		}
+		currentNode = boundaryNode
+	}
+	return true
+}
+
+// IsExitNode returns whether or not the node ran as an exit handler.
+func (n NodeStatus) IsExitNode() bool {
+	return strings.HasSuffix(n.DisplayName, ".onExit")
+}
+
+func (n NodeStatus) Succeeded() bool {
+	return n.Phase == NodeSucceeded
+}
+
+func (n NodeStatus) FailedOrError() bool {
+	return n.Phase.FailedOrError()
+}
+
+func (n NodeStatus) Omitted() bool {
+	return n.Type == NodeTypeSkipped && n.Phase == NodeOmitted
+}
+
+func (n NodeStatus) StartTime() *metav1.Time {
+	return &n.StartedAt
+}
+
+func (n NodeStatus) FinishTime() *metav1.Time {
+	return &n.FinishedAt
+}
+
+// CanRetry returns whether the node should be retried or not.
+func (n NodeStatus) CanRetry() bool {
+	// TODO(shri): Check if there are some 'unretryable' errors.
+	return n.FailedOrError()
+}
+
+func (n NodeStatus) GetTemplateScope() (ResourceScope, string) {
+	// For compatibility: an empty TemplateScope is a local scope
+	if n.TemplateScope == "" {
+		return ResourceScopeLocal, ""
+	}
+	split := strings.Split(n.TemplateScope, "/")
+	// For compatibility: an unspecified ResourceScope in a TemplateScope is a namespaced scope
+	if len(split) == 1 {
+		return ResourceScopeNamespaced, split[0]
+	}
+	resourceScope, resourceName := split[0], split[1]
+	return ResourceScope(resourceScope), resourceName
+}
+
+var _ TemplateReferenceHolder = &NodeStatus{}
+
+func (n *NodeStatus) GetTemplate() *Template {
+	return nil
+}
+
+func (n *NodeStatus) GetTemplateName() string {
+	return n.TemplateName
+}
+
+func (n *NodeStatus) GetTemplateRef() *TemplateRef {
+	return n.TemplateRef
+}
+
+func (n *NodeStatus) GetOutputs() *Outputs {
+	if n == nil {
+		return nil
+	}
+	return n.Outputs
+}
+
+// IsActiveSuspendNode returns whether this node is an active suspend node
+func (n *NodeStatus) IsActiveSuspendNode() bool {
+	return n.Type == NodeTypeSuspend && n.Phase == NodeRunning
+}
+
+// IsTaskSetNode returns whether this node uses the taskset
+func (n *NodeStatus) IsTaskSetNode() bool {
+	return n.Type == NodeTypeHTTP || n.Type == NodeTypePlugin
+}
+
+func (n NodeStatus) GetDuration() time.Duration {
+	if n.FinishedAt.IsZero() {
+		return 0
+	}
+	return n.FinishedAt.Sub(n.StartedAt.Time)
+}
+
+func (n NodeStatus) HasChild(childID string) bool {
+	for _, nodeID := range n.Children {
+		if childID == nodeID {
+			return true
+		}
+	}
+	return false
+}
+
+// S3Bucket contains the access information required for interfacing with an S3 bucket
+type S3Bucket struct {
+	// Endpoint is the hostname of the bucket endpoint
+	Endpoint string `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"`
+
+	// Bucket is the name of the bucket
+	Bucket string `json:"bucket,omitempty" protobuf:"bytes,2,opt,name=bucket"`
+
+	// Region contains the optional bucket region
+	Region string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"`
+
+	// Insecure, if true, will connect to the service without TLS
+	Insecure *bool `json:"insecure,omitempty" protobuf:"varint,4,opt,name=insecure"`
+
+	// AccessKeySecret is the secret selector to the bucket's access key
+	AccessKeySecret *apiv1.SecretKeySelector `json:"accessKeySecret,omitempty" protobuf:"bytes,5,opt,name=accessKeySecret"`
+
+	// SecretKeySecret is the secret selector to the bucket's secret key
+	SecretKeySecret *apiv1.SecretKeySelector `json:"secretKeySecret,omitempty" protobuf:"bytes,6,opt,name=secretKeySecret"`
+
+	// SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant
+	SessionTokenSecret *apiv1.SecretKeySelector `json:"sessionTokenSecret,omitempty" protobuf:"bytes,12,opt,name=sessionTokenSecret"`
+
+	// RoleARN is the Amazon Resource Name (ARN) of the role to assume.
+	RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,7,opt,name=roleARN"`
+
+	// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
+	UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"`
+
+	// CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.
+	CreateBucketIfNotPresent *CreateS3BucketOptions `json:"createBucketIfNotPresent,omitempty" protobuf:"bytes,9,opt,name=createBucketIfNotPresent"`
+
+	EncryptionOptions *S3EncryptionOptions `json:"encryptionOptions,omitempty" protobuf:"bytes,10,opt,name=encryptionOptions"`
+
+	// CASecret specifies the secret that contains the CA, used to verify the TLS connection
+	CASecret *apiv1.SecretKeySelector `json:"caSecret,omitempty" protobuf:"bytes,11,opt,name=caSecret"`
+}
+
+// S3EncryptionOptions used to determine encryption options during s3 operations
+type S3EncryptionOptions struct {
+	// KmsKeyId tells the driver to encrypt the object using the specified KMS Key.
+	KmsKeyId string `json:"kmsKeyId,omitempty" protobuf:"bytes,1,opt,name=kmsKeyId"`
+
+	// KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information
+	KmsEncryptionContext string `json:"kmsEncryptionContext,omitempty" protobuf:"bytes,2,opt,name=kmsEncryptionContext"`
+
+	// EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used
+	EnableEncryption bool `json:"enableEncryption,omitempty" protobuf:"varint,3,opt,name=enableEncryption"`
+
+	// ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.
+	ServerSideCustomerKeySecret *apiv1.SecretKeySelector `json:"serverSideCustomerKeySecret,omitempty" protobuf:"bytes,4,opt,name=serverSideCustomerKeySecret"`
+}
+
+// CreateS3BucketOptions options used to determine the automatic bucket-creation process
+type CreateS3BucketOptions struct {
+	// ObjectLocking Enable object locking
+	ObjectLocking bool `json:"objectLocking,omitempty" protobuf:"varint,3,opt,name=objectLocking"`
+}
+
+// S3Artifact is the location of an S3 artifact
+type S3Artifact struct {
+	S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"`
+
+	// Key is the key in the bucket where the artifact resides
+	Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"`
+}
+
+func (s *S3Artifact) GetKey() (string, error) {
+	return s.Key, nil
+}
+
+func (s *S3Artifact) SetKey(key string) error {
+	s.Key = key
+	return nil
+}
+
+func (s *S3Artifact) HasLocation() bool {
+	return s != nil && s.Endpoint != "" && s.Bucket != "" && s.Key != ""
+}
+
+// GitArtifact is the location of a git artifact
+type GitArtifact struct {
+	// Repo is the git repository
+	Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
+
+	// Revision is the git commit, tag, or branch to checkout
+	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+	// Depth specifies clones/fetches should be shallow and include the given
+	// number of commits from the branch tip
+	Depth *uint64 `json:"depth,omitempty" protobuf:"bytes,3,opt,name=depth"`
+
+	// Fetch specifies a number of refs that should be fetched before checkout
+	Fetch []string `json:"fetch,omitempty" protobuf:"bytes,4,rep,name=fetch"`
+
+	// UsernameSecret is the secret selector to the repository username
+	UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,5,opt,name=usernameSecret"`
+
+	// PasswordSecret is the secret selector to the repository password
+	PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,6,opt,name=passwordSecret"`
+
+	// SSHPrivateKeySecret is the secret selector to the repository ssh private key
+	SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty" protobuf:"bytes,7,opt,name=sshPrivateKeySecret"`
+
+	// InsecureIgnoreHostKey disables SSH strict host key checking during git clone
+	InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"varint,8,opt,name=insecureIgnoreHostKey"`
+
+	// DisableSubmodules disables submodules during git clone
+	DisableSubmodules bool `json:"disableSubmodules,omitempty" protobuf:"varint,9,opt,name=disableSubmodules"`
+
+	// SingleBranch enables single branch clone, using the `branch` parameter
+	SingleBranch bool `json:"singleBranch,omitempty" protobuf:"varint,10,opt,name=singleBranch"`
+
+	// Branch is the branch to fetch when `SingleBranch` is enabled
+	Branch string `json:"branch,omitempty" protobuf:"bytes,11,opt,name=branch"`
+
+	// InsecureSkipTLS disables server certificate verification resulting in insecure HTTPS connections
+	InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty" protobuf:"varint,12,opt,name=insecureSkipTLS"`
+}
+
+func (g *GitArtifact) HasLocation() bool {
+	return g != nil && g.Repo != ""
+}
+
+func (g *GitArtifact) GetKey() (string, error) {
+	return "", fmt.Errorf("key unsupported: git artifact does not have a key")
+}
+
+func (g *GitArtifact) SetKey(string) error {
+	return fmt.Errorf("key unsupported: cannot set key on git artifact")
+}
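+
+// Example (sketch, not part of the original patch): a shallow, single-branch
+// checkout of one commit:
+//
+//	depth := uint64(1)
+//	art := GitArtifact{
+//		Repo:         "https://github.com/argoproj/argo-workflows.git",
+//		Branch:       "main",
+//		SingleBranch: true,
+//		Depth:        &depth,
+//	}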
+
+func (g *GitArtifact) GetDepth() int {
+	if g == nil || g.Depth == nil {
+		return 0
+	}
+	return int(*g.Depth)
+}
+
+// ArtifactoryAuth describes the secret selectors required for authenticating to artifactory
+type ArtifactoryAuth struct {
+	// UsernameSecret is the secret selector to the repository username
+	UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,1,opt,name=usernameSecret"`
+
+	// PasswordSecret is the secret selector to the repository password
+	PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,2,opt,name=passwordSecret"`
+}
+
+// ArtifactoryArtifact is the location of an artifactory artifact
+type ArtifactoryArtifact struct {
+	// URL of the artifact
+	URL string `json:"url" protobuf:"bytes,1,opt,name=url"`
+	ArtifactoryAuth `json:",inline" protobuf:"bytes,2,opt,name=artifactoryAuth"`
+}
+
+// func (a *ArtifactoryArtifact) String() string {
+//	return a.URL
+// }
+func (a *ArtifactoryArtifact) GetKey() (string, error) {
+	u, err := url.Parse(a.URL)
+	if err != nil {
+		return "", err
+	}
+	return u.Path, nil
+}
+
+func (a *ArtifactoryArtifact) SetKey(key string) error {
+	u, err := url.Parse(a.URL)
+	if err != nil {
+		return err
+	}
+	u.Path = key
+	a.URL = u.String()
+	return nil
+}
+
+func (a *ArtifactoryArtifact) HasLocation() bool {
+	return a != nil && a.URL != "" && a.UsernameSecret != nil
+}
+
+// AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container
+type AzureBlobContainer struct {
+	// Endpoint is the service url associated with an account. It is most likely "https://<ACCOUNT_NAME>.blob.core.windows.net"
+	Endpoint string `json:"endpoint" protobuf:"bytes,1,opt,name=endpoint"`
+
+	// Container is the container where resources will be stored
+	Container string `json:"container" protobuf:"bytes,2,opt,name=container"`
+
+	// AccountKeySecret is the secret selector to the Azure Blob Storage account access key
+	AccountKeySecret *apiv1.SecretKeySelector `json:"accountKeySecret,omitempty" protobuf:"bytes,3,opt,name=accountKeySecret"`
+
+	// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
+ UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,4,opt,name=useSDKCreds"` +} + +// AzureArtifact is the location of a an Azure Storage artifact +type AzureArtifact struct { + AzureBlobContainer `json:",inline" protobuf:"bytes,1,opt,name=azureBlobContainer"` + + // Blob is the blob name (i.e., path) in the container where the artifact resides + Blob string `json:"blob" protobuf:"bytes,2,opt,name=blob"` +} + +func (a *AzureArtifact) GetKey() (string, error) { + return a.Blob, nil +} + +func (a *AzureArtifact) SetKey(key string) error { + a.Blob = key + return nil +} + +func (a *AzureArtifact) HasLocation() bool { + return a != nil && a.Endpoint != "" && a.Container != "" && a.Blob != "" +} + +// HDFSArtifact is the location of an HDFS artifact +type HDFSArtifact struct { + HDFSConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSConfig"` + + // Path is a file path in HDFS + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // Force copies a file forcibly even if it exists + Force bool `json:"force,omitempty" protobuf:"varint,3,opt,name=force"` +} + +func (h *HDFSArtifact) GetKey() (string, error) { + return h.Path, nil +} + +func (g *HDFSArtifact) SetKey(key string) error { + g.Path = key + return nil +} + +func (h *HDFSArtifact) HasLocation() bool { + return h != nil && len(h.Addresses) > 0 +} + +// HDFSConfig is configurations for HDFS +type HDFSConfig struct { + HDFSKrbConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSKrbConfig"` + + // Addresses is accessible addresses of HDFS name nodes + Addresses []string `json:"addresses,omitempty" protobuf:"bytes,2,rep,name=addresses"` + + // HDFSUser is the user to access HDFS file system. + // It is ignored if either ccache or keytab is used. + HDFSUser string `json:"hdfsUser,omitempty" protobuf:"bytes,3,opt,name=hdfsUser"` + + // DataTransferProtection is the protection level for HDFS data transfer. + // It corresponds to the dfs.data.transfer.protection configuration in HDFS. + DataTransferProtection string `json:"dataTransferProtection,omitempty" protobuf:"bytes,4,opt,name=dataTransferProtection"` +} + +// HDFSKrbConfig is auth configurations for Kerberos +type HDFSKrbConfig struct { + // KrbCCacheSecret is the secret selector for Kerberos ccache + // Either ccache or keytab can be set to use Kerberos. + KrbCCacheSecret *apiv1.SecretKeySelector `json:"krbCCacheSecret,omitempty" protobuf:"bytes,1,opt,name=krbCCacheSecret"` + + // KrbKeytabSecret is the secret selector for Kerberos keytab + // Either ccache or keytab can be set to use Kerberos. + KrbKeytabSecret *apiv1.SecretKeySelector `json:"krbKeytabSecret,omitempty" protobuf:"bytes,2,opt,name=krbKeytabSecret"` + + // KrbUsername is the Kerberos username used with Kerberos keytab + // It must be set if keytab is used. + KrbUsername string `json:"krbUsername,omitempty" protobuf:"bytes,3,opt,name=krbUsername"` + + // KrbRealm is the Kerberos realm used with Kerberos keytab + // It must be set if keytab is used. + KrbRealm string `json:"krbRealm,omitempty" protobuf:"bytes,4,opt,name=krbRealm"` + + // KrbConfig is the configmap selector for Kerberos config as string + // It must be set if either ccache or keytab is used. + KrbConfigConfigMap *apiv1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty" protobuf:"bytes,5,opt,name=krbConfigConfigMap"` + + // KrbServicePrincipalName is the principal name of Kerberos service + // It must be set if either ccache or keytab is used. 
+	KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty" protobuf:"bytes,6,opt,name=krbServicePrincipalName"`
+}
+
+// RawArtifact allows raw string content to be placed as an artifact in a container
+type RawArtifact struct {
+	// Data is the string contents of the artifact
+	Data string `json:"data" protobuf:"bytes,1,opt,name=data"`
+}
+
+func (r *RawArtifact) GetKey() (string, error) {
+	return "", fmt.Errorf("key unsupported: raw artifact does not have a key")
+}
+
+func (r *RawArtifact) SetKey(string) error {
+	return fmt.Errorf("key unsupported: cannot set key for raw artifact")
+}
+
+func (r *RawArtifact) HasLocation() bool {
+	return r != nil
+}
+
+// Header indicates a key-value request header to be used when fetching artifacts over HTTP
+type Header struct {
+	// Name is the header name
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Value is the literal value to use for the header
+	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+// BasicAuth describes the secret selectors required for basic authentication
+type BasicAuth struct {
+	// UsernameSecret is the secret selector to the repository username
+	UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,1,opt,name=usernameSecret"`
+
+	// PasswordSecret is the secret selector to the repository password
+	PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,2,opt,name=passwordSecret"`
+}
+
+// ClientCertAuth holds necessary information for client authentication via certificates
+type ClientCertAuth struct {
+	ClientCertSecret *apiv1.SecretKeySelector `json:"clientCertSecret,omitempty" protobuf:"bytes,1,opt,name=clientCertSecret"`
+	ClientKeySecret *apiv1.SecretKeySelector `json:"clientKeySecret,omitempty" protobuf:"bytes,2,opt,name=clientKeySecret"`
+}
+
+// OAuth2Auth holds all information for client authentication via OAuth2 tokens
+type OAuth2Auth struct {
+	ClientIDSecret *apiv1.SecretKeySelector `json:"clientIDSecret,omitempty" protobuf:"bytes,1,opt,name=clientIDSecret"`
+	ClientSecretSecret *apiv1.SecretKeySelector `json:"clientSecretSecret,omitempty" protobuf:"bytes,2,opt,name=clientSecretSecret"`
+	TokenURLSecret *apiv1.SecretKeySelector `json:"tokenURLSecret,omitempty" protobuf:"bytes,3,opt,name=tokenURLSecret"`
+	Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"`
+	EndpointParams []OAuth2EndpointParam `json:"endpointParams,omitempty" protobuf:"bytes,6,rep,name=endpointParams"`
+}
+
+// OAuth2EndpointParam is for requesting optional fields that should be sent in the oauth request
+type OAuth2EndpointParam struct {
+	// Key is the name of the parameter
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+
+	// Value is the literal value to use for the parameter
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+type HTTPAuth struct {
+	ClientCert ClientCertAuth `json:"clientCert,omitempty" protobuf:"bytes,1,opt,name=clientCert"`
+	OAuth2 OAuth2Auth `json:"oauth2,omitempty" protobuf:"bytes,2,opt,name=oauth2"`
+	BasicAuth BasicAuth `json:"basicAuth,omitempty" protobuf:"bytes,3,opt,name=basicAuth"`
+}
+
+// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container
+type HTTPArtifact struct {
+	// URL of the artifact
+	URL string `json:"url" protobuf:"bytes,1,opt,name=url"`
+
+	// Headers are an optional list of headers to send with HTTP requests for artifacts
+	Headers []Header `json:"headers,omitempty" protobuf:"bytes,2,rep,name=headers"`
+
+	// Auth contains information for client authentication
+	Auth *HTTPAuth `json:"auth,omitempty" protobuf:"bytes,3,opt,name=auth"`
+}
+
+func (h *HTTPArtifact) GetKey() (string, error) {
+	u, err := url.Parse(h.URL)
+	if err != nil {
+		return "", err
+	}
+	return u.Path, nil
+}
+
+func (h *HTTPArtifact) SetKey(key string) error {
+	u, err := url.Parse(h.URL)
+	if err != nil {
+		return err
+	}
+	u.Path = key
+	h.URL = u.String()
+	return nil
+}
+
+func (h *HTTPArtifact) HasLocation() bool {
+	return h != nil && h.URL != ""
+}
+
+// GCSBucket contains the access information for interfacing with a GCS bucket
+type GCSBucket struct {
+	// Bucket is the name of the bucket
+	Bucket string `json:"bucket,omitempty" protobuf:"bytes,1,opt,name=bucket"`
+
+	// ServiceAccountKeySecret is the secret selector to the bucket's service account key
+	ServiceAccountKeySecret *apiv1.SecretKeySelector `json:"serviceAccountKeySecret,omitempty" protobuf:"bytes,2,opt,name=serviceAccountKeySecret"`
+}
+
+// GCSArtifact is the location of a GCS artifact
+type GCSArtifact struct {
+	GCSBucket `json:",inline" protobuf:"bytes,1,opt,name=gCSBucket"`
+
+	// Key is the path in the bucket where the artifact resides
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+}
+
+func (g *GCSArtifact) GetKey() (string, error) {
+	return g.Key, nil
+}
+
+func (g *GCSArtifact) SetKey(key string) error {
+	g.Key = key
+	return nil
+}
+
+func (g *GCSArtifact) HasLocation() bool {
+	return g != nil && g.Bucket != "" && g.Key != ""
+}
+
+// OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket
+type OSSBucket struct {
+	// Endpoint is the hostname of the bucket endpoint
+	Endpoint string `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"`
+
+	// Bucket is the name of the bucket
+	Bucket string `json:"bucket,omitempty" protobuf:"bytes,2,opt,name=bucket"`
+
+	// AccessKeySecret is the secret selector to the bucket's access key
+	AccessKeySecret *apiv1.SecretKeySelector `json:"accessKeySecret,omitempty" protobuf:"bytes,3,opt,name=accessKeySecret"`
+
+	// SecretKeySecret is the secret selector to the bucket's secret key
+	SecretKeySecret *apiv1.SecretKeySelector `json:"secretKeySecret,omitempty" protobuf:"bytes,4,opt,name=secretKeySecret"`
+
+	// CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist
+	CreateBucketIfNotPresent bool `json:"createBucketIfNotPresent,omitempty" protobuf:"varint,5,opt,name=createBucketIfNotPresent"`
+
+	// SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm
+	SecurityToken string `json:"securityToken,omitempty" protobuf:"bytes,6,opt,name=securityToken"`
+
+	// LifecycleRule specifies how to manage bucket's lifecycle
+	LifecycleRule *OSSLifecycleRule `json:"lifecycleRule,omitempty" protobuf:"bytes,7,opt,name=lifecycleRule"`
+
+	// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
+ UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"` +} + +// OSSArtifact is the location of an Alibaba Cloud OSS artifact +type OSSArtifact struct { + OSSBucket `json:",inline" protobuf:"bytes,1,opt,name=oSSBucket"` + + // Key is the path in the bucket where the artifact resides + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` +} + +// OSSLifecycleRule specifies how to manage bucket's lifecycle +type OSSLifecycleRule struct { + // MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type + MarkInfrequentAccessAfterDays int32 `json:"markInfrequentAccessAfterDays,omitempty" protobuf:"varint,1,opt,name=markInfrequentAccessAfterDays"` + + // MarkDeletionAfterDays is the number of days before we delete objects in the bucket + MarkDeletionAfterDays int32 `json:"markDeletionAfterDays,omitempty" protobuf:"varint,2,opt,name=markDeletionAfterDays"` +} + +func (o *OSSArtifact) GetKey() (string, error) { + return o.Key, nil +} + +func (o *OSSArtifact) SetKey(key string) error { + o.Key = key + return nil +} + +func (o *OSSArtifact) HasLocation() bool { + return o != nil && o.Bucket != "" && o.Endpoint != "" && o.Key != "" +} + +// ExecutorConfig holds configurations of an executor container. +type ExecutorConfig struct { + // ServiceAccountName specifies the service account name of the executor container. + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,1,opt,name=serviceAccountName"` +} + +// ScriptTemplate is a template subtype to enable scripting through code steps +type ScriptTemplate struct { + apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` + + // Source contains the source code of the script to execute + Source string `json:"source" protobuf:"bytes,2,opt,name=source"` +} + +// ResourceTemplate is a template subtype to manipulate kubernetes resources +type ResourceTemplate struct { + // Action is the action to perform to the resource. + // Must be one of: get, create, apply, delete, replace, patch + Action string `json:"action" protobuf:"bytes,1,opt,name=action"` + + // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" + // Must be one of: strategic, merge, json + MergeStrategy string `json:"mergeStrategy,omitempty" protobuf:"bytes,2,opt,name=mergeStrategy"` + + // Manifest contains the kubernetes manifest + Manifest string `json:"manifest,omitempty" protobuf:"bytes,3,opt,name=manifest"` + + // ManifestFrom is the source for a single kubernetes manifest + ManifestFrom *ManifestFrom `json:"manifestFrom,omitempty" protobuf:"bytes,8,opt,name=manifestFrom"` + + // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. + SetOwnerReference bool `json:"setOwnerReference,omitempty" protobuf:"varint,4,opt,name=setOwnerReference"` + + // SuccessCondition is a label selector expression which describes the conditions + // of the k8s resource in which it is acceptable to proceed to the following step + SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,5,opt,name=successCondition"` + + // FailureCondition is a label selector expression which describes the conditions + // of the k8s resource in which the step was considered failed + FailureCondition string `json:"failureCondition,omitempty" protobuf:"bytes,6,opt,name=failureCondition"` + + // Flags is a set of additional options passed to kubectl before submitting a resource + // I.e. 
to disable resource validation: + // flags: [ + // "--validate=false" # disable resource validation + // ] + Flags []string `json:"flags,omitempty" protobuf:"varint,7,opt,name=flags"` +} + +type ManifestFrom struct { + // Artifact contains the artifact to use + Artifact *Artifact `json:"artifact" protobuf:"bytes,1,opt,name=artifact"` +} + +// GetType returns the type of this template +func (tmpl *Template) GetType() TemplateType { + if tmpl.Container != nil { + return TemplateTypeContainer + } + if tmpl.ContainerSet != nil { + return TemplateTypeContainerSet + } + if tmpl.Steps != nil { + return TemplateTypeSteps + } + if tmpl.DAG != nil { + return TemplateTypeDAG + } + if tmpl.Script != nil { + return TemplateTypeScript + } + if tmpl.Resource != nil { + return TemplateTypeResource + } + if tmpl.Data != nil { + return TemplateTypeData + } + if tmpl.Suspend != nil { + return TemplateTypeSuspend + } + if tmpl.HTTP != nil { + return TemplateTypeHTTP + } + if tmpl.Plugin != nil { + return TemplateTypePlugin + } + return TemplateTypeUnknown +} + +func (tmpl *Template) GetNodeType() NodeType { + if tmpl.RetryStrategy != nil { + return NodeTypeRetry + } + switch tmpl.GetType() { + case TemplateTypeContainer, TemplateTypeContainerSet, TemplateTypeScript, TemplateTypeResource, TemplateTypeData: + return NodeTypePod + case TemplateTypeDAG: + return NodeTypeDAG + case TemplateTypeSteps: + return NodeTypeSteps + case TemplateTypeSuspend: + return NodeTypeSuspend + case TemplateTypeHTTP: + return NodeTypeHTTP + case TemplateTypePlugin: + return NodeTypePlugin + } + return "" +} + +// IsPodType returns whether or not the template is a pod type +func (tmpl *Template) IsPodType() bool { + switch tmpl.GetType() { + case TemplateTypeContainer, TemplateTypeContainerSet, TemplateTypeScript, TemplateTypeResource, TemplateTypeData: + return true + } + return false +} + +// IsLeaf returns whether or not the template is a leaf +func (tmpl *Template) IsLeaf() bool { + switch tmpl.GetType() { + case TemplateTypeContainer, TemplateTypeContainerSet, TemplateTypeScript, TemplateTypeResource, TemplateTypeData, TemplateTypeHTTP, TemplateTypePlugin: + return true + } + return false +} + +func (tmpl *Template) IsMainContainerName(containerName string) bool { + for _, c := range tmpl.GetMainContainerNames() { + if c == containerName { + return true + } + } + return false +} + +func (tmpl *Template) GetMainContainerNames() []string { + if tmpl != nil && tmpl.ContainerSet != nil { + out := make([]string, 0) + for _, c := range tmpl.ContainerSet.GetContainers() { + out = append(out, c.Name) + } + return out + } else { + return []string{"main"} + } +} + +func (tmpl *Template) HasSequencedContainers() bool { + return tmpl != nil && tmpl.ContainerSet.HasSequencedContainers() +} + +func (tmpl *Template) GetVolumeMounts() []apiv1.VolumeMount { + if tmpl.Container != nil { + return tmpl.Container.VolumeMounts + } else if tmpl.Script != nil { + return tmpl.Script.VolumeMounts + } else if tmpl.ContainerSet != nil { + return tmpl.ContainerSet.VolumeMounts + } + return nil +} + +// HasOutput returns true if the template can and will have outputs (i.e. exit code and result). +// In the case of a plugin, we assume it will have outputs because we cannot know at runtime. 
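+//
+// Example (illustrative, not part of the original change): a plain container
+// template reports outputs, since it always produces an exit code:
+//	t := &Template{Container: &apiv1.Container{Image: "alpine:3"}} // image is hypothetical
+//	t.HasOutput() // true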
+func (tmpl *Template) HasOutput() bool {
+	return tmpl.Container != nil || tmpl.ContainerSet.HasContainerNamed("main") || tmpl.Script != nil || tmpl.Data != nil || tmpl.HTTP != nil || tmpl.Plugin != nil
+}
+
+func (t *Template) IsDaemon() bool {
+	return t != nil && t.Daemon != nil && *t.Daemon
+}
+
+// SaveLogsAsArtifact returns whether logs should be saved as an artifact
+func (tmpl *Template) SaveLogsAsArtifact() bool {
+	return tmpl != nil && tmpl.ArchiveLocation.IsArchiveLogs()
+}
+
+func (t *Template) GetRetryStrategy() (wait.Backoff, error) {
+	return t.ContainerSet.GetRetryStrategy()
+}
+
+func (t *Template) HasOutputs() bool {
+	return t != nil && t.Outputs.HasOutputs()
+}
+
+// DAGTemplate is a template subtype for directed acyclic graph templates
+type DAGTemplate struct {
+	// Target are one or more names of targets to execute in a DAG
+	Target string `json:"target,omitempty" protobuf:"bytes,1,opt,name=target"`
+
+	// Tasks are a list of DAG tasks
+	// +patchStrategy=merge
+	// +patchMergeKey=name
+	Tasks []DAGTask `json:"tasks" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tasks"`
+
+	// This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps,
+	// as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed
+	// before failing the DAG itself.
+	// The FailFast flag defaults to true; if set to false, it allows a DAG to run all branches of the DAG to
+	// completion (either success or failure), regardless of the failed outcomes of branches in the DAG.
+	// More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442
+	FailFast *bool `json:"failFast,omitempty" protobuf:"varint,3,opt,name=failFast"`
+}
+
+// DAGTask represents a node in the graph during DAG execution
+type DAGTask struct {
+	// Name is the name of the target
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Name of template to execute
+	Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+
+	// Inline is the template. Template must be empty if this is declared (and vice-versa).
+	Inline *Template `json:"inline,omitempty" protobuf:"bytes,14,opt,name=inline"`
+
+	// Arguments are the parameter and artifact arguments to the template
+	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
+
+	// TemplateRef is the reference to the template resource to execute.
+	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"`
+
+	// Dependencies are names of other targets which this depends on
+	Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,5,rep,name=dependencies"`
+
+	// WithItems expands a task into multiple parallel tasks from the items in the list
+	WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,6,rep,name=withItems"`
+
+	// WithParam expands a task into multiple parallel tasks from the value in the parameter,
+	// which is expected to be a JSON list.
+	WithParam string `json:"withParam,omitempty" protobuf:"bytes,7,opt,name=withParam"`
+
+	// WithSequence expands a task into a numeric sequence
+	WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,8,opt,name=withSequence"`
+
+	// When is an expression in which the task should conditionally execute
+	When string `json:"when,omitempty" protobuf:"bytes,9,opt,name=when"`
+
+	// ContinueOn makes argo proceed with the following step even if this step fails.
+ // Errors and Failed states can be specified + ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,10,opt,name=continueOn"` + + // OnExit is a template reference which is invoked at the end of the + // template, irrespective of the success, failure, or error of the + // primary template. + // DEPRECATED: Use Hooks[exit].Template instead. + OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"` + + // Depends are name of other targets which this depends on + Depends string `json:"depends,omitempty" protobuf:"bytes,12,opt,name=depends"` + + // Hooks hold the lifecycle hook which is invoked at lifecycle of + // task, irrespective of the success, failure, or error status of the primary task + Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,13,opt,name=hooks"` +} + +func (t *DAGTask) GetName() string { + return t.Name +} + +func (t *DAGTask) IsDAGTask() bool { + return true +} + +func (t *DAGTask) IsWorkflowStep() bool { + return false +} + +var _ TemplateReferenceHolder = &DAGTask{} + +func (t *DAGTask) GetExitHook(args Arguments) *LifecycleHook { + if !t.HasExitHook() { + return nil + } + if t.OnExit != "" { + return &LifecycleHook{Template: t.OnExit, Arguments: args} + } + return t.Hooks.GetExitHook().WithArgs(args) +} + +func (t *DAGTask) HasExitHook() bool { + return (t.Hooks != nil && t.Hooks.HasExitHook()) || t.OnExit != "" +} + +func (t *DAGTask) GetTemplate() *Template { + return t.Inline +} + +func (t *DAGTask) GetTemplateName() string { + return t.Template +} + +func (t *DAGTask) GetTemplateRef() *TemplateRef { + return t.TemplateRef +} + +func (t *DAGTask) ShouldExpand() bool { + return len(t.WithItems) != 0 || t.WithParam != "" || t.WithSequence != nil +} + +// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time +type SuspendTemplate struct { + // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. 
+	// Could also be a Duration, e.g.: "2m", "6h"
+	Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"`
+}
+
+// GetArtifactByName returns an input artifact by its name
+func (in *Inputs) GetArtifactByName(name string) *Artifact {
+	if in == nil {
+		return nil
+	}
+	return in.Artifacts.GetArtifactByName(name)
+}
+
+// GetParameterByName returns an input parameter by its name
+func (in *Inputs) GetParameterByName(name string) *Parameter {
+	for _, param := range in.Parameters {
+		if param.Name == name {
+			return &param
+		}
+	}
+	return nil
+}
+
+// HasInputs returns whether or not there are any inputs
+func (in *Inputs) HasInputs() bool {
+	if len(in.Artifacts) > 0 {
+		return true
+	}
+	if len(in.Parameters) > 0 {
+		return true
+	}
+	return false
+}
+
+// HasOutputs returns whether or not there are any outputs
+func (out *Outputs) HasOutputs() bool {
+	if out == nil {
+		return false
+	}
+	if out.Result != nil {
+		return true
+	}
+	if out.ExitCode != nil {
+		return true
+	}
+	if len(out.Artifacts) > 0 {
+		return true
+	}
+	if len(out.Parameters) > 0 {
+		return true
+	}
+	return false
+}
+
+func (out *Outputs) GetArtifactByName(name string) *Artifact {
+	if out == nil {
+		return nil
+	}
+	return out.Artifacts.GetArtifactByName(name)
+}
+
+func (out *Outputs) HasResult() bool {
+	return out != nil && out.Result != nil
+}
+
+func (out *Outputs) HasArtifacts() bool {
+	return out != nil && len(out.Artifacts) > 0
+}
+
+func (out *Outputs) HasParameters() bool {
+	return out != nil && len(out.Parameters) > 0
+}
+
+const LogsSuffix = "-logs"
+
+func (out *Outputs) HasLogs() bool {
+	if out == nil {
+		return false
+	}
+	for _, a := range out.Artifacts {
+		if strings.HasSuffix(a.Name, LogsSuffix) {
+			return true
+		}
+	}
+	return false
+}
+
+// GetArtifactByName retrieves an artifact by its name
+func (args *Arguments) GetArtifactByName(name string) *Artifact {
+	return args.Artifacts.GetArtifactByName(name)
+}
+
+// GetParameterByName retrieves a parameter by its name
+func (args *Arguments) GetParameterByName(name string) *Parameter {
+	for _, param := range args.Parameters {
+		if param.Name == name {
+			return &param
+		}
+	}
+	return nil
+}
+
+func (a *Artifact) GetArchive() *ArchiveStrategy {
+	if a == nil || a.Archive == nil {
+		return &ArchiveStrategy{}
+	}
+	return a.Archive
+}
+
+// GetTemplateByName retrieves a defined template by its name
+func (wf *Workflow) GetTemplateByName(name string) *Template {
+	for _, t := range wf.Spec.Templates {
+		if t.Name == name {
+			return &t
+		}
+	}
+	if wf.Status.StoredWorkflowSpec != nil {
+		for _, t := range wf.Status.StoredWorkflowSpec.Templates {
+			if t.Name == name {
+				return &t
+			}
+		}
+	}
+	for _, t := range wf.Status.StoredTemplates {
+		if t.Name == name {
+			return &t
+		}
+	}
+	return nil
+}
+
+func (wf *Workflow) GetNodeByName(nodeName string) (*NodeStatus, error) {
+	nodeID := wf.NodeID(nodeName)
+	return wf.Status.Nodes.Get(nodeID)
+}
+
+// GetResourceScope returns the template scope of workflow.
+func (wf *Workflow) GetResourceScope() ResourceScope {
+	return ResourceScopeLocal
+}
+
+// GetWorkflowSpec returns the Spec of a workflow.
+func (wf *Workflow) GetWorkflowSpec() WorkflowSpec { + return wf.Spec +} + +// NodeID creates a deterministic node ID based on a node name +func (wf *Workflow) NodeID(name string) string { + if name == wf.ObjectMeta.Name { + return wf.ObjectMeta.Name + } + h := fnv.New32a() + _, _ = h.Write([]byte(name)) + return fmt.Sprintf("%s-%v", wf.ObjectMeta.Name, h.Sum32()) +} + +// GetStoredTemplate retrieves a template from stored templates of the workflow. +func (wf *Workflow) GetStoredTemplate(scope ResourceScope, resourceName string, caller TemplateReferenceHolder) *Template { + tmplID, storageNeeded := resolveTemplateReference(scope, resourceName, caller) + if !storageNeeded { + // Local templates aren't stored + return nil + } + if tmpl, ok := wf.Status.StoredTemplates[tmplID]; ok { + return tmpl.DeepCopy() + } + return nil +} + +// SetStoredTemplate stores a new template in stored templates of the workflow. +func (wf *Workflow) SetStoredTemplate(scope ResourceScope, resourceName string, caller TemplateReferenceHolder, tmpl *Template) (bool, error) { + tmplID, storageNeeded := resolveTemplateReference(scope, resourceName, caller) + if !storageNeeded { + // Don't need to store local templates + return false, nil + } + if _, ok := wf.Status.StoredTemplates[tmplID]; !ok { + if wf.Status.StoredTemplates == nil { + wf.Status.StoredTemplates = map[string]Template{} + } + wf.Status.StoredTemplates[tmplID] = *tmpl + return true, nil + } + return false, nil +} + +// SetStoredInlineTemplate stores a inline template in stored templates of the workflow. +func (wf *Workflow) SetStoredInlineTemplate(scope ResourceScope, resourceName string, tmpl *Template) error { + // Store inline templates in steps. + for _, steps := range tmpl.Steps { + for _, step := range steps.Steps { + if step.GetTemplate() != nil { + _, err := wf.SetStoredTemplate(scope, resourceName, &step, step.GetTemplate()) + if err != nil { + return err + } + } + } + } + // Store inline templates in DAG tasks. + if tmpl.DAG != nil { + for _, task := range tmpl.DAG.Tasks { + if task.GetTemplate() != nil { + _, err := wf.SetStoredTemplate(scope, resourceName, &task, task.GetTemplate()) + if err != nil { + return err + } + } + } + } + + return nil +} + +// resolveTemplateReference resolves the stored template name of a given template holder on the template scope and determines +// if it should be stored +func resolveTemplateReference(callerScope ResourceScope, resourceName string, caller TemplateReferenceHolder) (string, bool) { + tmplRef := caller.GetTemplateRef() + if tmplRef != nil { + // We are calling an external WorkflowTemplate or ClusterWorkflowTemplate. Template storage is needed + // We need to determine if we're calling a WorkflowTemplate or a ClusterWorkflowTemplate + referenceScope := ResourceScopeNamespaced + if tmplRef.ClusterScope { + referenceScope = ResourceScopeCluster + } + return fmt.Sprintf("%s/%s/%s", referenceScope, tmplRef.Name, tmplRef.Template), true + } else if callerScope != ResourceScopeLocal { + // Either a WorkflowTemplate or a ClusterWorkflowTemplate is calling a template inside itself. Template storage is needed + if caller.GetTemplate() != nil { + // If we have an inlined template here, use the inlined name + return fmt.Sprintf("%s/%s/inline/%s", callerScope, resourceName, caller.GetName()), true + } + return fmt.Sprintf("%s/%s/%s", callerScope, resourceName, caller.GetTemplateName()), true + } else { + // A Workflow is calling a template inside itself. 
Template storage is not needed
+		return "", false
+	}
+}
+
+// ContinueOn defines if a workflow should continue even if a task or step fails/errors.
+// It can be specified if the workflow should continue when the pod errors, fails or both.
+type ContinueOn struct {
+	// +optional
+	Error bool `json:"error,omitempty" protobuf:"varint,1,opt,name=error"`
+	// +optional
+	Failed bool `json:"failed,omitempty" protobuf:"varint,2,opt,name=failed"`
+}
+
+func continues(c *ContinueOn, phase NodePhase) bool {
+	if c == nil {
+		return false
+	}
+	if c.Error && phase == NodeError {
+		return true
+	}
+	if c.Failed && phase == NodeFailed {
+		return true
+	}
+	return false
+}
+
+// ContinuesOn returns whether the DAG should proceed if the task fails or errors.
+func (t *DAGTask) ContinuesOn(phase NodePhase) bool {
+	return continues(t.ContinueOn, phase)
+}
+
+// ContinuesOn returns whether the StepGroup should proceed if the step fails or errors.
+func (s *WorkflowStep) ContinuesOn(phase NodePhase) bool {
+	return continues(s.ContinueOn, phase)
+}
+
+type MetricType string
+
+const (
+	MetricTypeGauge     MetricType = "Gauge"
+	MetricTypeHistogram MetricType = "Histogram"
+	MetricTypeCounter   MetricType = "Counter"
+	MetricTypeUnknown   MetricType = "Unknown"
+)
+
+// Metrics are a list of metrics emitted from a Workflow/Template
+type Metrics struct {
+	// Prometheus is a list of prometheus metrics to be emitted
+	Prometheus []*Prometheus `json:"prometheus" protobuf:"bytes,1,rep,name=prometheus"`
+}
+
+// Prometheus is a prometheus metric to be emitted
+type Prometheus struct {
+	// Name is the name of the metric
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Labels is a list of metric labels
+	Labels []*MetricLabel `json:"labels,omitempty" protobuf:"bytes,2,rep,name=labels"`
+	// Help is a string that describes the metric
+	Help string `json:"help" protobuf:"bytes,3,opt,name=help"`
+	// When is a conditional statement that decides when to emit the metric
+	When string `json:"when,omitempty" protobuf:"bytes,4,opt,name=when"`
+	// Gauge is a gauge metric
+	Gauge *Gauge `json:"gauge,omitempty" protobuf:"bytes,5,opt,name=gauge"`
+	// Histogram is a histogram metric
+	Histogram *Histogram `json:"histogram,omitempty" protobuf:"bytes,6,opt,name=histogram"`
+	// Counter is a counter metric
+	Counter *Counter `json:"counter,omitempty" protobuf:"bytes,7,opt,name=counter"`
+}
+
+func (p *Prometheus) GetMetricLabels() map[string]string {
+	labels := make(map[string]string)
+	for _, label := range p.Labels {
+		labels[label.Key] = label.Value
+	}
+	return labels
+}
+
+func (p *Prometheus) GetMetricType() MetricType {
+	if p.Gauge != nil {
+		return MetricTypeGauge
+	}
+	if p.Histogram != nil {
+		return MetricTypeHistogram
+	}
+	if p.Counter != nil {
+		return MetricTypeCounter
+	}
+	return MetricTypeUnknown
+}
+
+func (p *Prometheus) GetValueString() string {
+	switch p.GetMetricType() {
+	case MetricTypeGauge:
+		return p.Gauge.Value
+	case MetricTypeCounter:
+		return p.Counter.Value
+	case MetricTypeHistogram:
+		return p.Histogram.Value
+	default:
+		return ""
+	}
+}
+
+func (p *Prometheus) SetValueString(val string) {
+	switch p.GetMetricType() {
+	case MetricTypeGauge:
+		p.Gauge.Value = val
+	case MetricTypeCounter:
+		p.Counter.Value = val
+	case MetricTypeHistogram:
+		p.Histogram.Value = val
+	}
+}
+
+func (p *Prometheus) GetKey() string {
+	// This serves as a hash for the metric
+	// TODO: Make sure this is what we want to use as the hash
+	labels := p.GetMetricLabels()
+	desc := p.Name +
"{" + for _, key := range sortedMapStringStringKeys(labels) { + desc += key + "=" + labels[key] + "," + } + if p.Histogram != nil { + sortedBuckets := p.Histogram.GetBuckets() + sort.Float64s(sortedBuckets) + for _, bucket := range sortedBuckets { + desc += "bucket=" + fmt.Sprint(bucket) + "," + } + } + desc += "}" + return desc +} + +func sortedMapStringStringKeys(in map[string]string) []string { + var stringList []string + for key := range in { + stringList = append(stringList, key) + } + sort.Strings(stringList) + return stringList +} + +func (p *Prometheus) IsRealtime() bool { + return p.GetMetricType() == MetricTypeGauge && p.Gauge.Realtime != nil && *p.Gauge.Realtime +} + +// MetricLabel is a single label for a prometheus metric +type MetricLabel struct { + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// Gauge is a Gauge prometheus metric +type Gauge struct { + // Value is the value to be used in the operation with the metric's current value. If no operation is set, + // value is the value of the metric + Value string `json:"value" protobuf:"bytes,1,opt,name=value"` + // Realtime emits this metric in real time if applicable + Realtime *bool `json:"realtime" protobuf:"varint,2,opt,name=realtime"` + // Operation defines the operation to apply with value and the metrics' current value + // +optional + Operation GaugeOperation `json:"operation,omitempty" protobuf:"bytes,3,opt,name=operation"` +} + +// A GaugeOperation is the set of operations that can be used in a gauge metric. +type GaugeOperation string + +const ( + GaugeOperationSet GaugeOperation = "Set" + GaugeOperationAdd GaugeOperation = "Add" + GaugeOperationSub GaugeOperation = "Sub" +) + +// Histogram is a Histogram prometheus metric +type Histogram struct { + // Value is the value of the metric + Value string `json:"value" protobuf:"bytes,3,opt,name=value"` + // Buckets is a list of bucket divisors for the histogram + Buckets []Amount `json:"buckets" protobuf:"bytes,4,rep,name=buckets"` +} + +func (in *Histogram) GetBuckets() []float64 { + buckets := make([]float64, len(in.Buckets)) + for i, bucket := range in.Buckets { + buckets[i], _ = bucket.Float64() + } + return buckets +} + +// Counter is a Counter prometheus metric +type Counter struct { + // Value is the value of the metric + Value string `json:"value" protobuf:"bytes,1,opt,name=value"` +} + +// Memoization enables caching for the Outputs of the template +type Memoize struct { + // Key is the key to use as the caching key + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // Cache sets and configures the kind of cache + Cache *Cache `json:"cache" protobuf:"bytes,2,opt,name=cache"` + // MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older + // than the MaxAge, it will be ignored. 
+	MaxAge string `json:"maxAge" protobuf:"bytes,3,opt,name=maxAge"`
+}
+
+// MemoizationStatus is the status of this memoized node
+type MemoizationStatus struct {
+	// Hit indicates whether this node was created from a cache entry
+	Hit bool `json:"hit" protobuf:"bytes,1,opt,name=hit"`
+	// Key is the name of the key used for this node's cache
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+	// Cache is the name of the cache that was used
+	CacheName string `json:"cacheName" protobuf:"bytes,3,opt,name=cacheName"`
+}
+
+// Cache is the configuration for the type of cache to be used
+type Cache struct {
+	// ConfigMap sets a ConfigMap-based cache
+	ConfigMap *apiv1.ConfigMapKeySelector `json:"configMap" protobuf:"bytes,1,opt,name=configMap"`
+}
+
+type SynchronizationAction interface {
+	LockWaiting(holderKey, lockKey string, currentHolders []string) bool
+	LockAcquired(holderKey, lockKey string, currentHolders []string) bool
+	LockReleased(holderKey, lockKey string) bool
+}
+
+type SemaphoreHolding struct {
+	// Semaphore stores the semaphore name.
+	Semaphore string `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"`
+	// Holders stores the list of current holder names in the workflow.
+	// +listType=atomic
+	Holders []string `json:"holders,omitempty" protobuf:"bytes,2,opt,name=holders"`
+}
+
+type SemaphoreStatus struct {
+	// Holding is the list of semaphore locks currently held by this workflow.
+	Holding []SemaphoreHolding `json:"holding,omitempty" protobuf:"bytes,1,opt,name=holding"`
+	// Waiting is the list of semaphore locks this workflow is waiting to acquire.
+	Waiting []SemaphoreHolding `json:"waiting,omitempty" protobuf:"bytes,2,opt,name=waiting"`
+}
+
+var _ SynchronizationAction = &SemaphoreStatus{}
+
+func (ss *SemaphoreStatus) GetHolding(semaphoreName string) (int, SemaphoreHolding) {
+	for i, holder := range ss.Holding {
+		if holder.Semaphore == semaphoreName {
+			return i, holder
+		}
+	}
+	return -1, SemaphoreHolding{}
+}
+
+func (ss *SemaphoreStatus) GetWaiting(semaphoreName string) (int, SemaphoreHolding) {
+	for i, holder := range ss.Waiting {
+		if holder.Semaphore == semaphoreName {
+			return i, holder
+		}
+	}
+	return -1, SemaphoreHolding{}
+}
+
+func (ss *SemaphoreStatus) LockWaiting(holderKey, lockKey string, currentHolders []string) bool {
+	i, semaphoreWaiting := ss.GetWaiting(lockKey)
+	if i < 0 {
+		ss.Waiting = append(ss.Waiting, SemaphoreHolding{Semaphore: lockKey, Holders: currentHolders})
+	} else {
+		semaphoreWaiting.Holders = currentHolders
+		ss.Waiting[i] = semaphoreWaiting
+	}
+	return true
+}
+
+func (ss *SemaphoreStatus) LockAcquired(holderKey, lockKey string, currentHolders []string) bool {
+	i, semaphoreHolding := ss.GetHolding(lockKey)
+	holdingName := holderKey
+	if i < 0 {
+		ss.Holding = append(ss.Holding, SemaphoreHolding{Semaphore: lockKey, Holders: []string{holdingName}})
+		return true
+	} else if !slices.Contains(semaphoreHolding.Holders, holdingName) {
+		semaphoreHolding.Holders = append(semaphoreHolding.Holders, holdingName)
+		ss.Holding[i] = semaphoreHolding
+		return true
+	}
+	return false
+}
+
+func (ss *SemaphoreStatus) LockReleased(holderKey, lockKey string) bool {
+	i, semaphoreHolding := ss.GetHolding(lockKey)
+	holdingName := holderKey
+
+	if i >= 0 {
+		semaphoreHolding.Holders = slices.DeleteFunc(semaphoreHolding.Holders,
+			func(x string) bool { return x == holdingName })
+		ss.Holding[i] = semaphoreHolding
+		return true
+	}
+	return false
+}
+
+// MutexHolding describes the mutex and the object which is
holding it. +type MutexHolding struct { + // Reference for the mutex + // e.g: ${namespace}/mutex/${mutexName} + Mutex string `json:"mutex,omitempty" protobuf:"bytes,1,opt,name=mutex"` + // Holder is a reference to the object which holds the Mutex. + // Holding Scenario: + // 1. Current workflow's NodeID which is holding the lock. + // e.g: ${NodeID} + // Waiting Scenario: + // 1. Current workflow or other workflow NodeID which is holding the lock. + // e.g: ${WorkflowName}/${NodeID} + Holder string `json:"holder,omitempty" protobuf:"bytes,2,opt,name=holder"` +} + +// MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks. +type MutexStatus struct { + // Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow. + // +listType=atomic + Holding []MutexHolding `json:"holding,omitempty" protobuf:"bytes,1,opt,name=holding"` + // Waiting is a list of mutexes and their respective objects this workflow is waiting for. + // +listType=atomic + Waiting []MutexHolding `json:"waiting,omitempty" protobuf:"bytes,2,opt,name=waiting"` +} + +var _ SynchronizationAction = &MutexStatus{} + +func (ms *MutexStatus) GetHolding(mutexName string) (int, MutexHolding) { + for i, holder := range ms.Holding { + if holder.Mutex == mutexName { + return i, holder + } + } + return -1, MutexHolding{} +} + +func (ms *MutexStatus) GetWaiting(mutexName string) (int, MutexHolding) { + for i, holder := range ms.Waiting { + if holder.Mutex == mutexName { + return i, holder + } + } + return -1, MutexHolding{} +} + +func (ms *MutexStatus) LockWaiting(holderKey, lockKey string, currentHolders []string) bool { + if len(currentHolders) == 0 { + return false + } + + i, mutexWaiting := ms.GetWaiting(lockKey) + if i < 0 { + ms.Waiting = append(ms.Waiting, MutexHolding{Mutex: lockKey, Holder: currentHolders[0]}) + return true + } else if mutexWaiting.Holder != currentHolders[0] { + mutexWaiting.Holder = currentHolders[0] + ms.Waiting[i] = mutexWaiting + return true + } + return false +} + +func CheckHolderKeyVersion(holderKey string) HoldingNameVersion { + items := strings.Split(holderKey, "/") + if len(items) == 2 || len(items) == 3 { + return HoldingNameV2 + } + return HoldingNameV1 +} + +func (ms *MutexStatus) LockAcquired(holderKey, lockKey string, currentHolders []string) bool { + i, mutexHolding := ms.GetHolding(lockKey) + holdingName := holderKey + if i < 0 { + ms.Holding = append(ms.Holding, MutexHolding{Mutex: lockKey, Holder: holdingName}) + return true + } else if mutexHolding.Holder != holdingName { + mutexHolding.Holder = holdingName + ms.Holding[i] = mutexHolding + return true + } + return false +} + +func (ms *MutexStatus) LockReleased(holderKey, lockKey string) bool { + i, holder := ms.GetHolding(lockKey) + holdingName := holderKey + if i >= 0 && holder.Holder == holdingName { + ms.Holding = append(ms.Holding[:i], ms.Holding[i+1:]...) + return true + } + return false +} + +// SynchronizationStatus stores the status of semaphore and mutex. 
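+//
+// A minimal sketch (assuming a *Workflow value `wf` whose
+// Status.Synchronization has been populated by the controller) of how the
+// holder lists can be inspected:
+//
+//	if sync := wf.Status.Synchronization; sync != nil && sync.Mutex != nil {
+//		for _, h := range sync.Mutex.Holding {
+//			fmt.Printf("mutex %s held by %s\n", h.Mutex, h.Holder)
+//		}
+//	}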
+type SynchronizationStatus struct { + // Semaphore stores this workflow's Semaphore holder details + Semaphore *SemaphoreStatus `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"` + // Mutex stores this workflow's mutex holder details + Mutex *MutexStatus `json:"mutex,omitempty" protobuf:"bytes,2,opt,name=mutex"` +} + +type SynchronizationType string + +const ( + SynchronizationTypeSemaphore SynchronizationType = "Semaphore" + SynchronizationTypeMutex SynchronizationType = "Mutex" + SynchronizationTypeUnknown SynchronizationType = "Unknown" +) + +func (ss *SynchronizationStatus) GetStatus(syncType SynchronizationType) SynchronizationAction { + switch syncType { + case SynchronizationTypeSemaphore: + return ss.Semaphore + case SynchronizationTypeMutex: + return ss.Mutex + default: + panic("invalid syncType in GetStatus") + } +} + +// NodeSynchronizationStatus stores the status of a node +type NodeSynchronizationStatus struct { + // Waiting is the name of the lock that this node is waiting for + Waiting string `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` +} + +type NodeFlag struct { + // Hooked tracks whether or not this node was triggered by hook or onExit + Hooked bool `json:"hooked,omitempty" protobuf:"varint,1,opt,name=hooked"` + // Retried tracks whether or not this node was retried by retryStrategy + Retried bool `json:"retried,omitempty" protobuf:"varint,2,opt,name=retried"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..7592ca05 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,4385 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + json "encoding/json" + + v1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Amount) DeepCopyInto(out *Amount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Amount. +func (in *Amount) DeepCopy() *Amount { + if in == nil { + return nil + } + out := new(Amount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveStrategy) DeepCopyInto(out *ArchiveStrategy) { + *out = *in + if in.Tar != nil { + in, out := &in.Tar, &out.Tar + *out = new(TarStrategy) + (*in).DeepCopyInto(*out) + } + if in.None != nil { + in, out := &in.None, &out.None + *out = new(NoneStrategy) + **out = **in + } + if in.Zip != nil { + in, out := &in.Zip, &out.Zip + *out = new(ZipStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveStrategy. +func (in *ArchiveStrategy) DeepCopy() *ArchiveStrategy { + if in == nil { + return nil + } + out := new(ArchiveStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
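+//
+// Illustrative usage (not part of the generated code): the generated copies
+// share no slices or maps with the source, so the copy can be mutated freely.
+//
+//	src := Arguments{Parameters: []Parameter{{Name: "msg"}}}
+//	dst := src.DeepCopy()
+//	dst.Parameters[0].Name = "changed" // src.Parameters[0].Name is still "msg"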
+func (in *Arguments) DeepCopyInto(out *Arguments) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]Parameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifacts != nil { + in, out := &in.Artifacts, &out.Artifacts + *out = make(Artifacts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments. +func (in *Arguments) DeepCopy() *Arguments { + if in == nil { + return nil + } + out := new(Arguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtGCStatus) DeepCopyInto(out *ArtGCStatus) { + *out = *in + if in.StrategiesProcessed != nil { + in, out := &in.StrategiesProcessed, &out.StrategiesProcessed + *out = make(map[ArtifactGCStrategy]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PodsRecouped != nil { + in, out := &in.PodsRecouped, &out.PodsRecouped + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtGCStatus. +func (in *ArtGCStatus) DeepCopy() *ArtGCStatus { + if in == nil { + return nil + } + out := new(ArtGCStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Artifact) DeepCopyInto(out *Artifact) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + in.ArtifactLocation.DeepCopyInto(&out.ArtifactLocation) + if in.Archive != nil { + in, out := &in.Archive, &out.Archive + *out = new(ArchiveStrategy) + (*in).DeepCopyInto(*out) + } + if in.ArtifactGC != nil { + in, out := &in.ArtifactGC, &out.ArtifactGC + *out = new(ArtifactGC) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. +func (in *Artifact) DeepCopy() *Artifact { + if in == nil { + return nil + } + out := new(Artifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactGC) DeepCopyInto(out *ArtifactGC) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGC. +func (in *ArtifactGC) DeepCopy() *ArtifactGC { + if in == nil { + return nil + } + out := new(ArtifactGC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactGCSpec) DeepCopyInto(out *ArtifactGCSpec) { + *out = *in + if in.ArtifactsByNode != nil { + in, out := &in.ArtifactsByNode, &out.ArtifactsByNode + *out = make(map[string]ArtifactNodeSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGCSpec. 
+func (in *ArtifactGCSpec) DeepCopy() *ArtifactGCSpec { + if in == nil { + return nil + } + out := new(ArtifactGCSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactGCStatus) DeepCopyInto(out *ArtifactGCStatus) { + *out = *in + if in.ArtifactResultsByNode != nil { + in, out := &in.ArtifactResultsByNode, &out.ArtifactResultsByNode + *out = make(map[string]ArtifactResultNodeStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGCStatus. +func (in *ArtifactGCStatus) DeepCopy() *ArtifactGCStatus { + if in == nil { + return nil + } + out := new(ArtifactGCStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactLocation) DeepCopyInto(out *ArtifactLocation) { + *out = *in + if in.ArchiveLogs != nil { + in, out := &in.ArchiveLogs, &out.ArchiveLogs + *out = new(bool) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Artifact) + (*in).DeepCopyInto(*out) + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitArtifact) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPArtifact) + (*in).DeepCopyInto(*out) + } + if in.Artifactory != nil { + in, out := &in.Artifactory, &out.Artifactory + *out = new(ArtifactoryArtifact) + (*in).DeepCopyInto(*out) + } + if in.HDFS != nil { + in, out := &in.HDFS, &out.HDFS + *out = new(HDFSArtifact) + (*in).DeepCopyInto(*out) + } + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = new(RawArtifact) + **out = **in + } + if in.OSS != nil { + in, out := &in.OSS, &out.OSS + *out = new(OSSArtifact) + (*in).DeepCopyInto(*out) + } + if in.GCS != nil { + in, out := &in.GCS, &out.GCS + *out = new(GCSArtifact) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureArtifact) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactLocation. +func (in *ArtifactLocation) DeepCopy() *ArtifactLocation { + if in == nil { + return nil + } + out := new(ArtifactLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactNodeSpec) DeepCopyInto(out *ArtifactNodeSpec) { + *out = *in + if in.ArchiveLocation != nil { + in, out := &in.ArchiveLocation, &out.ArchiveLocation + *out = new(ArtifactLocation) + (*in).DeepCopyInto(*out) + } + if in.Artifacts != nil { + in, out := &in.Artifacts, &out.Artifacts + *out = make(map[string]Artifact, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactNodeSpec. +func (in *ArtifactNodeSpec) DeepCopy() *ArtifactNodeSpec { + if in == nil { + return nil + } + out := new(ArtifactNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArtifactPaths) DeepCopyInto(out *ArtifactPaths) { + *out = *in + in.Artifact.DeepCopyInto(&out.Artifact) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactPaths. +func (in *ArtifactPaths) DeepCopy() *ArtifactPaths { + if in == nil { + return nil + } + out := new(ArtifactPaths) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactRepository) DeepCopyInto(out *ArtifactRepository) { + *out = *in + if in.ArchiveLogs != nil { + in, out := &in.ArchiveLogs, &out.ArchiveLogs + *out = new(bool) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3ArtifactRepository) + (*in).DeepCopyInto(*out) + } + if in.Artifactory != nil { + in, out := &in.Artifactory, &out.Artifactory + *out = new(ArtifactoryArtifactRepository) + (*in).DeepCopyInto(*out) + } + if in.HDFS != nil { + in, out := &in.HDFS, &out.HDFS + *out = new(HDFSArtifactRepository) + (*in).DeepCopyInto(*out) + } + if in.OSS != nil { + in, out := &in.OSS, &out.OSS + *out = new(OSSArtifactRepository) + (*in).DeepCopyInto(*out) + } + if in.GCS != nil { + in, out := &in.GCS, &out.GCS + *out = new(GCSArtifactRepository) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureArtifactRepository) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepository. +func (in *ArtifactRepository) DeepCopy() *ArtifactRepository { + if in == nil { + return nil + } + out := new(ArtifactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactRepositoryRef) DeepCopyInto(out *ArtifactRepositoryRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepositoryRef. +func (in *ArtifactRepositoryRef) DeepCopy() *ArtifactRepositoryRef { + if in == nil { + return nil + } + out := new(ArtifactRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactRepositoryRefStatus) DeepCopyInto(out *ArtifactRepositoryRefStatus) { + *out = *in + out.ArtifactRepositoryRef = in.ArtifactRepositoryRef + if in.ArtifactRepository != nil { + in, out := &in.ArtifactRepository, &out.ArtifactRepository + *out = new(ArtifactRepository) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepositoryRefStatus. +func (in *ArtifactRepositoryRefStatus) DeepCopy() *ArtifactRepositoryRefStatus { + if in == nil { + return nil + } + out := new(ArtifactRepositoryRefStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactResult) DeepCopyInto(out *ArtifactResult) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactResult. 
+func (in *ArtifactResult) DeepCopy() *ArtifactResult { + if in == nil { + return nil + } + out := new(ArtifactResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactResultNodeStatus) DeepCopyInto(out *ArtifactResultNodeStatus) { + *out = *in + if in.ArtifactResults != nil { + in, out := &in.ArtifactResults, &out.ArtifactResults + *out = make(map[string]ArtifactResult, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactResultNodeStatus. +func (in *ArtifactResultNodeStatus) DeepCopy() *ArtifactResultNodeStatus { + if in == nil { + return nil + } + out := new(ArtifactResultNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactSearchQuery) DeepCopyInto(out *ArtifactSearchQuery) { + *out = *in + if in.ArtifactGCStrategies != nil { + in, out := &in.ArtifactGCStrategies, &out.ArtifactGCStrategies + *out = make(map[ArtifactGCStrategy]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Deleted != nil { + in, out := &in.Deleted, &out.Deleted + *out = new(bool) + **out = **in + } + if in.NodeTypes != nil { + in, out := &in.NodeTypes, &out.NodeTypes + *out = make(map[NodeType]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchQuery. +func (in *ArtifactSearchQuery) DeepCopy() *ArtifactSearchQuery { + if in == nil { + return nil + } + out := new(ArtifactSearchQuery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactSearchResult) DeepCopyInto(out *ArtifactSearchResult) { + *out = *in + in.Artifact.DeepCopyInto(&out.Artifact) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchResult. +func (in *ArtifactSearchResult) DeepCopy() *ArtifactSearchResult { + if in == nil { + return nil + } + out := new(ArtifactSearchResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ArtifactSearchResults) DeepCopyInto(out *ArtifactSearchResults) { + { + in := &in + *out = make(ArtifactSearchResults, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchResults. +func (in ArtifactSearchResults) DeepCopy() ArtifactSearchResults { + if in == nil { + return nil + } + out := new(ArtifactSearchResults) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactoryArtifact) DeepCopyInto(out *ArtifactoryArtifact) { + *out = *in + in.ArtifactoryAuth.DeepCopyInto(&out.ArtifactoryAuth) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryArtifact. 
+func (in *ArtifactoryArtifact) DeepCopy() *ArtifactoryArtifact { + if in == nil { + return nil + } + out := new(ArtifactoryArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactoryArtifactRepository) DeepCopyInto(out *ArtifactoryArtifactRepository) { + *out = *in + in.ArtifactoryAuth.DeepCopyInto(&out.ArtifactoryAuth) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryArtifactRepository. +func (in *ArtifactoryArtifactRepository) DeepCopy() *ArtifactoryArtifactRepository { + if in == nil { + return nil + } + out := new(ArtifactoryArtifactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactoryAuth) DeepCopyInto(out *ArtifactoryAuth) { + *out = *in + if in.UsernameSecret != nil { + in, out := &in.UsernameSecret, &out.UsernameSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecret != nil { + in, out := &in.PasswordSecret, &out.PasswordSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryAuth. +func (in *ArtifactoryAuth) DeepCopy() *ArtifactoryAuth { + if in == nil { + return nil + } + out := new(ArtifactoryAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Artifacts) DeepCopyInto(out *Artifacts) { + { + in := &in + *out = make(Artifacts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifacts. +func (in Artifacts) DeepCopy() Artifacts { + if in == nil { + return nil + } + out := new(Artifacts) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureArtifact) DeepCopyInto(out *AzureArtifact) { + *out = *in + in.AzureBlobContainer.DeepCopyInto(&out.AzureBlobContainer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureArtifact. +func (in *AzureArtifact) DeepCopy() *AzureArtifact { + if in == nil { + return nil + } + out := new(AzureArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureArtifactRepository) DeepCopyInto(out *AzureArtifactRepository) { + *out = *in + in.AzureBlobContainer.DeepCopyInto(&out.AzureBlobContainer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureArtifactRepository. +func (in *AzureArtifactRepository) DeepCopy() *AzureArtifactRepository { + if in == nil { + return nil + } + out := new(AzureArtifactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobContainer) DeepCopyInto(out *AzureBlobContainer) { + *out = *in + if in.AccountKeySecret != nil { + in, out := &in.AccountKeySecret, &out.AccountKeySecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobContainer. +func (in *AzureBlobContainer) DeepCopy() *AzureBlobContainer { + if in == nil { + return nil + } + out := new(AzureBlobContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backoff) DeepCopyInto(out *Backoff) { + *out = *in + if in.Factor != nil { + in, out := &in.Factor, &out.Factor + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backoff. +func (in *Backoff) DeepCopy() *Backoff { + if in == nil { + return nil + } + out := new(Backoff) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { + *out = *in + if in.UsernameSecret != nil { + in, out := &in.UsernameSecret, &out.UsernameSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecret != nil { + in, out := &in.PasswordSecret, &out.PasswordSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. +func (in *BasicAuth) DeepCopy() *BasicAuth { + if in == nil { + return nil + } + out := new(BasicAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cache) DeepCopyInto(out *Cache) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(v1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cache. +func (in *Cache) DeepCopy() *Cache { + if in == nil { + return nil + } + out := new(Cache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientCertAuth) DeepCopyInto(out *ClientCertAuth) { + *out = *in + if in.ClientCertSecret != nil { + in, out := &in.ClientCertSecret, &out.ClientCertSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ClientKeySecret != nil { + in, out := &in.ClientKeySecret, &out.ClientKeySecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertAuth. +func (in *ClientCertAuth) DeepCopy() *ClientCertAuth { + if in == nil { + return nil + } + out := new(ClientCertAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
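+//
+// A common pattern this enables (a sketch, not prescribed by this file):
+// objects read from a shared informer cache must be deep-copied before any
+// mutation, e.g.:
+//
+//	cached := obj.(*ClusterWorkflowTemplate) // obj from a lister/cache
+//	cwt := cached.DeepCopy()
+//	cwt.Spec.Entrypoint = "main" // safe: the cached object is untouched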
+func (in *ClusterWorkflowTemplate) DeepCopyInto(out *ClusterWorkflowTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplate. +func (in *ClusterWorkflowTemplate) DeepCopy() *ClusterWorkflowTemplate { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterWorkflowTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterWorkflowTemplateList) DeepCopyInto(out *ClusterWorkflowTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make(ClusterWorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplateList. +func (in *ClusterWorkflowTemplateList) DeepCopy() *ClusterWorkflowTemplateList { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterWorkflowTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ClusterWorkflowTemplates) DeepCopyInto(out *ClusterWorkflowTemplates) { + { + in := &in + *out = make(ClusterWorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplates. +func (in ClusterWorkflowTemplates) DeepCopy() ClusterWorkflowTemplates { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplates) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Column) DeepCopyInto(out *Column) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Column. +func (in *Column) DeepCopy() *Column { + if in == nil { + return nil + } + out := new(Column) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
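+//
+// Note (illustrative): slice types such as Conditions implement DeepCopy with
+// value receivers, so no pointer is needed to copy them:
+//
+//	var conds Conditions
+//	copied := conds.DeepCopy() // returns Conditions, not *Conditions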
+func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNode) DeepCopyInto(out *ContainerNode) { + *out = *in + in.Container.DeepCopyInto(&out.Container) + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNode. +func (in *ContainerNode) DeepCopy() *ContainerNode { + if in == nil { + return nil + } + out := new(ContainerNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSetRetryStrategy) DeepCopyInto(out *ContainerSetRetryStrategy) { + *out = *in + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSetRetryStrategy. +func (in *ContainerSetRetryStrategy) DeepCopy() *ContainerSetRetryStrategy { + if in == nil { + return nil + } + out := new(ContainerSetRetryStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSetTemplate) DeepCopyInto(out *ContainerSetTemplate) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(ContainerSetRetryStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSetTemplate. +func (in *ContainerSetTemplate) DeepCopy() *ContainerSetTemplate { + if in == nil { + return nil + } + out := new(ContainerSetTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContinueOn) DeepCopyInto(out *ContinueOn) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContinueOn. +func (in *ContinueOn) DeepCopy() *ContinueOn { + if in == nil { + return nil + } + out := new(ContinueOn) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Counter) DeepCopyInto(out *Counter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Counter. 
+func (in *Counter) DeepCopy() *Counter { + if in == nil { + return nil + } + out := new(Counter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateS3BucketOptions) DeepCopyInto(out *CreateS3BucketOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateS3BucketOptions. +func (in *CreateS3BucketOptions) DeepCopy() *CreateS3BucketOptions { + if in == nil { + return nil + } + out := new(CreateS3BucketOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronWorkflow) DeepCopyInto(out *CronWorkflow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflow. +func (in *CronWorkflow) DeepCopy() *CronWorkflow { + if in == nil { + return nil + } + out := new(CronWorkflow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronWorkflow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronWorkflowList) DeepCopyInto(out *CronWorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronWorkflow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowList. +func (in *CronWorkflowList) DeepCopy() *CronWorkflowList { + if in == nil { + return nil + } + out := new(CronWorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronWorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronWorkflowSpec) DeepCopyInto(out *CronWorkflowSpec) { + *out = *in + in.WorkflowSpec.DeepCopyInto(&out.WorkflowSpec) + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.WorkflowMetadata != nil { + in, out := &in.WorkflowMetadata, &out.WorkflowMetadata + *out = new(metav1.ObjectMeta) + (*in).DeepCopyInto(*out) + } + if in.StopStrategy != nil { + in, out := &in.StopStrategy, &out.StopStrategy + *out = new(StopStrategy) + **out = **in + } + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowSpec. +func (in *CronWorkflowSpec) DeepCopy() *CronWorkflowSpec { + if in == nil { + return nil + } + out := new(CronWorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronWorkflowStatus) DeepCopyInto(out *CronWorkflowStatus) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduledTime != nil { + in, out := &in.LastScheduledTime, &out.LastScheduledTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowStatus. +func (in *CronWorkflowStatus) DeepCopy() *CronWorkflowStatus { + if in == nil { + return nil + } + out := new(CronWorkflowStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DAGTask) DeepCopyInto(out *DAGTask) { + *out = *in + if in.Inline != nil { + in, out := &in.Inline, &out.Inline + *out = new(Template) + (*in).DeepCopyInto(*out) + } + in.Arguments.DeepCopyInto(&out.Arguments) + if in.TemplateRef != nil { + in, out := &in.TemplateRef, &out.TemplateRef + *out = new(TemplateRef) + **out = **in + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.WithItems != nil { + in, out := &in.WithItems, &out.WithItems + *out = make([]Item, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WithSequence != nil { + in, out := &in.WithSequence, &out.WithSequence + *out = new(Sequence) + (*in).DeepCopyInto(*out) + } + if in.ContinueOn != nil { + in, out := &in.ContinueOn, &out.ContinueOn + *out = new(ContinueOn) + **out = **in + } + if in.Hooks != nil { + in, out := &in.Hooks, &out.Hooks + *out = make(LifecycleHooks, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DAGTask. 
+func (in *DAGTask) DeepCopy() *DAGTask { + if in == nil { + return nil + } + out := new(DAGTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DAGTemplate) DeepCopyInto(out *DAGTemplate) { + *out = *in + if in.Tasks != nil { + in, out := &in.Tasks, &out.Tasks + *out = make([]DAGTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailFast != nil { + in, out := &in.FailFast, &out.FailFast + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DAGTemplate. +func (in *DAGTemplate) DeepCopy() *DAGTemplate { + if in == nil { + return nil + } + out := new(DAGTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Data) DeepCopyInto(out *Data) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Transformation != nil { + in, out := &in.Transformation, &out.Transformation + *out = make(Transformation, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Data. +func (in *Data) DeepCopy() *Data { + if in == nil { + return nil + } + out := new(Data) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSource) DeepCopyInto(out *DataSource) { + *out = *in + if in.ArtifactPaths != nil { + in, out := &in.ArtifactPaths, &out.ArtifactPaths + *out = new(ArtifactPaths) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. +func (in *DataSource) DeepCopy() *DataSource { + if in == nil { + return nil + } + out := new(DataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutorConfig) DeepCopyInto(out *ExecutorConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorConfig. +func (in *ExecutorConfig) DeepCopy() *ExecutorConfig { + if in == nil { + return nil + } + out := new(ExecutorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCSArtifact) DeepCopyInto(out *GCSArtifact) { + *out = *in + in.GCSBucket.DeepCopyInto(&out.GCSBucket) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSArtifact. +func (in *GCSArtifact) DeepCopy() *GCSArtifact { + if in == nil { + return nil + } + out := new(GCSArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCSArtifactRepository) DeepCopyInto(out *GCSArtifactRepository) { + *out = *in + in.GCSBucket.DeepCopyInto(&out.GCSBucket) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSArtifactRepository. +func (in *GCSArtifactRepository) DeepCopy() *GCSArtifactRepository { + if in == nil { + return nil + } + out := new(GCSArtifactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCSBucket) DeepCopyInto(out *GCSBucket) { + *out = *in + if in.ServiceAccountKeySecret != nil { + in, out := &in.ServiceAccountKeySecret, &out.ServiceAccountKeySecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSBucket. +func (in *GCSBucket) DeepCopy() *GCSBucket { + if in == nil { + return nil + } + out := new(GCSBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gauge) DeepCopyInto(out *Gauge) { + *out = *in + if in.Realtime != nil { + in, out := &in.Realtime, &out.Realtime + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gauge. +func (in *Gauge) DeepCopy() *Gauge { + if in == nil { + return nil + } + out := new(Gauge) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitArtifact) DeepCopyInto(out *GitArtifact) { + *out = *in + if in.Depth != nil { + in, out := &in.Depth, &out.Depth + *out = new(uint64) + **out = **in + } + if in.Fetch != nil { + in, out := &in.Fetch, &out.Fetch + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameSecret != nil { + in, out := &in.UsernameSecret, &out.UsernameSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecret != nil { + in, out := &in.PasswordSecret, &out.PasswordSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SSHPrivateKeySecret != nil { + in, out := &in.SSHPrivateKeySecret, &out.SSHPrivateKeySecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitArtifact. +func (in *GitArtifact) DeepCopy() *GitArtifact { + if in == nil { + return nil + } + out := new(GitArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSArtifact) DeepCopyInto(out *HDFSArtifact) { + *out = *in + in.HDFSConfig.DeepCopyInto(&out.HDFSConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSArtifact. +func (in *HDFSArtifact) DeepCopy() *HDFSArtifact { + if in == nil { + return nil + } + out := new(HDFSArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HDFSArtifactRepository) DeepCopyInto(out *HDFSArtifactRepository) { + *out = *in + in.HDFSConfig.DeepCopyInto(&out.HDFSConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSArtifactRepository. +func (in *HDFSArtifactRepository) DeepCopy() *HDFSArtifactRepository { + if in == nil { + return nil + } + out := new(HDFSArtifactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSConfig) DeepCopyInto(out *HDFSConfig) { + *out = *in + in.HDFSKrbConfig.DeepCopyInto(&out.HDFSKrbConfig) + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSConfig. +func (in *HDFSConfig) DeepCopy() *HDFSConfig { + if in == nil { + return nil + } + out := new(HDFSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSKrbConfig) DeepCopyInto(out *HDFSKrbConfig) { + *out = *in + if in.KrbCCacheSecret != nil { + in, out := &in.KrbCCacheSecret, &out.KrbCCacheSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.KrbKeytabSecret != nil { + in, out := &in.KrbKeytabSecret, &out.KrbKeytabSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.KrbConfigConfigMap != nil { + in, out := &in.KrbConfigConfigMap, &out.KrbConfigConfigMap + *out = new(v1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSKrbConfig. +func (in *HDFSKrbConfig) DeepCopy() *HDFSKrbConfig { + if in == nil { + return nil + } + out := new(HDFSKrbConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTP) DeepCopyInto(out *HTTP) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(HTTPHeaders, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.BodyFrom != nil { + in, out := &in.BodyFrom, &out.BodyFrom + *out = new(HTTPBodySource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. +func (in *HTTP) DeepCopy() *HTTP { + if in == nil { + return nil + } + out := new(HTTP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPArtifact) DeepCopyInto(out *HTTPArtifact) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]Header, len(*in)) + copy(*out, *in) + } + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(HTTPAuth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPArtifact. 
+func (in *HTTPArtifact) DeepCopy() *HTTPArtifact {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPArtifact)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPAuth) DeepCopyInto(out *HTTPAuth) {
+	*out = *in
+	in.ClientCert.DeepCopyInto(&out.ClientCert)
+	in.OAuth2.DeepCopyInto(&out.OAuth2)
+	in.BasicAuth.DeepCopyInto(&out.BasicAuth)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuth.
+func (in *HTTPAuth) DeepCopy() *HTTPAuth {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPAuth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPBodySource) DeepCopyInto(out *HTTPBodySource) {
+	*out = *in
+	if in.Bytes != nil {
+		in, out := &in.Bytes, &out.Bytes
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBodySource.
+func (in *HTTPBodySource) DeepCopy() *HTTPBodySource {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPBodySource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
+	*out = *in
+	if in.ValueFrom != nil {
+		in, out := &in.ValueFrom, &out.ValueFrom
+		*out = new(HTTPHeaderSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
+func (in *HTTPHeader) DeepCopy() *HTTPHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPHeaderSource) DeepCopyInto(out *HTTPHeaderSource) {
+	*out = *in
+	if in.SecretKeyRef != nil {
+		in, out := &in.SecretKeyRef, &out.SecretKeyRef
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderSource.
+func (in *HTTPHeaderSource) DeepCopy() *HTTPHeaderSource {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeaderSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in HTTPHeaders) DeepCopyInto(out *HTTPHeaders) {
+	{
+		in := &in
+		*out = make(HTTPHeaders, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaders.
+func (in HTTPHeaders) DeepCopy() HTTPHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeaders)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Header) DeepCopyInto(out *Header) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Header.
+func (in *Header) DeepCopy() *Header {
+	if in == nil {
+		return nil
+	}
+	out := new(Header)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Histogram) DeepCopyInto(out *Histogram) {
+	*out = *in
+	if in.Buckets != nil {
+		in, out := &in.Buckets, &out.Buckets
+		*out = make([]Amount, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Histogram.
+func (in *Histogram) DeepCopy() *Histogram {
+	if in == nil {
+		return nil
+	}
+	out := new(Histogram)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Inputs) DeepCopyInto(out *Inputs) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make([]Parameter, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Artifacts != nil {
+		in, out := &in.Artifacts, &out.Artifacts
+		*out = make(Artifacts, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inputs.
+func (in *Inputs) DeepCopy() *Inputs {
+	if in == nil {
+		return nil
+	}
+	out := new(Inputs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Item.
+func (in *Item) DeepCopy() *Item {
+	if in == nil {
+		return nil
+	}
+	out := new(Item)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelKeys) DeepCopyInto(out *LabelKeys) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelKeys.
+func (in *LabelKeys) DeepCopy() *LabelKeys {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelKeys)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelValueFrom) DeepCopyInto(out *LabelValueFrom) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValueFrom.
+func (in *LabelValueFrom) DeepCopy() *LabelValueFrom {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelValueFrom)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelValues) DeepCopyInto(out *LabelValues) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValues.
+func (in *LabelValues) DeepCopy() *LabelValues {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelValues)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) {
+	*out = *in
+	in.Arguments.DeepCopyInto(&out.Arguments)
+	if in.TemplateRef != nil {
+		in, out := &in.TemplateRef, &out.TemplateRef
+		*out = new(TemplateRef)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook.
+func (in *LifecycleHook) DeepCopy() *LifecycleHook {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleHook)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in LifecycleHooks) DeepCopyInto(out *LifecycleHooks) {
+	{
+		in := &in
+		*out = make(LifecycleHooks, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHooks.
+func (in LifecycleHooks) DeepCopy() LifecycleHooks {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleHooks)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Link) DeepCopyInto(out *Link) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link.
+func (in *Link) DeepCopy() *Link {
+	if in == nil {
+		return nil
+	}
+	out := new(Link)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManifestFrom) DeepCopyInto(out *ManifestFrom) {
+	*out = *in
+	if in.Artifact != nil {
+		in, out := &in.Artifact, &out.Artifact
+		*out = new(Artifact)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestFrom.
+func (in *ManifestFrom) DeepCopy() *ManifestFrom {
+	if in == nil {
+		return nil
+	}
+	out := new(ManifestFrom)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemoizationStatus) DeepCopyInto(out *MemoizationStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoizationStatus.
+func (in *MemoizationStatus) DeepCopy() *MemoizationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MemoizationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Memoize) DeepCopyInto(out *Memoize) {
+	*out = *in
+	if in.Cache != nil {
+		in, out := &in.Cache, &out.Cache
+		*out = new(Cache)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Memoize.
+func (in *Memoize) DeepCopy() *Memoize {
+	if in == nil {
+		return nil
+	}
+	out := new(Memoize)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Metadata) DeepCopyInto(out *Metadata) {
+	*out = *in
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
+func (in *Metadata) DeepCopy() *Metadata {
+	if in == nil {
+		return nil
+	}
+	out := new(Metadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricLabel) DeepCopyInto(out *MetricLabel) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricLabel.
+func (in *MetricLabel) DeepCopy() *MetricLabel {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricLabel)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Metrics) DeepCopyInto(out *Metrics) {
+	*out = *in
+	if in.Prometheus != nil {
+		in, out := &in.Prometheus, &out.Prometheus
+		*out = make([]*Prometheus, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(Prometheus)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics.
+func (in *Metrics) DeepCopy() *Metrics {
+	if in == nil {
+		return nil
+	}
+	out := new(Metrics)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mutex) DeepCopyInto(out *Mutex) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutex.
+func (in *Mutex) DeepCopy() *Mutex {
+	if in == nil {
+		return nil
+	}
+	out := new(Mutex)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutexHolding) DeepCopyInto(out *MutexHolding) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutexHolding.
+func (in *MutexHolding) DeepCopy() *MutexHolding {
+	if in == nil {
+		return nil
+	}
+	out := new(MutexHolding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutexStatus) DeepCopyInto(out *MutexStatus) {
+	*out = *in
+	if in.Holding != nil {
+		in, out := &in.Holding, &out.Holding
+		*out = make([]MutexHolding, len(*in))
+		copy(*out, *in)
+	}
+	if in.Waiting != nil {
+		in, out := &in.Waiting, &out.Waiting
+		*out = make([]MutexHolding, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutexStatus.
+func (in *MutexStatus) DeepCopy() *MutexStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MutexStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeFlag) DeepCopyInto(out *NodeFlag) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFlag.
+func (in *NodeFlag) DeepCopy() *NodeFlag {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeFlag)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeResult) DeepCopyInto(out *NodeResult) {
+	*out = *in
+	if in.Outputs != nil {
+		in, out := &in.Outputs, &out.Outputs
+		*out = new(Outputs)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResult.
+func (in *NodeResult) DeepCopy() *NodeResult {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.TemplateRef != nil {
+		in, out := &in.TemplateRef, &out.TemplateRef
+		*out = new(TemplateRef)
+		**out = **in
+	}
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	in.FinishedAt.DeepCopyInto(&out.FinishedAt)
+	if in.ResourcesDuration != nil {
+		in, out := &in.ResourcesDuration, &out.ResourcesDuration
+		*out = make(ResourcesDuration, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Daemoned != nil {
+		in, out := &in.Daemoned, &out.Daemoned
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NodeFlag != nil {
+		in, out := &in.NodeFlag, &out.NodeFlag
+		*out = new(NodeFlag)
+		**out = **in
+	}
+	if in.Inputs != nil {
+		in, out := &in.Inputs, &out.Inputs
+		*out = new(Inputs)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Outputs != nil {
+		in, out := &in.Outputs, &out.Outputs
+		*out = new(Outputs)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Children != nil {
+		in, out := &in.Children, &out.Children
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.OutboundNodes != nil {
+		in, out := &in.OutboundNodes, &out.OutboundNodes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.MemoizationStatus != nil {
+		in, out := &in.MemoizationStatus, &out.MemoizationStatus
+		*out = new(MemoizationStatus)
+		**out = **in
+	}
+	if in.SynchronizationStatus != nil {
+		in, out := &in.SynchronizationStatus, &out.SynchronizationStatus
+		*out = new(NodeSynchronizationStatus)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSynchronizationStatus) DeepCopyInto(out *NodeSynchronizationStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSynchronizationStatus.
+func (in *NodeSynchronizationStatus) DeepCopy() *NodeSynchronizationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSynchronizationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Nodes) DeepCopyInto(out *Nodes) {
+	{
+		in := &in
+		*out = make(Nodes, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nodes.
+func (in Nodes) DeepCopy() Nodes {
+	if in == nil {
+		return nil
+	}
+	out := new(Nodes)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoneStrategy) DeepCopyInto(out *NoneStrategy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoneStrategy.
+func (in *NoneStrategy) DeepCopy() *NoneStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(NoneStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuth2Auth) DeepCopyInto(out *OAuth2Auth) {
+	*out = *in
+	if in.ClientIDSecret != nil {
+		in, out := &in.ClientIDSecret, &out.ClientIDSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClientSecretSecret != nil {
+		in, out := &in.ClientSecretSecret, &out.ClientSecretSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TokenURLSecret != nil {
+		in, out := &in.TokenURLSecret, &out.TokenURLSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.EndpointParams != nil {
+		in, out := &in.EndpointParams, &out.EndpointParams
+		*out = make([]OAuth2EndpointParam, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2Auth.
+func (in *OAuth2Auth) DeepCopy() *OAuth2Auth {
+	if in == nil {
+		return nil
+	}
+	out := new(OAuth2Auth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuth2EndpointParam) DeepCopyInto(out *OAuth2EndpointParam) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2EndpointParam.
+func (in *OAuth2EndpointParam) DeepCopy() *OAuth2EndpointParam {
+	if in == nil {
+		return nil
+	}
+	out := new(OAuth2EndpointParam)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSSArtifact) DeepCopyInto(out *OSSArtifact) {
+	*out = *in
+	in.OSSBucket.DeepCopyInto(&out.OSSBucket)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSArtifact.
+func (in *OSSArtifact) DeepCopy() *OSSArtifact {
+	if in == nil {
+		return nil
+	}
+	out := new(OSSArtifact)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSSArtifactRepository) DeepCopyInto(out *OSSArtifactRepository) {
+	*out = *in
+	in.OSSBucket.DeepCopyInto(&out.OSSBucket)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSArtifactRepository.
+func (in *OSSArtifactRepository) DeepCopy() *OSSArtifactRepository {
+	if in == nil {
+		return nil
+	}
+	out := new(OSSArtifactRepository)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSSBucket) DeepCopyInto(out *OSSBucket) {
+	*out = *in
+	if in.AccessKeySecret != nil {
+		in, out := &in.AccessKeySecret, &out.AccessKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretKeySecret != nil {
+		in, out := &in.SecretKeySecret, &out.SecretKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LifecycleRule != nil {
+		in, out := &in.LifecycleRule, &out.LifecycleRule
+		*out = new(OSSLifecycleRule)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSBucket.
+func (in *OSSBucket) DeepCopy() *OSSBucket {
+	if in == nil {
+		return nil
+	}
+	out := new(OSSBucket)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSSLifecycleRule) DeepCopyInto(out *OSSLifecycleRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSLifecycleRule.
+func (in *OSSLifecycleRule) DeepCopy() *OSSLifecycleRule {
+	if in == nil {
+		return nil
+	}
+	out := new(OSSLifecycleRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Object) DeepCopyInto(out *Object) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make(json.RawMessage, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Object.
+func (in *Object) DeepCopy() *Object {
+	if in == nil {
+		return nil
+	}
+	out := new(Object)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Outputs) DeepCopyInto(out *Outputs) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make([]Parameter, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Artifacts != nil {
+		in, out := &in.Artifacts, &out.Artifacts
+		*out = make(Artifacts, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Result != nil {
+		in, out := &in.Result, &out.Result
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExitCode != nil {
+		in, out := &in.ExitCode, &out.ExitCode
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Outputs.
+func (in *Outputs) DeepCopy() *Outputs {
+	if in == nil {
+		return nil
+	}
+	out := new(Outputs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelSteps) DeepCopyInto(out *ParallelSteps) {
+	*out = *in
+	if in.Steps != nil {
+		in, out := &in.Steps, &out.Steps
+		*out = make([]WorkflowStep, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelSteps.
+func (in *ParallelSteps) DeepCopy() *ParallelSteps {
+	if in == nil {
+		return nil
+	}
+	out := new(ParallelSteps)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Parameter) DeepCopyInto(out *Parameter) {
+	*out = *in
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = new(AnyString)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(AnyString)
+		**out = **in
+	}
+	if in.ValueFrom != nil {
+		in, out := &in.ValueFrom, &out.ValueFrom
+		*out = new(ValueFrom)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Enum != nil {
+		in, out := &in.Enum, &out.Enum
+		*out = make([]AnyString, len(*in))
+		copy(*out, *in)
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(AnyString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
+func (in *Parameter) DeepCopy() *Parameter {
+	if in == nil {
+		return nil
+	}
+	out := new(Parameter)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plugin) DeepCopyInto(out *Plugin) {
+	*out = *in
+	in.Object.DeepCopyInto(&out.Object)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
+func (in *Plugin) DeepCopy() *Plugin {
+	if in == nil {
+		return nil
+	}
+	out := new(Plugin)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodGC) DeepCopyInto(out *PodGC) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGC.
+func (in *PodGC) DeepCopy() *PodGC {
+	if in == nil {
+		return nil
+	}
+	out := new(PodGC)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Prometheus) DeepCopyInto(out *Prometheus) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make([]*MetricLabel, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(MetricLabel)
+				**out = **in
+			}
+		}
+	}
+	if in.Gauge != nil {
+		in, out := &in.Gauge, &out.Gauge
+		*out = new(Gauge)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Histogram != nil {
+		in, out := &in.Histogram, &out.Histogram
+		*out = new(Histogram)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Counter != nil {
+		in, out := &in.Counter, &out.Counter
+		*out = new(Counter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
+func (in *Prometheus) DeepCopy() *Prometheus {
+	if in == nil {
+		return nil
+	}
+	out := new(Prometheus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RawArtifact) DeepCopyInto(out *RawArtifact) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawArtifact.
+func (in *RawArtifact) DeepCopy() *RawArtifact {
+	if in == nil {
+		return nil
+	}
+	out := new(RawArtifact)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceTemplate) DeepCopyInto(out *ResourceTemplate) {
+	*out = *in
+	if in.ManifestFrom != nil {
+		in, out := &in.ManifestFrom, &out.ManifestFrom
+		*out = new(ManifestFrom)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flags != nil {
+		in, out := &in.Flags, &out.Flags
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTemplate.
+func (in *ResourceTemplate) DeepCopy() *ResourceTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourcesDuration) DeepCopyInto(out *ResourcesDuration) {
+	{
+		in := &in
+		*out = make(ResourcesDuration, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesDuration.
+func (in ResourcesDuration) DeepCopy() ResourcesDuration {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourcesDuration)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetryAffinity) DeepCopyInto(out *RetryAffinity) {
+	*out = *in
+	if in.NodeAntiAffinity != nil {
+		in, out := &in.NodeAntiAffinity, &out.NodeAntiAffinity
+		*out = new(RetryNodeAntiAffinity)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryAffinity.
+func (in *RetryAffinity) DeepCopy() *RetryAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(RetryAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetryNodeAntiAffinity) DeepCopyInto(out *RetryNodeAntiAffinity) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryNodeAntiAffinity.
+func (in *RetryNodeAntiAffinity) DeepCopy() *RetryNodeAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(RetryNodeAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetryStrategy) DeepCopyInto(out *RetryStrategy) {
+	*out = *in
+	if in.Limit != nil {
+		in, out := &in.Limit, &out.Limit
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.Backoff != nil {
+		in, out := &in.Backoff, &out.Backoff
+		*out = new(Backoff)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(RetryAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategy.
+func (in *RetryStrategy) DeepCopy() *RetryStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(RetryStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3Artifact) DeepCopyInto(out *S3Artifact) {
+	*out = *in
+	in.S3Bucket.DeepCopyInto(&out.S3Bucket)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Artifact.
+func (in *S3Artifact) DeepCopy() *S3Artifact {
+	if in == nil {
+		return nil
+	}
+	out := new(S3Artifact)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3ArtifactRepository) DeepCopyInto(out *S3ArtifactRepository) {
+	*out = *in
+	in.S3Bucket.DeepCopyInto(&out.S3Bucket)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ArtifactRepository.
+func (in *S3ArtifactRepository) DeepCopy() *S3ArtifactRepository {
+	if in == nil {
+		return nil
+	}
+	out := new(S3ArtifactRepository)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3Bucket) DeepCopyInto(out *S3Bucket) {
+	*out = *in
+	if in.Insecure != nil {
+		in, out := &in.Insecure, &out.Insecure
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AccessKeySecret != nil {
+		in, out := &in.AccessKeySecret, &out.AccessKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretKeySecret != nil {
+		in, out := &in.SecretKeySecret, &out.SecretKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SessionTokenSecret != nil {
+		in, out := &in.SessionTokenSecret, &out.SessionTokenSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CreateBucketIfNotPresent != nil {
+		in, out := &in.CreateBucketIfNotPresent, &out.CreateBucketIfNotPresent
+		*out = new(CreateS3BucketOptions)
+		**out = **in
+	}
+	if in.EncryptionOptions != nil {
+		in, out := &in.EncryptionOptions, &out.EncryptionOptions
+		*out = new(S3EncryptionOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CASecret != nil {
+		in, out := &in.CASecret, &out.CASecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Bucket.
+func (in *S3Bucket) DeepCopy() *S3Bucket {
+	if in == nil {
+		return nil
+	}
+	out := new(S3Bucket)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3EncryptionOptions) DeepCopyInto(out *S3EncryptionOptions) {
+	*out = *in
+	if in.ServerSideCustomerKeySecret != nil {
+		in, out := &in.ServerSideCustomerKeySecret, &out.ServerSideCustomerKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3EncryptionOptions.
+func (in *S3EncryptionOptions) DeepCopy() *S3EncryptionOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(S3EncryptionOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScriptTemplate) DeepCopyInto(out *ScriptTemplate) {
+	*out = *in
+	in.Container.DeepCopyInto(&out.Container)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptTemplate.
+func (in *ScriptTemplate) DeepCopy() *ScriptTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(ScriptTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SemaphoreHolding) DeepCopyInto(out *SemaphoreHolding) {
+	*out = *in
+	if in.Holders != nil {
+		in, out := &in.Holders, &out.Holders
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreHolding.
+func (in *SemaphoreHolding) DeepCopy() *SemaphoreHolding {
+	if in == nil {
+		return nil
+	}
+	out := new(SemaphoreHolding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SemaphoreRef) DeepCopyInto(out *SemaphoreRef) {
+	*out = *in
+	if in.ConfigMapKeyRef != nil {
+		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+		*out = new(v1.ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreRef.
+func (in *SemaphoreRef) DeepCopy() *SemaphoreRef {
+	if in == nil {
+		return nil
+	}
+	out := new(SemaphoreRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SemaphoreStatus) DeepCopyInto(out *SemaphoreStatus) {
+	*out = *in
+	if in.Holding != nil {
+		in, out := &in.Holding, &out.Holding
+		*out = make([]SemaphoreHolding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Waiting != nil {
+		in, out := &in.Waiting, &out.Waiting
+		*out = make([]SemaphoreHolding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreStatus.
+func (in *SemaphoreStatus) DeepCopy() *SemaphoreStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SemaphoreStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Sequence) DeepCopyInto(out *Sequence) {
+	*out = *in
+	if in.Count != nil {
+		in, out := &in.Count, &out.Count
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.Start != nil {
+		in, out := &in.Start, &out.Start
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.End != nil {
+		in, out := &in.End, &out.End
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sequence.
+func (in *Sequence) DeepCopy() *Sequence {
+	if in == nil {
+		return nil
+	}
+	out := new(Sequence)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StopStrategy) DeepCopyInto(out *StopStrategy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StopStrategy.
+func (in *StopStrategy) DeepCopy() *StopStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(StopStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Submit) DeepCopyInto(out *Submit) {
+	*out = *in
+	out.WorkflowTemplateRef = in.WorkflowTemplateRef
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Arguments != nil {
+		in, out := &in.Arguments, &out.Arguments
+		*out = new(Arguments)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Submit.
+func (in *Submit) DeepCopy() *Submit {
+	if in == nil {
+		return nil
+	}
+	out := new(Submit)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubmitOpts) DeepCopyInto(out *SubmitOpts) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.OwnerReference != nil {
+		in, out := &in.OwnerReference, &out.OwnerReference
+		*out = new(metav1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubmitOpts.
+func (in *SubmitOpts) DeepCopy() *SubmitOpts {
+	if in == nil {
+		return nil
+	}
+	out := new(SubmitOpts)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SuppliedValueFrom) DeepCopyInto(out *SuppliedValueFrom) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppliedValueFrom.
+func (in *SuppliedValueFrom) DeepCopy() *SuppliedValueFrom {
+	if in == nil {
+		return nil
+	}
+	out := new(SuppliedValueFrom)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SuspendTemplate) DeepCopyInto(out *SuspendTemplate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendTemplate.
+func (in *SuspendTemplate) DeepCopy() *SuspendTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(SuspendTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Synchronization) DeepCopyInto(out *Synchronization) {
+	*out = *in
+	if in.Semaphore != nil {
+		in, out := &in.Semaphore, &out.Semaphore
+		*out = new(SemaphoreRef)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Mutex != nil {
+		in, out := &in.Mutex, &out.Mutex
+		*out = new(Mutex)
+		**out = **in
+	}
+	if in.Semaphores != nil {
+		in, out := &in.Semaphores, &out.Semaphores
+		*out = make([]*SemaphoreRef, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(SemaphoreRef)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.Mutexes != nil {
+		in, out := &in.Mutexes, &out.Mutexes
+		*out = make([]*Mutex, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(Mutex)
+				**out = **in
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Synchronization.
+func (in *Synchronization) DeepCopy() *Synchronization {
+	if in == nil {
+		return nil
+	}
+	out := new(Synchronization)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SynchronizationStatus) DeepCopyInto(out *SynchronizationStatus) {
+	*out = *in
+	if in.Semaphore != nil {
+		in, out := &in.Semaphore, &out.Semaphore
+		*out = new(SemaphoreStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Mutex != nil {
+		in, out := &in.Mutex, &out.Mutex
+		*out = new(MutexStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronizationStatus.
+func (in *SynchronizationStatus) DeepCopy() *SynchronizationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SynchronizationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TTLStrategy) DeepCopyInto(out *TTLStrategy) {
+	*out = *in
+	if in.SecondsAfterCompletion != nil {
+		in, out := &in.SecondsAfterCompletion, &out.SecondsAfterCompletion
+		*out = new(int32)
+		**out = **in
+	}
+	if in.SecondsAfterSuccess != nil {
+		in, out := &in.SecondsAfterSuccess, &out.SecondsAfterSuccess
+		*out = new(int32)
+		**out = **in
+	}
+	if in.SecondsAfterFailure != nil {
+		in, out := &in.SecondsAfterFailure, &out.SecondsAfterFailure
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLStrategy.
+func (in *TTLStrategy) DeepCopy() *TTLStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(TTLStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TarStrategy) DeepCopyInto(out *TarStrategy) {
+	*out = *in
+	if in.CompressionLevel != nil {
+		in, out := &in.CompressionLevel, &out.CompressionLevel
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TarStrategy.
+func (in *TarStrategy) DeepCopy() *TarStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(TarStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Template) DeepCopyInto(out *Template) {
+	*out = *in
+	in.Inputs.DeepCopyInto(&out.Inputs)
+	in.Outputs.DeepCopyInto(&out.Outputs)
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(v1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Metadata.DeepCopyInto(&out.Metadata)
+	if in.Daemon != nil {
+		in, out := &in.Daemon, &out.Daemon
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Steps != nil {
+		in, out := &in.Steps, &out.Steps
+		*out = make([]ParallelSteps, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Container != nil {
+		in, out := &in.Container, &out.Container
+		*out = new(v1.Container)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ContainerSet != nil {
+		in, out := &in.ContainerSet, &out.ContainerSet
+		*out = new(ContainerSetTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Script != nil {
+		in, out := &in.Script, &out.Script
+		*out = new(ScriptTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DAG != nil {
+		in, out := &in.DAG, &out.DAG
+		*out = new(DAGTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(SuspendTemplate)
+		**out = **in
+	}
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = new(Data)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTP != nil {
+		in, out := &in.HTTP, &out.HTTP
+		*out = new(HTTP)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Plugin != nil {
+		in, out := &in.Plugin, &out.Plugin
+		*out = new(Plugin)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]UserContainer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Sidecars != nil {
+		in, out := &in.Sidecars, &out.Sidecars
+		*out = make([]UserContainer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ArchiveLocation != nil {
+		in, out := &in.ArchiveLocation, &out.ArchiveLocation
+		*out = new(ArtifactLocation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.RetryStrategy != nil {
+		in, out := &in.RetryStrategy, &out.RetryStrategy
+		*out = new(RetryStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Parallelism != nil {
+		in, out := &in.Parallelism, &out.Parallelism
+		*out = new(int64)
+		**out = **in
+	}
+	if in.FailFast != nil {
+		in, out := &in.FailFast, &out.FailFast
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Executor != nil {
+		in, out := &in.Executor, &out.Executor
+		*out = new(ExecutorConfig)
+		**out = **in
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]v1.HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(v1.PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = new(Metrics)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Synchronization != nil {
+		in, out := &in.Synchronization, &out.Synchronization
+		*out = new(Synchronization)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Memoize != nil {
+		in, out := &in.Memoize, &out.Memoize
+		*out = new(Memoize)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template.
+func (in *Template) DeepCopy() *Template {
+	if in == nil {
+		return nil
+	}
+	out := new(Template)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateRef) DeepCopyInto(out *TemplateRef) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateRef.
+func (in *TemplateRef) DeepCopy() *TemplateRef {
+	if in == nil {
+		return nil
+	}
+	out := new(TemplateRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Transformation) DeepCopyInto(out *Transformation) {
+	{
+		in := &in
+		*out = make(Transformation, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transformation.
+func (in Transformation) DeepCopy() Transformation {
+	if in == nil {
+		return nil
+	}
+	out := new(Transformation)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformationStep) DeepCopyInto(out *TransformationStep) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationStep.
+func (in *TransformationStep) DeepCopy() *TransformationStep {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformationStep)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserContainer) DeepCopyInto(out *UserContainer) {
+	*out = *in
+	in.Container.DeepCopyInto(&out.Container)
+	if in.MirrorVolumeMounts != nil {
+		in, out := &in.MirrorVolumeMounts, &out.MirrorVolumeMounts
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserContainer.
+func (in *UserContainer) DeepCopy() *UserContainer {
+	if in == nil {
+		return nil
+	}
+	out := new(UserContainer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
+	*out = *in
+	if in.Supplied != nil {
+		in, out := &in.Supplied, &out.Supplied
+		*out = new(SuppliedValueFrom)
+		**out = **in
+	}
+	if in.ConfigMapKeyRef != nil {
+		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+		*out = new(v1.ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = new(AnyString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
+func (in *ValueFrom) DeepCopy() *ValueFrom {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueFrom)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Version) DeepCopyInto(out *Version) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Version.
+func (in *Version) DeepCopy() *Version {
+	if in == nil {
+		return nil
+	}
+	out := new(Version)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeClaimGC) DeepCopyInto(out *VolumeClaimGC) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimGC.
+func (in *VolumeClaimGC) DeepCopy() *VolumeClaimGC {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeClaimGC)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Workflow) DeepCopyInto(out *Workflow) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflow.
+func (in *Workflow) DeepCopy() *Workflow {
+	if in == nil {
+		return nil
+	}
+	out := new(Workflow)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Workflow) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkflowArtifactGCTask) DeepCopyInto(out *WorkflowArtifactGCTask) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowArtifactGCTask.
+func (in *WorkflowArtifactGCTask) DeepCopy() *WorkflowArtifactGCTask {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkflowArtifactGCTask)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkflowArtifactGCTask) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkflowArtifactGCTaskList) DeepCopyInto(out *WorkflowArtifactGCTaskList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]WorkflowArtifactGCTask, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowArtifactGCTaskList.
+func (in *WorkflowArtifactGCTaskList) DeepCopy() *WorkflowArtifactGCTaskList {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkflowArtifactGCTaskList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkflowArtifactGCTaskList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkflowEventBinding) DeepCopyInto(out *WorkflowEventBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBinding.
+func (in *WorkflowEventBinding) DeepCopy() *WorkflowEventBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkflowEventBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkflowEventBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkflowEventBindingList) DeepCopyInto(out *WorkflowEventBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]WorkflowEventBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBindingList.
+func (in *WorkflowEventBindingList) DeepCopy() *WorkflowEventBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkflowEventBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkflowEventBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkflowEventBindingSpec) DeepCopyInto(out *WorkflowEventBindingSpec) {
+	*out = *in
+	out.Event = in.Event
+	if in.Submit != nil {
+		in, out := &in.Submit, &out.Submit
+		*out = new(Submit)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBindingSpec.
+func (in *WorkflowEventBindingSpec) DeepCopy() *WorkflowEventBindingSpec { + if in == nil { + return nil + } + out := new(WorkflowEventBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowLevelArtifactGC) DeepCopyInto(out *WorkflowLevelArtifactGC) { + *out = *in + in.ArtifactGC.DeepCopyInto(&out.ArtifactGC) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowLevelArtifactGC. +func (in *WorkflowLevelArtifactGC) DeepCopy() *WorkflowLevelArtifactGC { + if in == nil { + return nil + } + out := new(WorkflowLevelArtifactGC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make(Workflows, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowList. +func (in *WorkflowList) DeepCopy() *WorkflowList { + if in == nil { + return nil + } + out := new(WorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowMetadata) DeepCopyInto(out *WorkflowMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LabelsFrom != nil { + in, out := &in.LabelsFrom, &out.LabelsFrom + *out = make(map[string]LabelValueFrom, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowMetadata. +func (in *WorkflowMetadata) DeepCopy() *WorkflowMetadata { + if in == nil { + return nil + } + out := new(WorkflowMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
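+//
+// Editor's note: in the generated body that follows, scalar fields are
+// covered by the initial *out = *in shallow copy; every optional pointer,
+// slice, or map field is guarded by a nil check; and element types that
+// themselves contain pointers are copied via their own DeepCopyInto rather
+// than plain assignment or copy().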
+func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { + *out = *in + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Arguments.DeepCopyInto(&out.Arguments) + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.Executor != nil { + in, out := &in.Executor, &out.Executor + *out = new(ExecutorConfig) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int64) + **out = **in + } + if in.ArtifactRepositoryRef != nil { + in, out := &in.ArtifactRepositoryRef, &out.ArtifactRepositoryRef + *out = new(ArtifactRepositoryRef) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(v1.DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(v1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.TTLStrategy != nil { + in, out := &in.TTLStrategy, &out.TTLStrategy + *out = new(TTLStrategy) + (*in).DeepCopyInto(*out) + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } + if in.PodGC != nil { + in, out := &in.PodGC, &out.PodGC + *out = new(PodGC) + (*in).DeepCopyInto(*out) + } + if in.PodPriority != nil { + in, out := &in.PodPriority, &out.PodPriority + *out = new(int32) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]v1.HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.PodDisruptionBudget != nil { + in, out := &in.PodDisruptionBudget, &out.PodDisruptionBudget + *out = new(policyv1.PodDisruptionBudgetSpec) + (*in).DeepCopyInto(*out) + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = 
new(Metrics) + (*in).DeepCopyInto(*out) + } + if in.WorkflowTemplateRef != nil { + in, out := &in.WorkflowTemplateRef, &out.WorkflowTemplateRef + *out = new(WorkflowTemplateRef) + **out = **in + } + if in.Synchronization != nil { + in, out := &in.Synchronization, &out.Synchronization + *out = new(Synchronization) + (*in).DeepCopyInto(*out) + } + if in.VolumeClaimGC != nil { + in, out := &in.VolumeClaimGC, &out.VolumeClaimGC + *out = new(VolumeClaimGC) + **out = **in + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(RetryStrategy) + (*in).DeepCopyInto(*out) + } + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.TemplateDefaults != nil { + in, out := &in.TemplateDefaults, &out.TemplateDefaults + *out = new(Template) + (*in).DeepCopyInto(*out) + } + if in.ArchiveLogs != nil { + in, out := &in.ArchiveLogs, &out.ArchiveLogs + *out = new(bool) + **out = **in + } + if in.Hooks != nil { + in, out := &in.Hooks, &out.Hooks + *out = make(LifecycleHooks, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.WorkflowMetadata != nil { + in, out := &in.WorkflowMetadata, &out.WorkflowMetadata + *out = new(WorkflowMetadata) + (*in).DeepCopyInto(*out) + } + if in.ArtifactGC != nil { + in, out := &in.ArtifactGC, &out.ArtifactGC + *out = new(WorkflowLevelArtifactGC) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowSpec. +func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { + if in == nil { + return nil + } + out := new(WorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make(Nodes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.StoredTemplates != nil { + in, out := &in.StoredTemplates, &out.StoredTemplates + *out = make(map[string]Template, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.PersistentVolumeClaims != nil { + in, out := &in.PersistentVolumeClaims, &out.PersistentVolumeClaims + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = new(Outputs) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + copy(*out, *in) + } + if in.ResourcesDuration != nil { + in, out := &in.ResourcesDuration, &out.ResourcesDuration + *out = make(ResourcesDuration, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.StoredWorkflowSpec != nil { + in, out := &in.StoredWorkflowSpec, &out.StoredWorkflowSpec + *out = new(WorkflowSpec) + (*in).DeepCopyInto(*out) + } + if in.Synchronization != nil { + in, out := &in.Synchronization, &out.Synchronization + *out = new(SynchronizationStatus) + (*in).DeepCopyInto(*out) + } + if in.ArtifactRepositoryRef != nil { + in, out := &in.ArtifactRepositoryRef, &out.ArtifactRepositoryRef + *out = new(ArtifactRepositoryRefStatus) + (*in).DeepCopyInto(*out) + } + if in.ArtifactGCStatus != nil { + in, out := &in.ArtifactGCStatus, &out.ArtifactGCStatus + *out = new(ArtGCStatus) + (*in).DeepCopyInto(*out) + } + if in.TaskResultsCompletionStatus != nil { + in, out := &in.TaskResultsCompletionStatus, &out.TaskResultsCompletionStatus + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. +func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { + if in == nil { + return nil + } + out := new(WorkflowStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStep) DeepCopyInto(out *WorkflowStep) { + *out = *in + if in.Inline != nil { + in, out := &in.Inline, &out.Inline + *out = new(Template) + (*in).DeepCopyInto(*out) + } + in.Arguments.DeepCopyInto(&out.Arguments) + if in.TemplateRef != nil { + in, out := &in.TemplateRef, &out.TemplateRef + *out = new(TemplateRef) + **out = **in + } + if in.WithItems != nil { + in, out := &in.WithItems, &out.WithItems + *out = make([]Item, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WithSequence != nil { + in, out := &in.WithSequence, &out.WithSequence + *out = new(Sequence) + (*in).DeepCopyInto(*out) + } + if in.ContinueOn != nil { + in, out := &in.ContinueOn, &out.ContinueOn + *out = new(ContinueOn) + **out = **in + } + if in.Hooks != nil { + in, out := &in.Hooks, &out.Hooks + *out = make(LifecycleHooks, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStep. 
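+//
+// Illustrative use from a caller's perspective (an assumption, not part of
+// the generated API surface; the field values are hypothetical):
+//
+//	orig := &v1alpha1.WorkflowStep{Name: "build"}
+//	cp := orig.DeepCopy()
+//	cp.Name = "test" // orig.Name is still "build"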
+func (in *WorkflowStep) DeepCopy() *WorkflowStep { + if in == nil { + return nil + } + out := new(WorkflowStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskResult) DeepCopyInto(out *WorkflowTaskResult) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.NodeResult.DeepCopyInto(&out.NodeResult) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskResult. +func (in *WorkflowTaskResult) DeepCopy() *WorkflowTaskResult { + if in == nil { + return nil + } + out := new(WorkflowTaskResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTaskResult) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskResultList) DeepCopyInto(out *WorkflowTaskResultList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WorkflowTaskResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskResultList. +func (in *WorkflowTaskResultList) DeepCopy() *WorkflowTaskResultList { + if in == nil { + return nil + } + out := new(WorkflowTaskResultList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTaskResultList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskSet) DeepCopyInto(out *WorkflowTaskSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSet. +func (in *WorkflowTaskSet) DeepCopy() *WorkflowTaskSet { + if in == nil { + return nil + } + out := new(WorkflowTaskSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTaskSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskSetList) DeepCopyInto(out *WorkflowTaskSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WorkflowTaskSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetList. 
+func (in *WorkflowTaskSetList) DeepCopy() *WorkflowTaskSetList { + if in == nil { + return nil + } + out := new(WorkflowTaskSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTaskSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskSetSpec) DeepCopyInto(out *WorkflowTaskSetSpec) { + *out = *in + if in.Tasks != nil { + in, out := &in.Tasks, &out.Tasks + *out = make(map[string]Template, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetSpec. +func (in *WorkflowTaskSetSpec) DeepCopy() *WorkflowTaskSetSpec { + if in == nil { + return nil + } + out := new(WorkflowTaskSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTaskSetStatus) DeepCopyInto(out *WorkflowTaskSetStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make(map[string]NodeResult, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetStatus. +func (in *WorkflowTaskSetStatus) DeepCopy() *WorkflowTaskSetStatus { + if in == nil { + return nil + } + out := new(WorkflowTaskSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplate) DeepCopyInto(out *WorkflowTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplate. +func (in *WorkflowTemplate) DeepCopy() *WorkflowTemplate { + if in == nil { + return nil + } + out := new(WorkflowTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateList) DeepCopyInto(out *WorkflowTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make(WorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateList. +func (in *WorkflowTemplateList) DeepCopy() *WorkflowTemplateList { + if in == nil { + return nil + } + out := new(WorkflowTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
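+//
+// Editor's note: DeepCopyObject is what lets this type satisfy
+// k8s.io/apimachinery/pkg/runtime.Object, so generic client-go machinery
+// (informers, listers, workqueues) can clone it without knowing the
+// concrete type.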
+func (in *WorkflowTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateRef) DeepCopyInto(out *WorkflowTemplateRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateRef. +func (in *WorkflowTemplateRef) DeepCopy() *WorkflowTemplateRef { + if in == nil { + return nil + } + out := new(WorkflowTemplateRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in WorkflowTemplates) DeepCopyInto(out *WorkflowTemplates) { + { + in := &in + *out = make(WorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplates. +func (in WorkflowTemplates) DeepCopy() WorkflowTemplates { + if in == nil { + return nil + } + out := new(WorkflowTemplates) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Workflows) DeepCopyInto(out *Workflows) { + { + in := &in + *out = make(Workflows, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflows. +func (in Workflows) DeepCopy() Workflows { + if in == nil { + return nil + } + out := new(Workflows) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZipStrategy) DeepCopyInto(out *ZipStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipStrategy. +func (in *ZipStrategy) DeepCopy() *ZipStrategy { + if in == nil { + return nil + } + out := new(ZipStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 00000000..e1475cc2 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,81 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + argoprojv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
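+//
+// A minimal usage sketch (assumes a valid *rest.Config, e.g. built from a
+// kubeconfig via clientcmd; "argo" is a hypothetical namespace):
+//
+//	cs, err := versioned.NewForConfig(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	wfs, err := cs.ArgoprojV1alpha1().Workflows("argo").List(ctx, metav1.ListOptions{})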
+type Clientset struct { + *discovery.DiscoveryClient + argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client +} + +// ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client +func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { + return c.argoprojV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.argoprojV1alpha1 = argoprojv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go new file mode 100644 index 00000000..0e0c2a89 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000..14db57a5 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 00000000..e9726960 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + argoprojv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + argoprojv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go new file mode 100644 index 00000000..698f3c0f --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterWorkflowTemplatesGetter has a method to return a ClusterWorkflowTemplateInterface. +// A group's client should implement this interface. +type ClusterWorkflowTemplatesGetter interface { + ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface +} + +// ClusterWorkflowTemplateInterface has methods to work with ClusterWorkflowTemplate resources. 
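+//
+// ClusterWorkflowTemplates are cluster-scoped, so the getter above takes no
+// namespace argument. A hedged example ("my-template" is illustrative):
+//
+//	tmpl, err := cs.ArgoprojV1alpha1().ClusterWorkflowTemplates().Get(ctx, "my-template", metav1.GetOptions{})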
+type ClusterWorkflowTemplateInterface interface { + Create(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.CreateOptions) (*v1alpha1.ClusterWorkflowTemplate, error) + Update(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.UpdateOptions) (*v1alpha1.ClusterWorkflowTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterWorkflowTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterWorkflowTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) + ClusterWorkflowTemplateExpansion +} + +// clusterWorkflowTemplates implements ClusterWorkflowTemplateInterface +type clusterWorkflowTemplates struct { + client rest.Interface +} + +// newClusterWorkflowTemplates returns a ClusterWorkflowTemplates +func newClusterWorkflowTemplates(c *ArgoprojV1alpha1Client) *clusterWorkflowTemplates { + return &clusterWorkflowTemplates{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterWorkflowTemplate, and returns the corresponding clusterWorkflowTemplate object, and an error if there is any. +func (c *clusterWorkflowTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Get(). + Resource("clusterworkflowtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterWorkflowTemplates that match those selectors. +func (c *clusterWorkflowTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterWorkflowTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterWorkflowTemplateList{} + err = c.client.Get(). + Resource("clusterworkflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterWorkflowTemplates. +func (c *clusterWorkflowTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterworkflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterWorkflowTemplate and creates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *clusterWorkflowTemplates) Create(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.CreateOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Post(). + Resource("clusterworkflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterWorkflowTemplate). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterWorkflowTemplate and updates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *clusterWorkflowTemplates) Update(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.UpdateOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Put(). + Resource("clusterworkflowtemplates"). + Name(clusterWorkflowTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterWorkflowTemplate). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterWorkflowTemplate and deletes it. Returns an error if one occurs. +func (c *clusterWorkflowTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterworkflowtemplates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterWorkflowTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterworkflowtemplates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterWorkflowTemplate. +func (c *clusterWorkflowTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Patch(pt). + Resource("clusterworkflowtemplates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go new file mode 100644 index 00000000..78568040 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CronWorkflowsGetter has a method to return a CronWorkflowInterface. +// A group's client should implement this interface. +type CronWorkflowsGetter interface { + CronWorkflows(namespace string) CronWorkflowInterface +} + +// CronWorkflowInterface has methods to work with CronWorkflow resources. 
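+//
+// Namespaced counterpart of the cluster-scoped client above; for example
+// (namespace and name are illustrative):
+//
+//	cw, err := cs.ArgoprojV1alpha1().CronWorkflows("argo").Get(ctx, "nightly", metav1.GetOptions{})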
+type CronWorkflowInterface interface { + Create(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.CreateOptions) (*v1alpha1.CronWorkflow, error) + Update(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.UpdateOptions) (*v1alpha1.CronWorkflow, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CronWorkflow, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CronWorkflowList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CronWorkflow, err error) + CronWorkflowExpansion +} + +// cronWorkflows implements CronWorkflowInterface +type cronWorkflows struct { + client rest.Interface + ns string +} + +// newCronWorkflows returns a CronWorkflows +func newCronWorkflows(c *ArgoprojV1alpha1Client, namespace string) *cronWorkflows { + return &cronWorkflows{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cronWorkflow, and returns the corresponding cronWorkflow object, and an error if there is any. +func (c *cronWorkflows) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CronWorkflow, err error) { + result = &v1alpha1.CronWorkflow{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronworkflows"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronWorkflows that match those selectors. +func (c *cronWorkflows) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CronWorkflowList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.CronWorkflowList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronworkflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronWorkflows. +func (c *cronWorkflows) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronworkflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cronWorkflow and creates it. Returns the server's representation of the cronWorkflow, and an error, if there is any. +func (c *cronWorkflows) Create(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.CreateOptions) (result *v1alpha1.CronWorkflow, err error) { + result = &v1alpha1.CronWorkflow{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronworkflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cronWorkflow). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cronWorkflow and updates it. Returns the server's representation of the cronWorkflow, and an error, if there is any. 
+func (c *cronWorkflows) Update(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.UpdateOptions) (result *v1alpha1.CronWorkflow, err error) { + result = &v1alpha1.CronWorkflow{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronworkflows"). + Name(cronWorkflow.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cronWorkflow). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cronWorkflow and deletes it. Returns an error if one occurs. +func (c *cronWorkflows) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronworkflows"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronWorkflows) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cronworkflows"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cronWorkflow. +func (c *cronWorkflows) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CronWorkflow, err error) { + result = &v1alpha1.CronWorkflow{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronworkflows"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go new file mode 100644 index 00000000..93a7ca4e --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..eb6fc209 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +// Code generated by client-gen. DO NOT EDIT. 
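+
+// Editor's note (not generated output): the empty *Expansion interfaces in
+// this file are client-gen's extension point -- hand-written methods can be
+// added to the generated typed clients by declaring them on these
+// interfaces in a separate, non-generated file.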
+ +package v1alpha1 + +type ClusterWorkflowTemplateExpansion interface{} + +type CronWorkflowExpansion interface{} + +type WorkflowExpansion interface{} + +type WorkflowArtifactGCTaskExpansion interface{} + +type WorkflowEventBindingExpansion interface{} + +type WorkflowTaskResultExpansion interface{} + +type WorkflowTaskSetExpansion interface{} + +type WorkflowTemplateExpansion interface{} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go new file mode 100644 index 00000000..3761f3c3 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowsGetter has a method to return a WorkflowInterface. +// A group's client should implement this interface. +type WorkflowsGetter interface { + Workflows(namespace string) WorkflowInterface +} + +// WorkflowInterface has methods to work with Workflow resources. +type WorkflowInterface interface { + Create(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.CreateOptions) (*v1alpha1.Workflow, error) + Update(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.UpdateOptions) (*v1alpha1.Workflow, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Workflow, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Workflow, err error) + WorkflowExpansion +} + +// workflows implements WorkflowInterface +type workflows struct { + client rest.Interface + ns string +} + +// newWorkflows returns a Workflows +func newWorkflows(c *ArgoprojV1alpha1Client, namespace string) *workflows { + return &workflows{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflow, and returns the corresponding workflow object, and an error if there is any. +func (c *workflows) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Workflow, err error) { + result = &v1alpha1.Workflow{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflows"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Workflows that match those selectors. +func (c *workflows) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowList{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("workflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflows. +func (c *workflows) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflow and creates it. Returns the server's representation of the workflow, and an error, if there is any. +func (c *workflows) Create(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.CreateOptions) (result *v1alpha1.Workflow, err error) { + result = &v1alpha1.Workflow{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workflows"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflow). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a workflow and updates it. Returns the server's representation of the workflow, and an error, if there is any. +func (c *workflows) Update(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.UpdateOptions) (result *v1alpha1.Workflow, err error) { + result = &v1alpha1.Workflow{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflows"). + Name(workflow.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflow). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflow and deletes it. Returns an error if one occurs. +func (c *workflows) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workflows"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *workflows) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workflows"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflow. +func (c *workflows) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Workflow, err error) { + result = &v1alpha1.Workflow{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workflows"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go new file mode 100644 index 00000000..49da7948 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go @@ -0,0 +1,108 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ArgoprojV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterWorkflowTemplatesGetter + CronWorkflowsGetter + WorkflowsGetter + WorkflowArtifactGCTasksGetter + WorkflowEventBindingsGetter + WorkflowTaskResultsGetter + WorkflowTaskSetsGetter + WorkflowTemplatesGetter +} + +// ArgoprojV1alpha1Client is used to interact with features provided by the argoproj.io group. +type ArgoprojV1alpha1Client struct { + restClient rest.Interface +} + +func (c *ArgoprojV1alpha1Client) ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface { + return newClusterWorkflowTemplates(c) +} + +func (c *ArgoprojV1alpha1Client) CronWorkflows(namespace string) CronWorkflowInterface { + return newCronWorkflows(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) Workflows(namespace string) WorkflowInterface { + return newWorkflows(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) WorkflowArtifactGCTasks(namespace string) WorkflowArtifactGCTaskInterface { + return newWorkflowArtifactGCTasks(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) WorkflowEventBindings(namespace string) WorkflowEventBindingInterface { + return newWorkflowEventBindings(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) WorkflowTaskResults(namespace string) WorkflowTaskResultInterface { + return newWorkflowTaskResults(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) WorkflowTaskSets(namespace string) WorkflowTaskSetInterface { + return newWorkflowTaskSets(c, namespace) +} + +func (c *ArgoprojV1alpha1Client) WorkflowTemplates(namespace string) WorkflowTemplateInterface { + return newWorkflowTemplates(c, namespace) +} + +// NewForConfig creates a new ArgoprojV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ArgoprojV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new ArgoprojV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ArgoprojV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ArgoprojV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *ArgoprojV1alpha1Client { + return &ArgoprojV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
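+//
+// The nil-receiver guard below mirrors Discovery() on the Clientset:
+// calling RESTClient on a nil client returns nil instead of panicking.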
+func (c *ArgoprojV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go new file mode 100644 index 00000000..3d8e3c1f --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowArtifactGCTasksGetter has a method to return a WorkflowArtifactGCTaskInterface. +// A group's client should implement this interface. +type WorkflowArtifactGCTasksGetter interface { + WorkflowArtifactGCTasks(namespace string) WorkflowArtifactGCTaskInterface +} + +// WorkflowArtifactGCTaskInterface has methods to work with WorkflowArtifactGCTask resources. +type WorkflowArtifactGCTaskInterface interface { + Create(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.CreateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) + Update(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) + UpdateStatus(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowArtifactGCTask, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowArtifactGCTaskList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowArtifactGCTask, err error) + WorkflowArtifactGCTaskExpansion +} + +// workflowArtifactGCTasks implements WorkflowArtifactGCTaskInterface +type workflowArtifactGCTasks struct { + client rest.Interface + ns string +} + +// newWorkflowArtifactGCTasks returns a WorkflowArtifactGCTasks +func newWorkflowArtifactGCTasks(c *ArgoprojV1alpha1Client, namespace string) *workflowArtifactGCTasks { + return &workflowArtifactGCTasks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflowArtifactGCTask, and returns the corresponding workflowArtifactGCTask object, and an error if there is any. +func (c *workflowArtifactGCTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { + result = &v1alpha1.WorkflowArtifactGCTask{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of WorkflowArtifactGCTasks that match those selectors. +func (c *workflowArtifactGCTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowArtifactGCTaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowArtifactGCTaskList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflowArtifactGCTasks. +func (c *workflowArtifactGCTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflowArtifactGCTask and creates it. Returns the server's representation of the workflowArtifactGCTask, and an error, if there is any. +func (c *workflowArtifactGCTasks) Create(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.CreateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { + result = &v1alpha1.WorkflowArtifactGCTask{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowArtifactGCTask). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a workflowArtifactGCTask and updates it. Returns the server's representation of the workflowArtifactGCTask, and an error, if there is any. +func (c *workflowArtifactGCTasks) Update(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { + result = &v1alpha1.WorkflowArtifactGCTask{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + Name(workflowArtifactGCTask.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowArtifactGCTask). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *workflowArtifactGCTasks) UpdateStatus(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { + result = &v1alpha1.WorkflowArtifactGCTask{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + Name(workflowArtifactGCTask.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowArtifactGCTask). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflowArtifactGCTask and deletes it. Returns an error if one occurs. +func (c *workflowArtifactGCTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *workflowArtifactGCTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflowArtifactGCTask. +func (c *workflowArtifactGCTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowArtifactGCTask, err error) { + result = &v1alpha1.WorkflowArtifactGCTask{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workflowartifactgctasks"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go new file mode 100644 index 00000000..c1dee227 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowEventBindingsGetter has a method to return a WorkflowEventBindingInterface. +// A group's client should implement this interface. +type WorkflowEventBindingsGetter interface { + WorkflowEventBindings(namespace string) WorkflowEventBindingInterface +} + +// WorkflowEventBindingInterface has methods to work with WorkflowEventBinding resources. 
+type WorkflowEventBindingInterface interface { + Create(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.CreateOptions) (*v1alpha1.WorkflowEventBinding, error) + Update(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.UpdateOptions) (*v1alpha1.WorkflowEventBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowEventBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowEventBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowEventBinding, err error) + WorkflowEventBindingExpansion +} + +// workflowEventBindings implements WorkflowEventBindingInterface +type workflowEventBindings struct { + client rest.Interface + ns string +} + +// newWorkflowEventBindings returns a WorkflowEventBindings +func newWorkflowEventBindings(c *ArgoprojV1alpha1Client, namespace string) *workflowEventBindings { + return &workflowEventBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflowEventBinding, and returns the corresponding workflowEventBinding object, and an error if there is any. +func (c *workflowEventBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowEventBinding, err error) { + result = &v1alpha1.WorkflowEventBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workfloweventbindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of WorkflowEventBindings that match those selectors. +func (c *workflowEventBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowEventBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowEventBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workfloweventbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflowEventBindings. +func (c *workflowEventBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workfloweventbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflowEventBinding and creates it. Returns the server's representation of the workflowEventBinding, and an error, if there is any. +func (c *workflowEventBindings) Create(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.CreateOptions) (result *v1alpha1.WorkflowEventBinding, err error) { + result = &v1alpha1.WorkflowEventBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workfloweventbindings"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(workflowEventBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a workflowEventBinding and updates it. Returns the server's representation of the workflowEventBinding, and an error, if there is any. +func (c *workflowEventBindings) Update(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.UpdateOptions) (result *v1alpha1.WorkflowEventBinding, err error) { + result = &v1alpha1.WorkflowEventBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workfloweventbindings"). + Name(workflowEventBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowEventBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflowEventBinding and deletes it. Returns an error if one occurs. +func (c *workflowEventBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workfloweventbindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *workflowEventBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workfloweventbindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflowEventBinding. +func (c *workflowEventBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowEventBinding, err error) { + result = &v1alpha1.WorkflowEventBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workfloweventbindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go new file mode 100644 index 00000000..a833fa6f --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowTaskResultsGetter has a method to return a WorkflowTaskResultInterface. +// A group's client should implement this interface. +type WorkflowTaskResultsGetter interface { + WorkflowTaskResults(namespace string) WorkflowTaskResultInterface +} + +// WorkflowTaskResultInterface has methods to work with WorkflowTaskResult resources. 
+type WorkflowTaskResultInterface interface { + Create(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.CreateOptions) (*v1alpha1.WorkflowTaskResult, error) + Update(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskResult, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTaskResult, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTaskResultList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskResult, err error) + WorkflowTaskResultExpansion +} + +// workflowTaskResults implements WorkflowTaskResultInterface +type workflowTaskResults struct { + client rest.Interface + ns string +} + +// newWorkflowTaskResults returns a WorkflowTaskResults +func newWorkflowTaskResults(c *ArgoprojV1alpha1Client, namespace string) *workflowTaskResults { + return &workflowTaskResults{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflowTaskResult, and returns the corresponding workflowTaskResult object, and an error if there is any. +func (c *workflowTaskResults) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTaskResult, err error) { + result = &v1alpha1.WorkflowTaskResult{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtaskresults"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of WorkflowTaskResults that match those selectors. +func (c *workflowTaskResults) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTaskResultList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowTaskResultList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtaskresults"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflowTaskResults. +func (c *workflowTaskResults) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workflowtaskresults"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflowTaskResult and creates it. Returns the server's representation of the workflowTaskResult, and an error, if there is any. +func (c *workflowTaskResults) Create(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.CreateOptions) (result *v1alpha1.WorkflowTaskResult, err error) { + result = &v1alpha1.WorkflowTaskResult{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workflowtaskresults"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTaskResult). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a workflowTaskResult and updates it. Returns the server's representation of the workflowTaskResult, and an error, if there is any. +func (c *workflowTaskResults) Update(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskResult, err error) { + result = &v1alpha1.WorkflowTaskResult{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowtaskresults"). + Name(workflowTaskResult.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTaskResult). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflowTaskResult and deletes it. Returns an error if one occurs. +func (c *workflowTaskResults) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtaskresults"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *workflowTaskResults) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtaskresults"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflowTaskResult. +func (c *workflowTaskResults) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskResult, err error) { + result = &v1alpha1.WorkflowTaskResult{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workflowtaskresults"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go new file mode 100644 index 00000000..d0d9e48f --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowTaskSetsGetter has a method to return a WorkflowTaskSetInterface. +// A group's client should implement this interface. +type WorkflowTaskSetsGetter interface { + WorkflowTaskSets(namespace string) WorkflowTaskSetInterface +} + +// WorkflowTaskSetInterface has methods to work with WorkflowTaskSet resources. 
+type WorkflowTaskSetInterface interface { + Create(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.CreateOptions) (*v1alpha1.WorkflowTaskSet, error) + Update(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskSet, error) + UpdateStatus(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskSet, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTaskSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTaskSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskSet, err error) + WorkflowTaskSetExpansion +} + +// workflowTaskSets implements WorkflowTaskSetInterface +type workflowTaskSets struct { + client rest.Interface + ns string +} + +// newWorkflowTaskSets returns a WorkflowTaskSets +func newWorkflowTaskSets(c *ArgoprojV1alpha1Client, namespace string) *workflowTaskSets { + return &workflowTaskSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflowTaskSet, and returns the corresponding workflowTaskSet object, and an error if there is any. +func (c *workflowTaskSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTaskSet, err error) { + result = &v1alpha1.WorkflowTaskSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtasksets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of WorkflowTaskSets that match those selectors. +func (c *workflowTaskSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTaskSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowTaskSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtasksets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflowTaskSets. +func (c *workflowTaskSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workflowtasksets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflowTaskSet and creates it. Returns the server's representation of the workflowTaskSet, and an error, if there is any. +func (c *workflowTaskSets) Create(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.CreateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { + result = &v1alpha1.WorkflowTaskSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workflowtasksets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTaskSet). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a workflowTaskSet and updates it. Returns the server's representation of the workflowTaskSet, and an error, if there is any. +func (c *workflowTaskSets) Update(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { + result = &v1alpha1.WorkflowTaskSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowtasksets"). + Name(workflowTaskSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTaskSet). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *workflowTaskSets) UpdateStatus(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { + result = &v1alpha1.WorkflowTaskSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowtasksets"). + Name(workflowTaskSet.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTaskSet). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflowTaskSet and deletes it. Returns an error if one occurs. +func (c *workflowTaskSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtasksets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *workflowTaskSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtasksets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflowTaskSet. +func (c *workflowTaskSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskSet, err error) { + result = &v1alpha1.WorkflowTaskSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workflowtasksets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go new file mode 100644 index 00000000..992d044c --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// WorkflowTemplatesGetter has a method to return a WorkflowTemplateInterface. +// A group's client should implement this interface. +type WorkflowTemplatesGetter interface { + WorkflowTemplates(namespace string) WorkflowTemplateInterface +} + +// WorkflowTemplateInterface has methods to work with WorkflowTemplate resources. +type WorkflowTemplateInterface interface { + Create(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.CreateOptions) (*v1alpha1.WorkflowTemplate, error) + Update(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.UpdateOptions) (*v1alpha1.WorkflowTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTemplate, err error) + WorkflowTemplateExpansion +} + +// workflowTemplates implements WorkflowTemplateInterface +type workflowTemplates struct { + client rest.Interface + ns string +} + +// newWorkflowTemplates returns a WorkflowTemplates +func newWorkflowTemplates(c *ArgoprojV1alpha1Client, namespace string) *workflowTemplates { + return &workflowTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the workflowTemplate, and returns the corresponding workflowTemplate object, and an error if there is any. +func (c *workflowTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTemplate, err error) { + result = &v1alpha1.WorkflowTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of WorkflowTemplates that match those selectors. +func (c *workflowTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkflowTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("workflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workflowTemplates. +func (c *workflowTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("workflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a workflowTemplate and creates it. Returns the server's representation of the workflowTemplate, and an error, if there is any. +func (c *workflowTemplates) Create(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.CreateOptions) (result *v1alpha1.WorkflowTemplate, err error) { + result = &v1alpha1.WorkflowTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("workflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTemplate). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a workflowTemplate and updates it. Returns the server's representation of the workflowTemplate, and an error, if there is any. +func (c *workflowTemplates) Update(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTemplate, err error) { + result = &v1alpha1.WorkflowTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("workflowtemplates"). + Name(workflowTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(workflowTemplate). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the workflowTemplate and deletes it. Returns an error if one occurs. +func (c *workflowTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtemplates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *workflowTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("workflowtemplates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched workflowTemplate. +func (c *workflowTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTemplate, err error) { + result = &v1alpha1.WorkflowTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("workflowtemplates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/context/context.go b/vendor/github.com/argoproj/argo-workflows/v3/util/context/context.go new file mode 100644 index 00000000..78b1cad2 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/util/context/context.go @@ -0,0 +1,35 @@ +// Package context contains common functions for storing and retrieving information from +// standard go context +package context + +import ( + "context" + + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type objectIdentifier string + +const ( + name objectIdentifier = `object_name` + namespace objectIdentifier = `object_namespace` +) + +func InjectObjectMeta(ctx context.Context, meta *meta.ObjectMeta) context.Context { + ctx = context.WithValue(ctx, name, meta.Name) + return context.WithValue(ctx, namespace, meta.Namespace) +} + +func ObjectName(ctx context.Context) string { + if n, ok := ctx.Value(name).(string); ok { + return n + } + return "" +} + +func ObjectNamespace(ctx context.Context) string { + if n, ok := ctx.Value(namespace).(string); ok { + return n + } + return "" +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/deprecation/deprecation.go b/vendor/github.com/argoproj/argo-workflows/v3/util/deprecation/deprecation.go new file mode 100644 index 00000000..d8eeb662 --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/util/deprecation/deprecation.go @@ -0,0 +1,56 @@ +// Package deprecation records uses of deprecated features so that users can be made aware of +// things that may be removed in a future version and move away from them. +package deprecation + +// This is a deliberate singleton devised to be functional when initialised with an +// instance of metrics, and otherwise to remain quiet +// +// This avoids the problem of injecting the metrics package (or whatever recording method the deprecation +// recorder is using) temporarily into packages and then painfully removing the injection later when the +// package no longer has deprecated features (as they've been removed) + +import ( + "context" + + wfctx "github.com/argoproj/argo-workflows/v3/util/context" +) + +type metricsFunc func(context.Context, string, string) + +var ( + metricsF metricsFunc +) + +type Type int + +const ( + Schedule Type = iota + Mutex + Semaphore + PodPriority +) + +func (t *Type) asString() string { + switch *t { + case Schedule: + return `cronworkflow schedule` + case Mutex: + return `synchronization mutex` + case Semaphore: + return `synchronization semaphore` + case PodPriority: + return `workflow podpriority` + default: + return `unknown` + } +} + +func Initialize(m metricsFunc) { + metricsF = m +} + +func Record(ctx context.Context, deprecation Type) { + if metricsF != nil { + metricsF(ctx, deprecation.asString(), wfctx.ObjectNamespace(ctx)) + } +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go new file mode 100644 index 00000000..6a7ee41e --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go @@ -0,0 +1,11 @@ +package json + +import "strings" + +func Fix(s string) string { + // https://stackoverflow.com/questions/28595664/how-to-stop-json-marshal-from-escaping-and/28596225 + s = strings.Replace(s, "\\u003c", "<", -1) + s = strings.Replace(s, "\\u003e", ">", -1) + s = strings.Replace(s, "\\u0026", "&", -1) + return s +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go 
b/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go new file mode 100644 index 00000000..fda3296e --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go @@ -0,0 +1,36 @@ +package json + +import ( + "encoding/json" + "io" + + gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" +) + +// JSONMarshaler is a type which satisfies the grpc-gateway Marshaler interface +type JSONMarshaler struct{} + +// ContentType implements gwruntime.Marshaler. +func (j *JSONMarshaler) ContentType() string { + return "application/json" +} + +// Marshal implements gwruntime.Marshaler. +func (j *JSONMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// NewDecoder implements gwruntime.Marshaler. +func (j *JSONMarshaler) NewDecoder(r io.Reader) gwruntime.Decoder { + return json.NewDecoder(r) +} + +// NewEncoder implements gwruntime.Marshaler. +func (j *JSONMarshaler) NewEncoder(w io.Writer) gwruntime.Encoder { + return json.NewEncoder(w) +} + +// Unmarshal implements gwruntime.Marshaler. +func (j *JSONMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go new file mode 100644 index 00000000..bdb25bce --- /dev/null +++ b/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go @@ -0,0 +1,12 @@ +package json + +import "encoding/json" + +func Jsonify(v interface{}) (map[string]interface{}, error) { + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + x := make(map[string]interface{}) + return x, json.Unmarshal(data, &x) +} diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 28e35169..97e319b2 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -4,7 +4,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) # Get It! @@ -314,4 +314,4 @@ go test -cover ./... ``` Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). 
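The `patch.go` hunks below extend `evanphx/json-patch` so that `replace` and `test` operations can target the entire document through the empty JSON Pointer path (`""`) instead of failing to resolve a container. As a minimal sketch of the behaviour this enables, assuming the patched module is vendored as above (the JSON values here are illustrative only, not taken from this patch):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name":"alpha","count":1}`)

	// An RFC 6902 replace whose path is "" addresses the whole document;
	// the hunk below accepts an object or array value in this position.
	patchJSON := []byte(`[{"op":"replace","path":"","value":{"name":"beta"}}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	modified, err := patch.Apply(original)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(modified)) // {"name":"beta"}
}
```

`DecodePatch` and `Apply` are the library's documented entry points; only the root-path handling relies on the change introduced in this diff.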
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 4bce5936..cd0274e1 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -359,7 +359,7 @@ func findObject(pd *container, path string) (container, string) { next, ok := doc.get(decodePatchKey(part)) - if next == nil || ok != nil { + if next == nil || ok != nil || next.raw == nil { return nil, "" } @@ -568,6 +568,29 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + con, key := findObject(doc, path) if con == nil { @@ -634,6 +657,25 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + con, key := findObject(doc, path) if con == nil { @@ -646,7 +688,7 @@ func (p Patch) test(doc *container, op Operation) error { } if val == nil { - if op.value().raw == nil { + if op.value() == nil || op.value().raw == nil { return nil } return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go new file mode 100644 index 00000000..ffde8a65 --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descriptor provides functions for obtaining the protocol buffer +// descriptors of generated Go types. +// +// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package +// for how to obtain an EnumDescriptor or MessageDescriptor in order to +// programatically interact with the protobuf type system. +package descriptor + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Message is proto.Message with a method to return its descriptor. +// +// Deprecated: The Descriptor method may not be generated by future +// versions of protoc-gen-go, meaning that this interface may not +// be implemented by many concrete message types. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns the file descriptor proto containing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. 
+// +// Deprecated: Not all concrete message types satisfy the Message interface. +// Use MessageDescriptorProto instead. If possible, the calling code should +// be rewritten to use protobuf reflection instead. +// See package "google.golang.org/protobuf/reflect/protoreflect" for details. +func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + return MessageDescriptorProto(m) +} + +type rawDesc struct { + fileDesc []byte + indexes []int +} + +var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc + +func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { + // Fast-path: check whether raw descriptors are already cached. + origDesc := d + if v, ok := rawDescCache.Load(origDesc); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + + // Slow-path: derive the raw descriptor from the v2 descriptor. + + // Start with the leaf (a given enum or message declaration) and + // ascend upwards until we hit the parent file descriptor. + var idxs []int + for { + idxs = append(idxs, d.Index()) + d = d.Parent() + if d == nil { + // TODO: We could construct a FileDescriptor stub for standalone + // descriptors to satisfy the API. + return nil, nil + } + if _, ok := d.(protoreflect.FileDescriptor); ok { + break + } + } + + // Obtain the raw file descriptor. + fd := d.(protoreflect.FileDescriptor) + b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) + file := protoimpl.X.CompressGZIP(b) + + // Reverse the indexes, since we populated it in reverse. + for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { + idxs[i], idxs[j] = idxs[j], idxs[i] + } + + if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + return file, idxs +} + +// EnumRawDescriptor returns the GZIP'd raw file descriptor representing +// the enum and the index path to reach the enum declaration. +// The returned slices must not be mutated. +func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { + if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { + return ev.EnumDescriptor() + } + ed := protoimpl.X.EnumTypeOf(e) + return deriveRawDescriptor(ed.Descriptor()) +} + +// MessageRawDescriptor returns the GZIP'd raw file descriptor representing +// the message and the index path to reach the message declaration. +// The returned slices must not be mutated. +func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { + if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { + return mv.Descriptor() + } + md := protoimpl.X.MessageTypeOf(m) + return deriveRawDescriptor(md.Descriptor()) +} + +var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto + +func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { + // Fast-path: check whether descriptor protos are already cached. + if v, ok := fileDescCache.Load(&rawDesc[0]); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + + // Slow-path: derive the descriptor proto from the GZIP'd message. 
+ zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) + if err != nil { + panic(err) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + fd := new(descriptorpb.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + panic(err) + } + if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + return fd +} + +// EnumDescriptorProto returns the file descriptor proto representing +// the enum and the enum descriptor proto for the enum itself. +// The returned proto messages must not be mutated. +func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { + rawDesc, idxs := EnumRawDescriptor(e) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + if len(idxs) == 1 { + return fd, fd.EnumType[idxs[0]] + } + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1 : len(idxs)-1] { + md = md.NestedType[i] + } + ed := md.EnumType[idxs[len(idxs)-1]] + return fd, ed +} + +// MessageDescriptorProto returns the file descriptor proto representing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. +func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + rawDesc, idxs := MessageRawDescriptor(m) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 00000000..c6f66f10 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,531 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. 
+ AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 00000000..e9438a93 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,560 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. 
+ Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 00000000..480e2448 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 00000000..a5a13861 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto + +package descriptor + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/descriptor.proto. + +type Edition = descriptorpb.Edition + +const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN +const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2 +const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3 +const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023 +const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024 +const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY +const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY +const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY +const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY +const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY +const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX + +var Edition_name = descriptorpb.Edition_name +var Edition_value = descriptorpb.Edition_value + +type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState + +const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION +const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED + +var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name +var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value + +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type + +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 + +var FieldDescriptorProto_Type_name = 
descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value + +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label + +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED + +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value + +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode + +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME + +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value + +type FieldOptions_CType = descriptorpb.FieldOptions_CType + +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE + +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value + +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType + +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER + +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value + +type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention + +const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN +const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME +const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE + +var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name +var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value + +type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType + +const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN +const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE +const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE +const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE +const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD +const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF +const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM +const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY +const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE +const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD + +var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name +var FieldOptions_OptionTargetType_value = 
descriptorpb.FieldOptions_OptionTargetType_value + +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel + +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT + +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value + +type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence + +const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN +const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT +const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT +const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED + +var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name +var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value + +type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType + +const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN +const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN +const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED + +var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name +var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value + +type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding + +const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED +const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED + +var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name +var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value + +type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation + +const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN +const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY +const FeatureSet_NONE = descriptorpb.FeatureSet_NONE + +var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name +var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value + +type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding + +const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN +const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED +const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED + +var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name +var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value + +type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat + +const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN +const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW +const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT + +var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name +var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value + +type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic + +const GeneratedCodeInfo_Annotation_NONE = 
descriptorpb.GeneratedCodeInfo_Annotation_NONE +const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET +const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS + +var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name +var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value + +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions + +const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification + +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak +const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = 
descriptorpb.Default_EnumValueOptions_Deprecated +const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact + +type ServiceOptions = descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel + +type UninterpretedOption = descriptorpb.UninterpretedOption +type FeatureSet = descriptorpb.FeatureSet +type FeatureSetDefaults = descriptorpb.FeatureSetDefaults +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if 
File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..0ef27d33 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. + +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, 
+ NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..d0079ee3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. + +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..a76f8076 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 00000000..cc40f27a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +package wrappers + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/wrappers.proto. + +type DoubleValue = wrapperspb.DoubleValue +type FloatValue = wrapperspb.FloatValue +type Int64Value = wrapperspb.Int64Value +type UInt64Value = wrapperspb.UInt64Value +type Int32Value = wrapperspb.Int32Value +type UInt32Value = wrapperspb.UInt32Value +type BoolValue = wrapperspb.BoolValue +type StringValue = wrapperspb.StringValue +type BytesValue = wrapperspb.BytesValue + +var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() } +func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() { + if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil +} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 00000000..2940ec92 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index cd3fcd1e..84039fec 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1,25 +1 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 00000000..34882139 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400..00000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index 9171c972..bb9d80bc 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 00000000..603a63f5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 2517a287..1fd5e9c4 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,17 +1,14 @@ -# Gorilla WebSocket +# gorilla/websocket -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- ### Documentation @@ -20,6 +17,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -36,4 +34,3 @@ package API is stable. The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
- diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 2efd8355..815b0ca5 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -9,14 +9,18 @@ import ( "context" "crypto/tls" "errors" + "fmt" "io" - "io/ioutil" + "log" + "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" + + "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -224,6 +228,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -289,7 +294,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - c.Close() + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } return c, nil @@ -303,7 +310,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -318,18 +325,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } if trace != nil && trace.GotConn != nil { trace.GotConn(httptrace.GotConnInfo{ Conn: netConn, }) } - if err != nil { - return nil, nil, err - } defer func() { if netConn != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } } }() @@ -370,6 +379,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h resp, err := http.ReadResponse(conn.br, req) if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } return nil, nil, err } @@ -388,7 +408,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -406,17 +426,19 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } netConn = nil // to avoid close in defer. 
return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{} + return &tls.Config{MinVersion: tls.VersionTLS12} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 813ffb1e..9fed0ef5 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,6 +8,7 @@ import ( "compress/flate" "errors" "io" + "log" "strings" "sync" ) @@ -33,7 +34,9 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } return &flateReadWrapper{fr} } @@ -132,7 +135,9 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - r.Close() + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 331eebc8..221e6cf7 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" + "crypto/rand" "encoding/binary" "errors" "io" - "io/ioutil" - "math/rand" + "log" "net" "strconv" "strings" @@ -181,13 +181,20 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
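+// Drawing the key from crypto/rand (via maskRand) keeps client mask keys
+// unpredictable, which RFC 6455 requires in order to frustrate proxy cache
+// poisoning; the ReadFull error is intentionally dropped because
+// crypto/rand.Reader is not expected to fail.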
func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { + if e, ok := err.(net.Error); ok { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -372,7 +379,9 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - c.br.Discard(len(p)) + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } return p, err } @@ -387,7 +396,9 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -397,7 +408,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return nil } @@ -438,7 +449,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = deadline.Sub(time.Now()) + d = time.Until(deadline) if d < 0 { return errWriteTimeout } @@ -460,13 +471,15 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return err } @@ -477,7 +490,9 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - c.writer.Close() + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } c.writer = nil } @@ -630,7 +645,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - w.endMessage(errWriteClosed) + _ = w.endMessage(errWriteClosed) return nil } @@ -795,7 +810,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -817,7 +832,9 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } c.readDecompress = false if rsv1 { @@ -922,7 +939,9 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } return noFrame, ErrReadLimit } @@ -934,7 +953,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } if err != nil { return noFrame, err } @@ -981,7 +1002,9 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } return errors.New("websocket: " + message) } @@ -998,7 +1021,9 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. if c.reader != nil { - c.reader.Close() + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } c.reader = nil } @@ -1054,7 +1079,9 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - c.setReadRemaining(rem) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1094,7 +1121,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = ioutil.ReadAll(r) + p, err = io.ReadAll(r) return messageType, p, err } @@ -1136,7 +1163,9 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } return nil } } @@ -1161,7 +1190,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { + } else if _, ok := err.(net.Error); ok { return nil } return err @@ -1189,8 +1218,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) { c.handlePong = h } +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. 
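+//
+// A hypothetical use is inspecting TLS state on a dialed connection:
+//
+//	if tc, ok := conn.NetConn().(*tls.Conn); ok {
+//		_ = tc.ConnectionState() // negotiated version, peer certificates, ...
+//	}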
+func (c *Conn) NetConn() net.Conn { + return c.conn +} + // UnderlyingConn returns the internal net.Conn. This can be used to further // modifications to connection specific flags. +// Deprecated: Use the NetConn method. func (c *Conn) UnderlyingConn() net.Conn { return c.conn } diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index d0742bf2..67d0968b 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,6 +9,7 @@ package websocket import "unsafe" +// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -22,6 +23,7 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -36,11 +38,13 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } + //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index e0f466b7..80f55d1e 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,10 +8,13 @@ import ( "bufio" "encoding/base64" "errors" + "log" "net" "net/http" "net/url" "strings" + + "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -21,7 +24,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -55,7 +58,9 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } @@ -64,12 +69,16 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } if resp.StatusCode != 200 { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 24d53b38..1e720e1d 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,6 +8,7 @@ import ( "bufio" "errors" "io" + "log" "net/http" "net/url" "strings" @@ -154,8 +155,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } 
challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") } subprotocol := u.selectSubprotocol(r, responseHeader) @@ -183,7 +184,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -248,17 +251,34 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } if _, err = netConn.Write(p); err != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } return c, nil @@ -356,8 +376,12 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. 
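+	// (The hook records the single Write it receives, which is the
+	// bufio.Writer's internal buffer: writing one byte and flushing is enough
+	// to force that buffer out through the hook so it can be returned.)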
var wh writeHook bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index a62b68cc..7f386453 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,6 +1,3 @@ -//go:build go1.17 -// +build go1.17 - package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f..00000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 7bf2f66c..9b1a629b 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) @@ -281,3 +281,18 @@ headers: } return result } + +// isValidChallengeKey checks if the argument meets RFC6455 specification. +func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b..00000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. 
-type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. 
-func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. -func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. 
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 00000000..36451625 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 00000000..5242751f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,23 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["errors.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go new file mode 100644 index 00000000..61101d71 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/errors.proto + +package internal + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Error is the generic error returned from unary RPCs. +type Error struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. 
+ Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{0} +} + +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Error) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Error) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{1} +} + +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (m *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(m, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return 
m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } + +var fileDescriptor_9b093362ca6d1e03 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, + 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, + 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, + 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, + 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, + 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, + 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, + 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, + 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, + 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, + 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, + 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, + 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, + 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto new file mode 100644 index 00000000..4fb212c6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// Error is the generic error returned from unary RPCs. +message Error { + string error = 1; + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + int32 code = 2; + string message = 3; + repeated google.protobuf.Any details = 4; +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
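+// (grpc_code carries the gRPC status code, while http_code and http_status
+// record what the gateway wrote on the HTTP side of the translation.)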
+message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 00000000..58b72b9c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,85 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//descriptor:go_default_library_gen", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//internal:go_default_library", + "//runtime/internal/examplepb:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 00000000..d8cbd4cc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,291 @@ +package runtime + 
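+// (This file holds the HTTP-to-gRPC plumbing: AnnotateContext and
+// AnnotateIncomingContext translate inbound headers, Grpc-Timeout values and
+// X-Forwarded-* information into gRPC metadata.)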
+import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. 
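+				// e.g. a "Metadata-Bin: dGVzdA==" header reaches the gRPC
+				// metadata as the raw bytes "test".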
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. +type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. 
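+// Like SetHeader, it joins md with any previously set trailer metadata
+// rather than replacing it.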
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + } + return +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go new file mode 100644 index 00000000..2c279344 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go @@ -0,0 +1,318 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/ptypes/wrappers" +) + +// String just returns the given string. +// It is just for compatibility to other types. +func String(val string) (string, error) { + return val, nil +} + +// StringSlice converts 'val' where individual strings are separated by +// 'sep' into a string slice. +func StringSlice(val, sep string) ([]string, error) { + return strings.Split(val, sep), nil +} + +// Bool converts the given string representation of a boolean value into bool. +func Bool(val string) (bool, error) { + return strconv.ParseBool(val) +} + +// BoolSlice converts 'val' where individual booleans are separated by +// 'sep' into a bool slice. +func BoolSlice(val, sep string) ([]bool, error) { + s := strings.Split(val, sep) + values := make([]bool, len(s)) + for i, v := range s { + value, err := Bool(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float64 converts the given string representation into representation of a floating point number into float64. +func Float64(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +// Float64Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float64 slice. 
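+// For example, Float64Slice("1.5,2.5", ",") yields []float64{1.5, 2.5}.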
+func Float64Slice(val, sep string) ([]float64, error) { + s := strings.Split(val, sep) + values := make([]float64, len(s)) + for i, v := range s { + value, err := Float64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float32 converts the given string representation of a floating point number into float32. +func Float32(val string) (float32, error) { + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// Float32Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float32 slice. +func Float32Slice(val, sep string) ([]float32, error) { + s := strings.Split(val, sep) + values := make([]float32, len(s)) + for i, v := range s { + value, err := Float32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int64 converts the given string representation of an integer into int64. +func Int64(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +// Int64Slice converts 'val' where individual integers are separated by +// 'sep' into a int64 slice. +func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into a int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. +func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. 
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence is encoded in URL-safe base64 without padding.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in URL-safe
+// base64 without padding, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type-cast into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type-cast into the
+// correct enum proto type.
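+//
+// For example (illustrative; the value map would normally come from generated code):
+//
+//	m := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
+//	ids, err := EnumSlice("ACTIVE,0", ",", m) // ids == []int32{1, 0}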
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +/* + Support fot google.protobuf.wrappers on top of primitive types +*/ + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrappers.StringValue, error) { + return &wrappers.StringValue{Value: val}, nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrappers.FloatValue, error) { + parsedVal, err := Float32(val) + return &wrappers.FloatValue{Value: parsedVal}, err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrappers.DoubleValue, error) { + parsedVal, err := Float64(val) + return &wrappers.DoubleValue{Value: parsedVal}, err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrappers.BoolValue, error) { + parsedVal, err := Bool(val) + return &wrappers.BoolValue{Value: parsedVal}, err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrappers.Int32Value, error) { + parsedVal, err := Int32(val) + return &wrappers.Int32Value{Value: parsedVal}, err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrappers.UInt32Value, error) { + parsedVal, err := Uint32(val) + return &wrappers.UInt32Value{Value: parsedVal}, err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrappers.Int64Value, error) { + parsedVal, err := Int64(val) + return &wrappers.Int64Value{Value: parsedVal}, err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrappers.UInt64Value, error) { + parsedVal, err := Uint64(val) + return &wrappers.UInt64Value{Value: parsedVal}, err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrappers.BytesValue, error) { + parsedVal, err := Bytes(val) + return &wrappers.BytesValue{Value: parsedVal}, err +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go new file mode 100644 index 00000000..b6e5ddf7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go new file mode 100644 index 00000000..b2ce743b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -0,0 +1,186 @@ +package runtime + +import ( + "context" + "io" + "net/http" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. 
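+// For example (illustrative):
+//
+//	st := HTTPStatusFromCode(codes.NotFound) // st == http.StatusNotFound (404)
+//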
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with an error. + // + // HTTPError is called: + // - From generated per-endpoint gateway handler code, when calling the backend results in an error. + // - From gateway runtime code, when forwarding the response message results in an error. + // + // The default value for HTTPError calls the custom error handler configured on the ServeMux via the + // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise. + // + // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler + // serve option. + // + // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve + // option, set GlobalHTTPErrorHandler to a custom function. + // + // Setting this variable directly to customize error format is deprecated. + HTTPError = MuxOrGlobalHTTPError + + // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the + // WithProtoErrorHandler serve option. + // + // You can set a custom function to this variable to customize error format. + GlobalHTTPErrorHandler = DefaultHTTPError + + // OtherErrorHandler handles gateway errors from parsing and routing client requests for all + // ServeMux instances not using the WithProtoErrorHandler serve option. + // + // It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest + // + // To customize parsing and routing error handling of a particular ServeMux instance, use the + // WithProtoErrorHandler serve option. + // + // To customize parsing and routing error handling of all ServeMux instances not using the + // WithProtoErrorHandler serve option, set a custom function to this variable. + OtherErrorHandler = DefaultOtherErrorHandler +) + +// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalErrorHandler. 
+func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + if mux.protoErrorHandler != nil { + mux.protoErrorHandler(ctx, mux, marshaler, w, r, err) + } else { + GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err) + } +} + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a JSON object, +// which contains a member whose key is "error" and whose value is err.Error(). +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + const fallback = `{"error": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + pb := s.Proto() + contentType = typeMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + body := &internal.Error{ + Error: s.Message(), + Message: s.Message(), + Code: int32(s.Code()), + Details: s.Proto().GetDetails(), + } + + buf, merr := marshaler.Marshal(body) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", body, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + var wantsTrailers bool + + if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") { + wantsTrailers = true + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + if wantsTrailers { + handleForwardResponseTrailer(w, md) + } +} + +// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. +// It simply writes a string representation of the given error into "w". 
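+//
+// A sketch of installing a custom handler instead (illustrative only):
+//
+//	runtime.OtherErrorHandler = func(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+//		http.Error(w, "gateway: "+msg, code)
+//	}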
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { + http.Error(w, msg, code) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go new file mode 100644 index 00000000..aef645e4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go @@ -0,0 +1,89 @@ +package runtime + +import ( + "encoding/json" + "io" + "strings" + + descriptor2 "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/genproto/protobuf/field_mask" +) + +func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { + // TODO - should really gate this with a test that the marshaller has used json names + if md != nil { + for _, f := range md.Field { + if f.JsonName != nil && f.Name != nil && *f.JsonName == name { + var subType *descriptor.DescriptorProto + + // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. + if f.TypeName != nil { + typeSplit := strings.Split(*f.TypeName, ".") + typeName := typeSplit[len(typeSplit)-1] + for _, t := range md.NestedType { + if typeName == *t.Name { + subType = t + } + } + } + return *f.Name, subType + } + } + } + return name, nil +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, md: md}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + protoName, subMd := translateName(k, item.md) + if subMsg, ok := v.(descriptor2.Message); ok { + _, subMd = descriptor2.ForMessage(subMsg) + } + + var path string + if item.path == "" { + path = protoName + } else { + path = item.path + "." 
+ protoName + } + queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // descriptor for parent message + md *descriptor.DescriptorProto +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go new file mode 100644 index 00000000..e6e8f286 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -0,0 +1,212 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/grpclog" +) + +var errEmptyResponse = errors.New("empty response") + +// ForwardResponseStream forwards the stream from gRPC server to REST client. +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + + var buf []byte + switch { + case resp == nil: + buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse))) + default: + result := map[string]interface{}{"result": resp} + if rb, ok := resp.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func handleForwardResponseServerMetadata(w 
http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + contentType = typeMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { + serr := streamError(ctx, mux.streamErrorHandler, err) + if !wroteHeader { + w.WriteHeader(int(serr.HttpCode)) + } + buf, merr := marshaler.Marshal(errorChunk(serr)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if _, werr := w.Write(buf); werr != nil { + grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +// streamError returns the payload for the final message in a response stream +// that represents the 
given err. +func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { + serr := errHandler(ctx, err) + if serr != nil { + return serr + } + // TODO: log about misbehaving stream error handler? + return DefaultHTTPStreamErrorHandler(ctx, err) +} + +func errorChunk(err *StreamError) map[string]proto.Message { + return map[string]proto.Message{"error": (*internal.StreamError)(err)} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go new file mode 100644 index 00000000..525b0338 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go @@ -0,0 +1,43 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler +func SetHTTPBodyMarshaler(serveMux *ServeMux) { + serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{OrigName: true}, + } +} + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType implementation to keep backwards compatibility with marshal interface +func (h *HTTPBodyMarshaler) ContentType() string { + return h.ContentTypeFromMessage(nil) +} + +// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns +// its specified content type otherwise fall back to the default Marshaler. +func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType() +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.Data, nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go new file mode 100644 index 00000000..f9d3a585 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go @@ -0,0 +1,45 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". 
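+//
+// For example (illustrative; assumes the caller imports "strings"):
+//
+//	var out map[string]interface{}
+//	err := (&JSONBuiltin{}).NewDecoder(strings.NewReader(`{"a": 1}`)).Decode(&out)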
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 00000000..f0de351b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb" package.
+// It fully supports protobuf functionality, unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function cannot correctly marshal an arbitrary data structure into JSON;
+// it is only capable of marshaling the non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
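+//
+// A usage sketch (illustrative; msg is any proto.Message, and the caller imports "os"):
+//
+//	enc := (&JSONPb{OrigName: true}).NewEncoder(os.Stdout)
+//	_ = enc.Encode(msg) // writes the JSON form of msg followed by the delimiter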
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
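+//
+// Typically called once at startup (illustrative sketch):
+//
+//	func init() {
+//		runtime.DisallowUnknownFields()
+//	}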
+func DisallowUnknownFields() { + allowUnknownFields = false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go new file mode 100644 index 00000000..f65d1a26 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType() string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + _, err = writer.Write(buffer) + if err != nil { + return err + } + + return nil + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go new file mode 100644 index 00000000..46153294 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go @@ -0,0 +1,55 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. + ContentType() string +} + +// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called +// to set the Content-Type header on the response +type contentTypeMarshaler interface { + // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message + ContentTypeFromMessage(v interface{}) string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. 
+type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. +type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. +func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. +func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record separator for the stream. + Delimiter() []byte +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go new file mode 100644 index 00000000..8dd5c24d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go @@ -0,0 +1,99 @@ +package runtime + +import ( + "errors" + "mime" + "net/http" + + "google.golang.org/grpc/grpclog" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. +func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + contentType, _, err := mime.ParseMediaType(contentTypeVal) + if err != nil { + grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + continue + } + if m, ok := mux.marshalers.mimeMap[contentType]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. 
+// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. +func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go new file mode 100644 index 00000000..523a9cb4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -0,0 +1,300 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when +// a request is received with a URI path that does not match any registered +// service method. +// +// Since gRPC servers return an "Unimplemented" code for requests with an +// unrecognized URI path, this error also has a gRPC "Unimplemented" code. +var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + streamErrorHandler StreamErrorHandlerFunc + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool + lastMatchWins bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated swagger output is no longer correct, and it should be +// done with careful consideration. 
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler. +// +// This can be used to handle an error as general proto message defined by gRPC. +// When this option is used, the mux uses the configured error handler instead of HTTPError and +// OtherErrorHandler. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ProtoErrorHandler will be invoked. 
However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. +func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithLastMatchWins returns a ServeMuxOption that will enable "last +// match wins" behavior, where if multiple path patterns match a +// request path, the last one defined in the .proto file will be used. +func WithLastMatchWins() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.lastMatchWins = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + streamErrorHandler: DefaultHTTPStreamErrorHandler, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if s.lastMatchWins { + s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) + } else { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + } +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
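+//
+// A ServeMux is an http.Handler, so it is typically served directly
+// (illustrative sketch; the address is made up):
+//
+//	mux := runtime.NewServeMux()
+//	// ... register generated gateway handlers on mux ...
+//	_ = http.ListenAndServe(":8080", mux)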
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 00000000..09053695 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. + assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
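+//
+// Generated gateway code normally wraps this in MustPattern. A hedged sketch of a
+// pattern equivalent to the HTTP rule "/v1/{name}", using raw op codes from the
+// utilities package (2=OpLitPush, 1=OpPush, 4=OpConcatN, 5=OpCapture):
+//
+//	pat := MustPattern(NewPattern(1, []int{2, 0, 1, 0, 4, 1, 5, 1}, []string{"v1", "name"}, ""))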
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 00000000..a3151e2a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to a int64 whose value is same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to a int32 whose value is same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 00000000..3fd30da2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,106 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into
+// a proto struct used to represent an error at the end of a stream.
+type StreamErrorHandlerFunc func(context.Context, error) *StreamError
+
+// StreamError is the payload for the final message in a server stream in the event that the server returns an
+// error after a response message has already been sent.
+type StreamError internal.StreamError
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
+// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to the HTTPError variable directly, use the WithProtoErrorHandler option instead.
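Following the advice in the comment above, a minimal wiring sketch (hypothetical, not part of the patch) installs the handler through the mux option rather than by reassigning the package-level variable:

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	// Register the proto error handler via the ServeMux option.
	mux := runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	_ = mux // plug the generated Register*Handler functions into mux as usual
}
```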
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + pb := s.Proto() + contentType = typeMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via +// default logic. +// +// It extracts the gRPC status from err if possible. The fields of the status are +// used to populate the returned StreamError, and the HTTP status code is derived +// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a +// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
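A hypothetical driver for the conversion described above; the error message is invented, and `codes.NotFound` (5) maps to HTTP 404 via `HTTPStatusFromCode`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "no such book")
	se := runtime.DefaultHTTPStreamErrorHandler(context.Background(), err)
	fmt.Println(se.GrpcCode, se.HttpCode, se.HttpStatus, se.Message)
	// Output: 5 404 Not Found no such book
}
```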
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return &StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go new file mode 100644 index 00000000..ba66842c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -0,0 +1,406 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") + +var currentQueryParser QueryParameterParser = &defaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +type defaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +// It instantiates missing protobuf fields as it goes. 
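As a usage sketch for the two entry points above, assuming a hand-rolled stand-in for a protoc-generated message (the `EchoRequest` type, field, and values are invented; only the protobuf struct tag matters to the reflection in this file):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

// EchoRequest stands in for a protoc-generated message type.
type EchoRequest struct {
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}

func (m *EchoRequest) Reset()         { *m = EchoRequest{} }
func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
func (*EchoRequest) ProtoMessage()    {}

func main() {
	var req EchoRequest
	query := url.Values{"name": {"world"}} // as parsed from "?name=world"
	err := runtime.PopulateQueryParameters(&req, query, utilities.NewDoubleArray(nil))
	fmt.Println(req.Name, err) // world <nil>

	// Or set a single (possibly nested) field directly by its dotted path:
	var req2 EchoRequest
	_ = runtime.PopulateFieldFromPath(&req2, "name", "there")
	fmt.Println(req2.Name) // there
}
```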
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { + props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + for _, op := range props.OneofTypes { + if name == op.Prop.OrigName || name == op.Prop.JSONName { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + } + + for _, p := range props.Prop { + if p.OrigName == name { + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil + } + } + return reflect.Value{}, nil, nil +} + +func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) + } + + key, value := values[0], values[1] + keyType := f.Type().Key() + valueType := f.Type().Elem() + if f.IsNil() { + f.Set(reflect.MakeMap(f.Type())) + } + + keyConv, ok := convFromType[keyType.Kind()] + if !ok { + return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) + } + valueConv, ok := convFromType[valueType.Kind()] + if !ok { + return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) + } + + keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) + if err := keyV[1].Interface(); err != nil { + return err.(error) + } + valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := valueV[1].Interface(); err != nil { + return err.(error) + } + + f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) + + return nil +} + +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { + elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + + conv, ok := convFromType[elemType.Kind()] + if !ok { + return fmt.Errorf("unsupported field type %s", elemType) + } + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + } + return nil +} + +func populateField(f reflect.Value, value string, props *proto.Properties) error { + i := f.Addr().Interface() + + // Handle protobuf well known types + var name string + switch m := i.(type) { + case interface{ XXX_WellKnownType() string }: + name = m.XXX_WellKnownType() + case proto.Message: + const wktPrefix = "google.protobuf." 
+ if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { + name = fullName[len(wktPrefix):] + } + } + switch name { + case "Timestamp": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.FieldByName("Seconds").SetInt(int64(t.Unix())) + f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) + return nil + case "Duration": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + f.FieldByName("Seconds").SetInt(s) + f.FieldByName("Nanos").SetInt(ns) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + f.FieldByName("Value").SetBool(true) + } else if value == "false" { + f.FieldByName("Value").SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.FieldByName("Value").SetString(value) + return nil + case "BytesValue": + bytesVal, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return fmt.Errorf("bad BytesValue: %s", value) + } + f.FieldByName("Value").SetBytes(bytesVal) + return nil + case "FieldMask": + p := f.FieldByName("Paths") + for _, v := range strings.Split(value, ",") { + if v != "" { + p.Set(reflect.Append(p, reflect.ValueOf(v))) + } + } + return nil + } + + // Handle Time and Duration stdlib types + switch t := i.(type) { + case *time.Time: + pt, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + *t = pt + return nil + case *time.Duration: + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + *t = d + return nil + } + + // is the destination field an enumeration type? 
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 00000000..7109d793 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 00000000..cf79a4d5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 00000000..dfe7de48
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to stack
+	OpPush
+	// OpLitPush pushes a component to stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes it to stack
+	OpPushM
+	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 00000000..6dd38546
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 00000000..c2b7b30d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a Double Array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
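Before the implementation, a small usage sketch of the trie (the sequences and lookups are invented), relying only on `NewDoubleArray` and `HasCommonPrefix` as defined in this file:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Index two field paths: ["foo","bar"] and ["baz"].
	da := utilities.NewDoubleArray([][]string{{"foo", "bar"}, {"baz"}})

	fmt.Println(da.HasCommonPrefix([]string{"foo", "bar", "qux"})) // true
	fmt.Println(da.HasCommonPrefix([]string{"foo"}))               // false: no indexed sequence is a prefix
	fmt.Println(da.HasCommonPrefix([]string{"baz", "anything"}))   // true
}
```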
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + var encoded []int + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + if k < len(sj) { + return true + } + return false +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. +func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 00000000..0a1ff9f9 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! 
❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. 
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 7e6f7aee..4f028749 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,6 +1,5 @@ # Mergo - [![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] @@ -9,6 +8,7 @@ [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] [![Become my sponsor][15]][16] +[![Tidelift][17]][18] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -26,6 +26,8 @@ [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [15]: https://img.shields.io/github/sponsors/imdario [16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ### Mergo in the wild -- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 00000000..a5de61f7 --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee4..b50d5c2a 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8b4e2f47..0ef9b213 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == 
reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
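A sketch of what the two options change, as we read the diff; the `Config` type and values below are invented and this is not part of the vendored tests:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Name *string
	Tags []string
}

func main() {
	val := "backend"
	src := Config{Name: &val, Tags: []string{"b"}}

	// Default behaviour: the destination's non-nil pointer is dereferenced,
	// its pointee "" is empty, so the value is merged *through* the pointer.
	e1 := ""
	d1 := Config{Name: &e1, Tags: []string{"a"}}
	_ = mergo.Merge(&d1, src)
	fmt.Println(*d1.Name, d1.Tags) // backend [a]

	// WithoutDereference: a non-nil pointer is never considered empty, so
	// d2 keeps its own pointer and pointee. WithAppendSlice appends the
	// source slice instead of leaving the destination slice untouched.
	e2 := ""
	d2 := Config{Name: &e2, Tags: []string{"a"}}
	_ = mergo.Merge(&d2, src, mergo.WithoutDereference, mergo.WithAppendSlice)
	fmt.Println(*d2.Name == "", d2.Tags) // true [a b]
}
```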
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 9fe362d4..0a721e2d 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,7 @@ var ( ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad..7e19eba0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
} // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109..19063416 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd..21629087 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0..1d2f7182 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f2..4e91332b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000..baa0cc7d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000..b83c6cf6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000..e78f7dfe --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. 
+// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 74f052aa..eacdd7fd 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 50593b6d..6a95da97 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee6..a4ea5d14 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. 
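The testify hunks above introduce `NotElementsMatch`/`NotElementsMatchf` and `NotErrorAs`/`NotErrorAsf`. A minimal usage sketch of the two new assertions, assuming a hypothetical test package; the test name and the local `timeoutErr` type are illustrative only and are not part of the patch:

```go
package vendordemo

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// timeoutErr is a throwaway error type used only to exercise NotErrorAs.
type timeoutErr struct{ msg string }

func (e *timeoutErr) Error() string { return e.msg }

func TestNewAssertions(t *testing.T) {
	// NotElementsMatch is the inverse of ElementsMatch: order is ignored,
	// so the two lists must differ as multisets for the assertion to pass.
	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// NotErrorAs passes when no error in err's chain matches the target type.
	err := fmt.Errorf("wrap: %w", errors.New("plain"))
	var target *timeoutErr
	assert.NotErrorAs(t, err, &target)
}
```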
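The new `assert/yaml` indirection added above is selected with build tags. Under `testify_yaml_custom`, the caller must install an `Unmarshal` implementation before any `YAMLEq` assertion runs. A sketch of that wiring, assuming `gopkg.in/yaml.v2` as the substitute decoder (any function matching the signature would do):

```go
//go:build testify_yaml_custom

package vendordemo

import (
	assertYaml "github.com/stretchr/testify/assert/yaml"
	yamlv2 "gopkg.in/yaml.v2"
)

func init() {
	// assert.YAMLEq / YAMLEqf will now decode YAML through yaml.v2,
	// so the test binary no longer links gopkg.in/yaml.v3.
	assertYaml.Unmarshal = func(in []byte, out interface{}) error {
		return yamlv2.Unmarshal(in, out)
	}
}
```

Building without any tag keeps the default `gopkg.in/yaml.v3` wrapper; with `testify_yaml_fail`, the YAML assertions simply fail with an error.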
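The errgroup hunk above documents that `SetLimit(0)` prevents any new goroutines from being added, and (together with the file deletions that follow) drops the pre-go1.20 fallback in favor of calling `context.WithCancelCause` directly. A small sketch of the documented semantics, with illustrative task logic:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// WithContext's context is cancelled (with a cause) on the first error.
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most 2 goroutines in flight; SetLimit(0) would block Go entirely

	for i := 0; i < 5; i++ {
		i := i
		g.Go(func() error {
			// Bail out early if a sibling task has already failed.
			select {
			case <-ctx.Done():
				return context.Cause(ctx)
			default:
			}
			if i == 3 {
				return errors.New("task 3 failed")
			}
			return nil
		})
	}
	// Wait returns the first non-nil error; here: "task 3 failed".
	fmt.Println(g.Wait())
}
```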
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b..00000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce3343..00000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 00000000..37a82528 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 00000000..1200487f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af0..abc39554 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. +func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3..4f432bfe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 
F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e32..75207613 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f..c68acda5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4..a8c607ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f3..18563dd8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573..22912cda 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e06..29344eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 
SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25b..20d51fb9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66..321b6090 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c0441..9bacdf1e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737c..c2242726 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467..6270c8ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME 
= 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc6..9966c194 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c5..848e5fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f04009..669b2adb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a3133..4834e575 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87fe..c6545413 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" 
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 
0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820..c79aaff3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf4..5eb45069 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3..05e50297 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe..38c53ec5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017d..31d2e71a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1e..f4184a33 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 4460
 	SYS_LSM_LIST_MODULES = 4461
 	SYS_MSEAL = 4462
+	SYS_SETXATTRAT = 4463
+	SYS_GETXATTRAT = 4464
+	SYS_LISTXATTRAT = 4465
+	SYS_REMOVEXATTRAT = 4466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e5..05b99622 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 5460
 	SYS_LSM_LIST_MODULES = 5461
 	SYS_MSEAL = 5462
+	SYS_SETXATTRAT = 5463
+	SYS_GETXATTRAT = 5464
+	SYS_LISTXATTRAT = 5465
+	SYS_REMOVEXATTRAT = 5466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e60..43a256e9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 5460
 	SYS_LSM_LIST_MODULES = 5461
 	SYS_MSEAL = 5462
+	SYS_SETXATTRAT = 5463
+	SYS_GETXATTRAT = 5464
+	SYS_LISTXATTRAT = 5465
+	SYS_REMOVEXATTRAT = 5466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6a..eea5ddfc 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 4460
 	SYS_LSM_LIST_MODULES = 4461
 	SYS_MSEAL = 4462
+	SYS_SETXATTRAT = 4463
+	SYS_GETXATTRAT = 4464
+	SYS_LISTXATTRAT = 4465
+	SYS_REMOVEXATTRAT = 4466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a7..0d777bfb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97d..b4463650 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416d..0c7d21c1 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e76..84053916 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d86825..fcf1b790 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e77..52d15b5f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR = 460
 	SYS_LSM_LIST_MODULES = 461
 	SYS_MSEAL = 462
+	SYS_SETXATTRAT = 463
+	SYS_GETXATTRAT = 464
+	SYS_LISTXATTRAT = 465
+	SYS_REMOVEXATTRAT = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148d..a46abe64 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4747,7 +4747,7 @@ const (
 	NL80211_ATTR_MAC_HINT = 0xc8
 	NL80211_ATTR_MAC_MASK = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
-	NL80211_ATTR_MAX = 0x14c
+	NL80211_ATTR_MAX = 0x14d
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5519,7 +5519,7 @@ const (
 	NL80211_MNTR_FLAG_CONTROL = 0x3
 	NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
 	NL80211_MNTR_FLAG_FCSFAIL = 0x1
-	NL80211_MNTR_FLAG_MAX = 0x6
+	NL80211_MNTR_FLAG_MAX = 0x7
 	NL80211_MNTR_FLAG_OTHER_BSS = 0x4
 	NL80211_MNTR_FLAG_PLCPFAIL = 0x2
 	NL80211_MPATH_FLAG_ACTIVE = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
 	Family   uint8
 	Protocol uint8
 }
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go
index 4d57222e..053336e2 100644
--- a/vendor/golang.org/x/text/language/parse.go
+++ b/vendor/golang.org/x/text/language/parse.go
@@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) {
 	if changed {
 		tt.RemakeString()
 	}
-	return makeTag(tt), err
+	return makeTag(tt), nil
 }
 
 // Compose creates a Tag from individual parts, which may be of type Tag, Base,
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/genproto/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
new file mode 100644
index 00000000..e7d3805e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -0,0 +1,235 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/httpbody.proto
+
+package httpbody
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also want access to the raw HTTP body.
+//
+// Example:
+//
+//	message GetResourceRequest {
+//	  // A unique request id.
+//	  string request_id = 1;
+//
+//	  // The raw HTTP body is bound to this field.
+//	  google.api.HttpBody http_body = 2;
+//
+//	}
+//
+//	service ResourceService {
+//	  rpc GetResource(GetResourceRequest)
+//	      returns (google.api.HttpBody);
+//	  rpc UpdateResource(google.api.HttpBody)
+//	      returns (google.protobuf.Empty);
+//
+//	}
+//
+// Example with streaming methods:
+//
+//	service CaldavService {
+//	  rpc GetCalendar(stream google.api.HttpBody)
+//	      returns (stream google.api.HttpBody);
+//	  rpc UpdateCalendar(stream google.api.HttpBody)
+//	      returns (stream google.api.HttpBody);
+//
+//	}
+//
+// Use of this type only changes how the request and response bodies are
+// handled, all other features will continue to work unchanged.
+type HttpBody struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The HTTP Content-Type header value specifying the content type of the body.
+	ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+	// The HTTP request/response body as raw binary.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Application specific response metadata. Must be set in the first response
+	// for streaming APIs.
+	Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *HttpBody) Reset() {
+	*x = HttpBody{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_httpbody_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HttpBody) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpBody) ProtoMessage() {}
+
+func (x *HttpBody) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_httpbody_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
+func (*HttpBody) Descriptor() ([]byte, []int) {
+	return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpBody) GetContentType() string {
+	if x != nil {
+		return x.ContentType
+	}
+	return ""
+}
+
+func (x *HttpBody) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+func (x *HttpBody) GetExtensions() []*anypb.Any {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+var File_google_api_httpbody_proto protoreflect.FileDescriptor
+
+var file_google_api_httpbody_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
+	0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
+	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
+	0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+	0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
+	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+	0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_httpbody_proto_rawDescOnce sync.Once
+	file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
+)
+
+func file_google_api_httpbody_proto_rawDescGZIP() []byte {
+	file_google_api_httpbody_proto_rawDescOnce.Do(func() {
+		file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
+	})
+	return file_google_api_httpbody_proto_rawDescData
+}
+
+var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_api_httpbody_proto_goTypes = []interface{}{
+	(*HttpBody)(nil),  // 0: google.api.HttpBody
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_api_httpbody_proto_depIdxs = []int32{
+	1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_api_httpbody_proto_init() }
+func file_google_api_httpbody_proto_init() {
+	if File_google_api_httpbody_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HttpBody); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_httpbody_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_httpbody_proto_goTypes,
+		DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
+		MessageInfos:      file_google_api_httpbody_proto_msgTypes,
+	}.Build()
+	File_google_api_httpbody_proto = out.File
+	file_google_api_httpbody_proto_rawDesc = nil
+	file_google_api_httpbody_proto_goTypes = nil
+	file_google_api_httpbody_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
new file mode 100644
index 00000000..6ad1b1c1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -0,0 +1,203 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/rpc/status.proto
+
+package status
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). Each `Status` message contains
+// three pieces of data: error code, error message, and error details.
+//
+// You can find out more about this error model and how to work with it in the
+// [API Design Guide](https://cloud.google.com/apis/design/errors).
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The status code, which should be an enum value of
+	// [google.rpc.Code][google.rpc.Code].
+	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// A developer-facing error message, which should be in English. Any
+	// user-facing error message should be localized and sent in the
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// A list of messages that carry the error details. There is a common set of
+	// message types for APIs to use.
+	Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_status_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_status_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Status) GetCode() int32 {
+	if x != nil {
+		return x.Code
+	}
+	return 0
+}
+
+func (x *Status) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+	if x != nil {
+		return x.Details
+	}
+	return nil
+}
+
+var File_google_rpc_status_proto protoreflect.FileDescriptor
+
+var file_google_rpc_status_proto_rawDesc = []byte{
+	0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
+	0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
+	0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_status_proto_rawDescOnce sync.Once
+	file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
+)
+
+func file_google_rpc_status_proto_rawDescGZIP() []byte {
+	file_google_rpc_status_proto_rawDescOnce.Do(func() {
+		file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
+	})
+	return file_google_rpc_status_proto_rawDescData
+}
+
+var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_rpc_status_proto_goTypes = []interface{}{
+	(*Status)(nil),    // 0: google.rpc.Status
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+	1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+	if File_google_rpc_status_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_status_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_status_proto_goTypes,
+		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+		MessageInfos:      file_google_rpc_status_proto_msgTypes,
+	}.Build()
+	File_google_rpc_status_proto = out.File
+	file_google_rpc_status_proto_rawDesc = nil
+	file_google_rpc_status_proto_goTypes = nil
+	file_google_rpc_status_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
new file mode 100644
index 00000000..d10ad665
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
@@ -0,0 +1,23 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package field_mask aliases all exported identifiers in
+// package "google.golang.org/protobuf/types/known/fieldmaskpb".
+package field_mask
+
+import "google.golang.org/protobuf/types/known/fieldmaskpb"
+
+type FieldMask = fieldmaskpb.FieldMask
+
+var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 00000000..e491a9e7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 00000000..53019774
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 00000000..934fac2b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,111 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package codes
+
+import (
+	"strconv"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.CanonicalString = canonicalString
+}
+
+func (c Code) String() string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "Canceled"
+	case Unknown:
+		return "Unknown"
+	case InvalidArgument:
+		return "InvalidArgument"
+	case DeadlineExceeded:
+		return "DeadlineExceeded"
+	case NotFound:
+		return "NotFound"
+	case AlreadyExists:
+		return "AlreadyExists"
+	case PermissionDenied:
+		return "PermissionDenied"
+	case ResourceExhausted:
+		return "ResourceExhausted"
+	case FailedPrecondition:
+		return "FailedPrecondition"
+	case Aborted:
+		return "Aborted"
+	case OutOfRange:
+		return "OutOfRange"
+	case Unimplemented:
+		return "Unimplemented"
+	case Internal:
+		return "Internal"
+	case Unavailable:
+		return "Unavailable"
+	case DataLoss:
+		return "DataLoss"
+	case Unauthenticated:
+		return "Unauthenticated"
+	default:
+		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
+
+func canonicalString(c Code) string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "CANCELLED"
+	case Unknown:
+		return "UNKNOWN"
+	case InvalidArgument:
+		return "INVALID_ARGUMENT"
+	case DeadlineExceeded:
+		return "DEADLINE_EXCEEDED"
+	case NotFound:
+		return "NOT_FOUND"
+	case AlreadyExists:
+		return "ALREADY_EXISTS"
+	case PermissionDenied:
+		return "PERMISSION_DENIED"
+	case ResourceExhausted:
+		return "RESOURCE_EXHAUSTED"
+	case FailedPrecondition:
+		return "FAILED_PRECONDITION"
+	case Aborted:
+		return "ABORTED"
+	case OutOfRange:
+		return "OUT_OF_RANGE"
+	case Unimplemented:
+		return "UNIMPLEMENTED"
+	case Internal:
+		return "INTERNAL"
+	case Unavailable:
+		return "UNAVAILABLE"
+	case DataLoss:
+		return "DATA_LOSS"
+	case Unauthenticated:
+		return "UNAUTHENTICATED"
+	default:
+		return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
diff
--git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 00000000..0b42c302 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,250 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. 
It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. 
It can also be generated for unknown
+	// compression algorithms or a disagreement as to whether an RPC should
+	// be streaming.
+	Unimplemented Code = 12
+
+	// Internal errors. Means some invariants expected by underlying
+	// system have been broken. If you see one of these errors,
+	// something is very broken.
+	//
+	// This error code will be generated by the gRPC framework in several
+	// internal error conditions.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	//
+	// This error code will be generated by the gRPC framework during
+	// abrupt shutdown of a server process or network connection.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	//
+	// This error code will not be generated by the gRPC framework.
+	DataLoss Code = 15
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	//
+	// The gRPC framework will generate this error code when the
+	// authentication metadata is invalid or a Credentials callback fails,
+	// but also expect authentication middleware to generate it.
+	Unauthenticated Code = 16
+
+	_maxCode = 17
+)
+
+var strToCode = map[string]Code{
+	`"OK"`: OK,
+	`"CANCELLED"`:/* [sic] */ Canceled,
+	`"UNKNOWN"`:             Unknown,
+	`"INVALID_ARGUMENT"`:    InvalidArgument,
+	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
+	`"NOT_FOUND"`:           NotFound,
+	`"ALREADY_EXISTS"`:      AlreadyExists,
+	`"PERMISSION_DENIED"`:   PermissionDenied,
+	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
+	`"FAILED_PRECONDITION"`: FailedPrecondition,
+	`"ABORTED"`:             Aborted,
+	`"OUT_OF_RANGE"`:        OutOfRange,
+	`"UNIMPLEMENTED"`:       Unimplemented,
+	`"INTERNAL"`:            Internal,
+	`"UNAVAILABLE"`:         Unavailable,
+	`"DATA_LOSS"`:           DataLoss,
+	`"UNAUTHENTICATED"`:     Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %d", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
+	if jc, ok := strToCode[string(b)]; ok {
+		*c = jc
+		return nil
+	}
+	return fmt.Errorf("invalid code: %q", string(b))
+}
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 00000000..4a899264
--- /dev/null
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 00000000..ac73c9ce --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000..16928c9c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. 
+// All logs in other packages in grpc are logged in spite of the verbosity level.
+//
+// In the default logger,
+// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
+// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+	"os"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+	return grpclog.Logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...any) {
+	grpclog.Logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...any) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...any) {
+	grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...any) {
+	grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...any) {
+	grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...any) {
+	grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...any) {
+	grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...any) {
+	grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...any) {
+	grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...any) {
+	grpclog.Logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...any) {
+	grpclog.Logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...any) {
+	grpclog.Logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...any) {
+	grpclog.Logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...any) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...any) {
+	grpclog.Logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 00000000..b1674d82
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import "google.golang.org/grpc/internal/grpclog"
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	grpclog.Logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 00000000..ecfd36d7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,258 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...any)
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...any)
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. 
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. 
+// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 00000000..7f7044e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 00000000..bfc45102 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. 
+func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. 
Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 00000000..faa998de --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. 
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 00000000..5d665398
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc any // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// KeepaliveMinServerPingTime is the minimum ping interval for servers.
+	// This must be 1s by default, but tests may wish to set it lower for
+	// convenience.
+	KeepaliveMinServerPingTime = time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig any // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the config without rawJSON stripped, in case
+	// there's a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder any // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+	// CanonicalString returns the canonical string of the code defined here:
+	// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+ // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. 
+ SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) +) + +// HealthChecker defines the signature of the client-side LB channel health +// checking function. 
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
new file mode 100644
index 00000000..c7dbc820
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -0,0 +1,205 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC. These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto. gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"errors"
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// Status represents an RPC status code, message, and details. It is immutable
+// and should be created with New, Newf, or FromProto.
+type Status struct {
+	s *spb.Status
+}
+
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+	if len(statusProto) != 1 {
+		// No grpc-status-details-bin header, or multiple; just ignore.
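+		// (statusProto carries the raw values of the grpc-status-details-bin
+		// trailer, each a serialized google.rpc.Status proto; anything other
+		// than exactly one well-formed value falls back to code and message
+		// alone.)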
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := anypb.New(protoadapt.MessageV2Of(detail)) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } + details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail, err := any.UnmarshalNew() + if err != nil { + details = append(details, err) + continue + } + details = append(details, detail) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. 
It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 00000000..4f347edd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 00000000..078137b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. 
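+//
+// A hedged usage sketch (grpc-go wires this dialer up internally; the call
+// below is illustrative only):
+//
+//	d := internal.NetDialerWithTCPKeepalive()
+//	conn, err := d.DialContext(ctx, "tcp", "example.com:443")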
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 00000000..fd7d43a8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 00000000..1e9485fd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,300 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	out := make(MD, len(md))
+	for k, v := range md {
+		out[k] = copyOf(v)
+	}
+	return out
+}
+
+// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
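+// A hedged sketch:
+//
+//	md := metadata.Pairs("k1", "v1")
+//	md.Append("k1", "v2") // md["k1"] is now []string{"v1", "v2"}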
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+	k = strings.ToLower(k)
+	delete(md, k)
+}
+
+// Join joins any number of mds into a single MD.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return out
+}
+
+type mdIncomingKey struct{}
+type mdOutgoingKey struct{}
+
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
+func NewIncomingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdIncomingKey{}, md)
+}
+
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
+func NewOutgoingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	kvCopy := make([]string, 0, len(kv))
+	for i := 0; i < len(kv); i += 2 {
+		kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
+	}
+	added[len(added)-1] = kvCopy
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
+}
+
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil, false
+	}
+	out := make(MD, len(md))
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	return out, true
+}
+
+// ValueFromIncomingContext returns the metadata value corresponding to the metadata
+// key from the incoming metadata if it exists. Keys are matched in a case insensitive
+// manner.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil
+	}
+
+	if v, ok := md[key]; ok {
+		return copyOf(v)
+	}
+	for k, v := range md {
+		// Case insensitive comparison: MD is a map, and there's no guarantee
+		// that the MD attached to the context is created using our helper
+		// functions.
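+		// The linear scan below is the slow path; it only runs when the
+		// exact-key lookup above misses.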
+ if strings.EqualFold(k, key) { + return copyOf(v) + } + } + return nil +} + +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..35e7a20a --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
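+//
+// A hedged sketch of how a config parser might populate it (parseConfig is
+// an assumed helper, not part of this package):
+//
+//	func parse(js string) *serviceconfig.ParseResult {
+//		cfg, err := parseConfig(js)
+//		if err != nil {
+//			return &serviceconfig.ParseResult{Err: err}
+//		}
+//		return &serviceconfig.ParseResult{Config: cfg}
+//	}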
+type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 00000000..a93360ef --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. 
+// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + + return Convert(err).Code() +} + +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. +func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000..cffdfda9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,685 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. 
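+// The resulting message looks like (illustrative):
+//
+//	syntax error (line 1:7): unexpected token ]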
+func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacyWeak { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. 
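+			// (For example, {"foo": null} simply leaves an ordinary field
+			// "foo" unset rather than storing a value.)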
+ if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case 
json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. 
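+		// A string such as "MY_ENUM_VALUE" is resolved against the enum's
+		// descriptor; unknown names are tolerated only when DiscardUnknown
+		// is set (handled below).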
+ s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. + if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
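+// JSON object keys are always strings, so e.g. "42" becomes an int32 key for
+// an int32-keyed map and "true"/"false" become bool keys (see below).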
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000..ae71007c --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000..0e72d853 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,380 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
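+// It is handy for logs and debugging, e.g. fmt.Println(protojson.Format(msg)).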
+// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. 
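+	// (Expanding an Any requires resolving its type URL so the embedded
+	// message can be rendered in full next to the synthetic "@type" field.)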
+	Resolver interface {
+		protoregistry.ExtensionTypeResolver
+		protoregistry.MessageTypeResolver
+	}
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Format(m proto.Message) string {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return "" // invalid syntax, but okay since this is for debugging
+	}
+	o.AllowPartial = true
+	b, _ := o.Marshal(m)
+	return string(b)
+}
+
+// Marshal marshals the given [proto.Message] in the JSON format using options in
+// MarshalOptions. Do not depend on the output being stable. Its output will
+// change across different builds of your program, even when using the same
+// version of the protobuf module.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+	return o.marshal(nil, m)
+}
+
+// MarshalAppend appends the JSON format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
+	return o.marshal(b, m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
+	if o.Multiline && o.Indent == "" {
+		o.Indent = defaultIndent
+	}
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	internalEnc, err := json.NewEncoder(b, o.Indent)
+	if err != nil {
+		return nil, err
+	}
+
+	// Treat nil message interface as an empty message,
+	// in which case the output is an empty JSON object.
+	if m == nil {
+		return append(b, '{', '}'), nil
+	}
+
+	enc := encoder{internalEnc, o}
+	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+		return nil, err
+	}
+	if o.AllowPartial {
+		return enc.Bytes(), nil
+	}
+	return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+	*json.Encoder
+	opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+	var fd filedesc.Field
+	fd.L0.FullName = "@type"
+	fd.L0.Index = -1
+	fd.L1.Cardinality = protoreflect.Optional
+	fd.L1.Kind = protoreflect.StringKind
+	return &fd
+}()
+
+// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+	order.FieldRanger
+	typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+		return
+	}
+	m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
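+//
+// skipNull distinguishes the two public knobs: EmitUnpopulated keeps the
+// null placeholders for presence-sensing fields, while EmitDefaultValues
+// (skipNull=true) omits them.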
+type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + if fd.HasPresence() { + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. 
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000..e9fe1039 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,880 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. 
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
+ Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") + } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil + } + } +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. 
+ if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
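+// For illustration only: a minimal caller-side sketch of the above, assuming
+// the public protojson and structpb packages (neither is referenced by this
+// file). The emitted JSON is simply that of whichever oneof field is set:
+//
+//	v := structpb.NewNumberValue(1.5)
+//	b, _ := protojson.Marshal(v) // b == []byte("1.5")
+//	v = structpb.NewStringValue("hi")
+//	b, _ = protojson.Marshal(v) // b == []byte(`"hi"`)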
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
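+// For illustration only: a minimal caller-side sketch of this format, assuming
+// the public protojson and durationpb packages (neither is referenced by this
+// file):
+//
+//	d := durationpb.New(1500 * time.Millisecond) // seconds: 1, nanos: 500000000
+//	b, _ := protojson.Marshal(d)                 // b == []byte(`"1.500s"`)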
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000..ea1d3e65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. 
+ lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 00000000..2999d713 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000..f7fea7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 00000000..50578d65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 00000000..934f2dcb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. 
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 00000000..ea276d15 --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. 
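+//
+// As an illustrative sketch (pb.Foo is a hypothetical generated message
+// type), the two adapters invert each other:
+//
+//	v1 := protoadapt.MessageV1Of(&pb.Foo{})
+//	v2 := protoadapt.MessageV2Of(v1) // recovers the proto.Message view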
+func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. +func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 00000000..34d76e6c --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,357 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// # Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... 
// handle error
+//	}
+//
+// Note that the documented limitations in duration.proto do not protect a
+// Duration from overflowing the representable range of a time.Duration in Go.
+// The AsDuration method uses saturation arithmetic such that an overflow clamps
+// the resulting value to the closest representable value (e.g., math.MaxInt64
+// for positive overflow and math.MinInt64 for negative overflow).
+//
+// # Conversion from a Go Duration
+//
+// The durationpb.New function can be used to construct a Duration message
+// from a standard Go time.Duration value:
+//
+//	dur := durationpb.New(d)
+//	... // make use of dur as a *durationpb.Duration
+package durationpb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	time "time"
+)
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//	Timestamp start = ...;
+//	Timestamp end = ...;
+//	Duration duration = ...;
+//
+//	duration.seconds = end.seconds - start.seconds;
+//	duration.nanos = end.nanos - start.nanos;
+//
+//	if (duration.seconds < 0 && duration.nanos > 0) {
+//	  duration.seconds += 1;
+//	  duration.nanos -= 1000000000;
+//	} else if (duration.seconds > 0 && duration.nanos < 0) {
+//	  duration.seconds -= 1;
+//	  duration.nanos += 1000000000;
+//	}
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//	Timestamp start = ...;
+//	Duration duration = ...;
+//	Timestamp end = ...;
+//
+//	end.seconds = start.seconds + duration.seconds;
+//	end.nanos = start.nanos + duration.nanos;
+//
+//	if (end.nanos < 0) {
+//	  end.seconds -= 1;
+//	  end.nanos += 1000000000;
+//	} else if (end.nanos >= 1000000000) {
+//	  end.seconds += 1;
+//	  end.nanos -= 1000000000;
+//	}
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//	td = datetime.timedelta(days=3, minutes=10)
+//	duration = Duration()
+//	duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+type Duration struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time.
Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. +func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use Duration.ProtoReflect.Descriptor instead. +func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []any{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + 
DependencyIndexes: file_google_protobuf_duration_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_duration_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_duration_proto = out.File
+	file_google_protobuf_duration_proto_rawDesc = nil
+	file_google_protobuf_duration_proto_goTypes = nil
+	file_google_protobuf_duration_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 00000000..e5d7da38
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,571 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correlate
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to insert additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		...
// handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. +package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. 
+// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. 
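+//
+// As an illustrative sketch, reusing the Profile message declared in the
+// example above (assuming a hypothetical generated Go type pb.Profile for it):
+//
+//	fm, err := fieldmaskpb.New(&pb.Profile{}, "user", "photo")
+//	if err != nil {
+//		... // handle error
+//	}
+//	// fm.Paths is now []string{"user", "photo"}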
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended and breaks insertion of subsequent paths.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
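+// For example (an illustrative case, not upstream documentation), the path
+// set {"a.b", "a", "c.d", "c.d"} normalizes to {"a", "c.d"}: "a.b" is elided
+// because "a" covers it, and the duplicate "c.d" collapses to one entry.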
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []any{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + 
file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 00000000..15b424ec --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,623 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Double stores v in a new DoubleValue and returns a pointer to it. 
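+//
+// As an illustrative note, the wrapper distinguishes an unset field from the
+// zero value:
+//
+//	var unset *DoubleValue // message field absent
+//	zero := Double(0)      // message field present, holding 0
+//	_ = unset.GetValue()   // 0; the getter is nil-safe
+//	_ = zero.GetValue()    // also 0, but the field is set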
+func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. 
+func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. +func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. 
+func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. +func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. +func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Bool stores v in a new BoolValue and returns a pointer to it. 
+func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// String stores v in a new StringValue and returns a pointer to it. +func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. 
+func Bytes(v []byte) *BytesValue {
+	return &BytesValue{Value: v}
+}
+
+func (x *BytesValue) Reset() {
+	*x = BytesValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *BytesValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesValue) ProtoMessage() {}
+
+func (x *BytesValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BytesValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
+	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
+	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
+	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
+	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
+	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+)
+
+func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
+	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+	})
+	return file_google_protobuf_wrappers_proto_rawDescData
+}
+
+var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_protobuf_wrappers_proto_goTypes = []any{
+	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
+	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
+	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
+	(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
+	(*Int32Value)(nil),  // 4: google.protobuf.Int32Value
+	(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
+	(*BoolValue)(nil),   // 6: google.protobuf.BoolValue
+	(*StringValue)(nil), // 7: google.protobuf.StringValue
+	(*BytesValue)(nil),  // 8: google.protobuf.BytesValue
+}
+var file_google_protobuf_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_wrappers_proto_init() }
+func file_google_protobuf_wrappers_proto_init() {
+	if File_google_protobuf_wrappers_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   9,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_wrappers_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_wrappers_proto = out.File
+	file_google_protobuf_wrappers_proto_rawDesc = nil
+	file_google_protobuf_wrappers_proto_goTypes = nil
+	file_google_protobuf_wrappers_proto_depIdxs = nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
index 5180610e..03b66684 100644
--- a/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
@@ -66,7 +66,7 @@ func (m *mapper) infoForData(data []byte, source string) (*Info, error) {
 	mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
 	if err != nil {
 		if _, ok := err.(*meta.NoKindMatchError); ok {
-			return nil, fmt.Errorf("resource mapping not found for name: %q namespace: %q from %q: %v\nensure CRDs are installed first",
+			return nil, fmt.Errorf("resource mapping not found for name: %q namespace: %q from %q: %w\nensure CRDs are installed first",
 				name, namespace, source, err)
 		}
 		return nil, fmt.Errorf("unable to recognize %q: %v", source, err)
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
index 6d38fade..2218b9f5 100644
--- a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -427,9 +427,10 @@ type FeatureGate string
 const (
 	ApplySet                FeatureGate = "KUBECTL_APPLYSET"
 	CmdPluginAsSubcommand   FeatureGate = "KUBECTL_ENABLE_CMD_SHADOW"
-	InteractiveDelete       FeatureGate = "KUBECTL_INTERACTIVE_DELETE"
 	OpenAPIV3Patch          FeatureGate = "KUBECTL_OPENAPIV3_PATCH"
 	RemoteCommandWebsockets FeatureGate = "KUBECTL_REMOTE_COMMAND_WEBSOCKETS"
+	PortForwardWebsockets   FeatureGate = "KUBECTL_PORT_FORWARD_WEBSOCKETS"
+	DebugCustomProfile      FeatureGate = "KUBECTL_DEBUG_CUSTOM_PROFILE"
 )
 
 // IsEnabled returns true iff environment variable is set to true.
@@ -523,11 +524,9 @@ func AddLabelSelectorFlagVar(cmd *cobra.Command, p *string) {
 	cmd.Flags().StringVarP(p, "selector", "l", *p, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.")
 }
 
-func AddPruningFlags(cmd *cobra.Command, prune *bool, pruneAllowlist *[]string, pruneWhitelist *[]string, all *bool, applySetRef *string) {
+func AddPruningFlags(cmd *cobra.Command, prune *bool, pruneAllowlist *[]string, all *bool, applySetRef *string) {
 	// Flags associated with the original allowlist-based alpha
 	cmd.Flags().StringArrayVar(pruneAllowlist, "prune-allowlist", *pruneAllowlist, "Overwrite the default allowlist with <group/version/kind> for --prune")
-	cmd.Flags().StringArrayVar(pruneWhitelist, "prune-whitelist", *pruneWhitelist, "Overwrite the default whitelist with <group/version/kind> for --prune") // TODO: Remove this in kubectl 1.28 or later
-	_ = cmd.Flags().MarkDeprecated("prune-whitelist", "Use --prune-allowlist instead.")
 	cmd.Flags().BoolVar(all, "all", *all, "Select all resources in the namespace of the specified resource types.")
 
 	// Flags associated with the new ApplySet-based alpha
diff --git a/vendor/k8s.io/kubectl/pkg/drain/drain.go b/vendor/k8s.io/kubectl/pkg/drain/drain.go
index 1f6502ed..c1807541 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/drain.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/drain.go
@@ -417,7 +417,9 @@ func waitForDelete(params waitForDeleteParams) ([]corev1.Pod, error) {
 	pendingPods := []corev1.Pod{}
 	for i, pod := range pods {
 		p, err := params.getPodFn(pod.Namespace, pod.Name)
-		if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
+		// The implementation of getPodFn that uses client-go returns an empty Pod struct when there is an error,
+		// so we need to check that err == nil and p != nil to know that a pod was found successfully.
+		if apierrors.IsNotFound(err) || (err == nil && p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
 			if params.onFinishFn != nil {
 				params.onFinishFn(&pod, params.usingEviction, nil)
 			} else if params.onDoneFn != nil {
diff --git a/vendor/k8s.io/kubectl/pkg/drain/filters.go b/vendor/k8s.io/kubectl/pkg/drain/filters.go
index 4e9a21b8..f34b5432 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/filters.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/filters.go
@@ -33,7 +33,7 @@ const (
 	daemonSetWarning    = "ignoring DaemonSet-managed Pods"
 	localStorageFatal   = "Pods with local storage (use --delete-emptydir-data to override)"
 	localStorageWarning = "deleting Pods with local storage"
-	unmanagedFatal      = "Pods declare no controller (use --force to override)"
+	unmanagedFatal      = "cannot delete Pods that declare no controller (use --force to override)"
 	unmanagedWarning    = "deleting Pods that declare no controller"
 )
 
diff --git a/vendor/k8s.io/kubectl/pkg/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go
index 52a7ce6a..34c98782 100644
--- a/vendor/k8s.io/kubectl/pkg/scheme/install.go
+++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go
@@ -45,6 +45,7 @@ import (
 	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
 	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
 	storagev1 "k8s.io/api/storage/v1"
+	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
 	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
@@ -78,5 +79,5 @@ func init() {
 	utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion, policyv1.SchemeGroupVersion))
 	utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion))
 	utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion))
-	utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion, storagev1alpha1.SchemeGroupVersion))
 }
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po
index cd5d53c8..18b6a621 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po
@@ -736,7 +736,7 @@ msgstr "kubectl kontrolliert den Kubernetes-Cluster-Manager"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 #~ msgstr ""
 #~ "\n"
@@ -754,7 +754,7 @@ msgstr "kubectl kontrolliert den Kubernetes-Cluster-Manager"
 #~ "\n"
 #~ "\t\t# Wende die Konfiguration im manifest.yaml an und lösche alle "
 #~ "ConfigMaps, die nicht in der Datei sind.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 
 #, c-format
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po
index 538c7b2d..609c85d7 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po
@@ -260,7 +260,7 @@ msgid ""
 "\n"
 "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 "config maps that are not in the file\n"
-"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 "ConfigMap"
 msgstr ""
 "\n"
@@ -282,7 +282,7 @@ msgstr ""
 "\n"
 "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 "config maps that are not in the file\n"
-"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 "ConfigMap"
 
 #: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po
index d07da117..c4f0e403 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po
@@ -260,7 +260,7 @@ msgid ""
 "\n"
 "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 "config maps that are not in the file\n"
-"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 "ConfigMap"
 msgstr ""
 "\n"
@@ -282,7 +282,7 @@ msgstr ""
 "\n"
 "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 "config maps that are not in the file\n"
-"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 "ConfigMap"
 
 #: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po
index ca119f64..ff6d8326 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po
@@ -815,7 +815,7 @@ msgstr "Kubectl controlla il gestore cluster di Kubernetes"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 #~ msgstr ""
 #~ "\n"
@@ -833,7 +833,7 @@ msgstr "Kubectl controlla il gestore cluster di Kubernetes"
 #~ "\n"
 #~ "\t\t# Applica la configurazione manifest.yaml ed elimina tutti gli altri "
 #~ "configmaps non presenti nel file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 
 #, c-format
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
index d6f4aa2c..7036ad59 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
@@ -891,7 +891,7 @@ msgstr "kubectl controls the Kubernetes cluster manager"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 #~ msgstr ""
 #~ "\n"
@@ -909,7 +909,7 @@ msgstr "kubectl controls the Kubernetes cluster manager"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 
 #, c-format
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po
index dad45128..7fe6d2cf 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po
@@ -814,7 +814,7 @@ msgstr "kubectl controla o gerenciador de cluster do Kubernetes"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/"
 #~ "v1/ConfigMap"
 #~ msgstr ""
 #~ "\n"
@@ -832,7 +832,7 @@ msgstr "kubectl controla o gerenciador de cluster do Kubernetes"
 #~ "\n"
 #~ "\t\t# Aplica a configuração do manifest.yaml e remove todos os outros "
 #~ "configmaps que não estão no arquivo.\n"
-#~ "\t\tkubectl apply —prune -f manifest.yaml —all —prune-whitelist=core/v1/"
+#~ "\t\tkubectl apply —prune -f manifest.yaml —all —prune-allowlist=core/v1/"
 #~ "ConfigMap"
 
 #, c-format
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po
index 29bd5844..ffdf03cb 100644
--- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po
@@ -854,7 +854,7 @@ msgstr "kubectl 控制 Kubernetes 集群管理器"
 #~ "\n"
 #~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
 #~ "configmaps that are not in the file.\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 #~ "ConfigMap"
 #~ msgstr ""
 #~ "\n"
@@ -870,7 +870,7 @@ msgstr "kubectl 控制 Kubernetes 集群管理器"
 #~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
 #~ "\n"
 #~ "\t\t# 应用 manifest.yaml 的配置并删除所有不在这个文件中的 ConfigMaps。\n"
-#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
 #~ "ConfigMap"
 
 #, c-format
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
index 08194d58..7d3bc612 100644
--- a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
@@ -18,4 +18,4 @@ limitations under the License.
 // from a Kubernetes server and then indexing the type definitions.
 // The openapi spec contains the object model definitions and extensions metadata
 // such as the patchStrategy and patchMergeKey for creating patches.
-package openapi // k8s.io/kubectl/pkg/util/openapi
+package openapi // import "k8s.io/kubectl/pkg/util/openapi"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0cfd61ac..7eba1a99 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,6 +8,17 @@ github.com/MakeNowJust/heredoc
 # github.com/a8m/envsubst v1.4.2
 ## explicit; go 1.17
 github.com/a8m/envsubst/parse
+# github.com/argoproj/argo-workflows/v3 v3.6.5
+## explicit; go 1.23.1
+github.com/argoproj/argo-workflows/v3/errors
+github.com/argoproj/argo-workflows/v3/pkg/apis/workflow
+github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1
+github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned
+github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme
+github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1
+github.com/argoproj/argo-workflows/v3/util/context
+github.com/argoproj/argo-workflows/v3/util/deprecation
+github.com/argoproj/argo-workflows/v3/util/json
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
@@ -27,7 +38,7 @@ github.com/davecgh/go-spew/spew
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
-# github.com/evanphx/json-patch v5.6.0+incompatible
+# github.com/evanphx/json-patch v5.8.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
 # github.com/evanphx/json-patch/v5 v5.9.11
@@ -69,7 +80,14 @@ github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/sortkeys
 # github.com/golang/protobuf v1.5.4
 ## explicit; go 1.17
+github.com/golang/protobuf/descriptor
+github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
+github.com/golang/protobuf/protoc-gen-go/descriptor
+github.com/golang/protobuf/ptypes/any
+github.com/golang/protobuf/ptypes/duration
+github.com/golang/protobuf/ptypes/timestamp
+github.com/golang/protobuf/ptypes/wrappers
 # github.com/google/btree v1.1.3
 ## explicit; go 1.18
 github.com/google/btree
@@ -100,13 +118,18 @@ github.com/google/shlex
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/gorilla/websocket v1.5.0
-## explicit; go 1.12
+# github.com/gorilla/websocket v1.5.1
+## explicit; go 1.20
 github.com/gorilla/websocket
 # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
 ## explicit
 github.com/gregjones/httpcache
-# github.com/imdario/mergo v0.3.13
+# github.com/grpc-ecosystem/grpc-gateway v1.16.0
+## explicit; go 1.14
+github.com/grpc-ecosystem/grpc-gateway/internal
+github.com/grpc-ecosystem/grpc-gateway/runtime
+github.com/grpc-ecosystem/grpc-gateway/utilities
+# github.com/imdario/mergo v0.3.15
 ## explicit; go 1.13
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.1.0
@@ -259,9 +282,10 @@ github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
 # github.com/x448/float16 v0.8.4
 ## explicit; go 1.11
 github.com/x448/float16
@@ -296,23 +320,23 @@ golang.org/x/net/idna
 golang.org/x/net/internal/socks
 golang.org/x/net/proxy
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.25.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.28.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.10.0
-## explicit; go 1.18
+# golang.org/x/sync v0.12.0
+## explicit; go 1.23.0
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.29.0
-## explicit; go 1.18
+# golang.org/x/sys v0.31.0
+## explicit; go 1.23.0
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.28.0
-## explicit; go 1.18
+# golang.org/x/term v0.30.0
+## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+# golang.org/x/text v0.23.0
+## explicit; go 1.23.0
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
 golang.org/x/text/encoding/htmlindex
@@ -343,9 +367,30 @@ golang.org/x/tools/go/ast/inspector
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
+# google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80
+## explicit; go 1.19
+google.golang.org/genproto/protobuf/field_mask
+# google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7
+## explicit; go 1.21
+google.golang.org/genproto/googleapis/api/httpbody
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7
+## explicit; go 1.21
+google.golang.org/genproto/googleapis/rpc/status
+# google.golang.org/grpc v1.65.0
+## explicit; go 1.21
+google.golang.org/grpc/codes
+google.golang.org/grpc/connectivity
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/internal/grpclog
+google.golang.org/grpc/internal/status
+google.golang.org/grpc/metadata
+google.golang.org/grpc/serviceconfig
+google.golang.org/grpc/status
 # google.golang.org/protobuf v1.36.3
 ## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
+google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
@@ -354,6 +399,7 @@ google.golang.org/protobuf/internal/detrand
 google.golang.org/protobuf/internal/editiondefaults
 google.golang.org/protobuf/internal/editionssupport
 google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text
@@ -370,6 +416,7 @@ google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version
 google.golang.org/protobuf/proto
+google.golang.org/protobuf/protoadapt
 google.golang.org/protobuf/reflect/protodesc
 google.golang.org/protobuf/reflect/protoreflect
 google.golang.org/protobuf/reflect/protoregistry
@@ -378,7 +425,10 @@ google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
 google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/fieldmaskpb
 google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 ## explicit; go 1.11
 gopkg.in/check.v1
@@ -532,8 +582,8 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/cli-runtime v0.29.3
-## explicit; go 1.21
+# k8s.io/cli-runtime v0.30.3
+## explicit; go 1.22.0
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/genericiooptions
 k8s.io/cli-runtime/pkg/printers
@@ -858,8 +908,8 @@ k8s.io/kube-openapi/pkg/spec3
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
 k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kubectl v0.29.3
-## explicit; go 1.21
+# k8s.io/kubectl v0.30.3
+## explicit; go 1.22.0
 k8s.io/kubectl/pkg/cmd/util
 k8s.io/kubectl/pkg/drain
 k8s.io/kubectl/pkg/scheme