diff --git a/go.mod b/go.mod
index 7910e01e49db..483ac70d3224 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,7 @@ require (
github.com/coreos/stream-metadata-go v0.4.9
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1
- github.com/docker/docker v27.3.1+incompatible
+ github.com/docker/docker v27.1.2+incompatible
github.com/fsouza/go-dockerclient v1.12.0
github.com/gebn/bmc v0.0.0-20250519231546-bf709e03fe3c
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
@@ -59,12 +59,12 @@ require (
github.com/onsi/gomega v1.37.0
github.com/opencontainers/go-digest v1.0.0
github.com/openshift-eng/openshift-tests-extension v0.0.0-20250711173707-dc2a20e5a5f8
- github.com/openshift/api v3.9.0+incompatible
+ github.com/openshift/api v0.0.0-20250710004639-926605d3338b
github.com/openshift/apiserver-library-go v0.0.0-20250710132015-f0d44ef6e53b
github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee
github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee
github.com/openshift/library-go v0.0.0-20250812160438-378de074fe7b
- github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa
+ github.com/operator-framework/api v0.27.0
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417
github.com/pborman/uuid v1.2.0
github.com/pkg/errors v0.9.1
@@ -89,7 +89,7 @@ require (
go.etcd.io/etcd/client/pkg/v3 v3.5.21
go.etcd.io/etcd/client/v3 v3.5.21
golang.org/x/crypto v0.40.0
- golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/mod v0.25.0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0
@@ -133,6 +133,7 @@ require (
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
git.sr.ht/~sbinet/gg v0.5.0 // indirect
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect
@@ -196,9 +197,10 @@ require (
github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/camelcase v1.0.0 // indirect
+ github.com/fatih/color v1.18.0 // indirect
github.com/felixge/fgprof v0.9.4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.8.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
@@ -310,7 +312,6 @@ require (
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/operator-framework/api v0.27.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -441,6 +442,3 @@ replace (
k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f
k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f
)
-
-// github.com/operator-framework/operator-lifecycle-manager requires this import.
-replace github.com/openshift/api => github.com/openshift/api v0.0.0-20250710004639-926605d3338b
diff --git a/go.sum b/go.sum
index cfe03afdd6b5..23281cafec4e 100644
--- a/go.sum
+++ b/go.sum
@@ -292,8 +292,8 @@ github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1 h1:go
github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1/go.mod h1:+AmQ9ZZMMxKQCOOUFHIN/5viLDj3tEQGPsLbNPSc0EI=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
-github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY=
+github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -341,8 +341,8 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU=
github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
@@ -870,8 +870,6 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI=
github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM=
-github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa h1:VzZn+vxGFprQPnaLVDgU5Wfu+2UHsQJh/xLwJw8rXkA=
-github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa/go.mod h1:kzt/wadHjn76OoJeuu7BFzJFRh780BSMYuWaSxk9HGA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 h1:7k+dokKFfpICbkpX5TvvpFbKTFsl/6YQd46EpY2JNhc=
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417/go.mod h1:9LxDV3rAHlGHAYtVrT62y/fqfIxc5RrDiYi9RVeD0gg=
@@ -1143,8 +1141,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
diff --git a/test/extended/router/gatewayapicontroller.go b/test/extended/router/gatewayapicontroller.go
index c5fbc8c32841..879f2299e4e0 100644
--- a/test/extended/router/gatewayapicontroller.go
+++ b/test/extended/router/gatewayapicontroller.go
@@ -14,11 +14,12 @@ import (
configv1 "github.com/openshift/api/config/v1"
operatoringressv1 "github.com/openshift/api/operatoringress/v1"
- operatorsv1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1"
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
exutil "github.com/openshift/origin/test/extended/util"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
e2e "k8s.io/kubernetes/test/e2e/framework"
admissionapi "k8s.io/pod-security-admission/api"
@@ -160,10 +161,15 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
g.By("Deleting the OSSM Operator resources")
- operator, err := operatorsv1.NewForConfigOrDie(oc.AsAdmin().UserConfig()).Operators().Get(context.Background(), serviceMeshOperatorName, metav1.GetOptions{})
+ operator := &operatorsv1.Operator{}
+ restmapper := oc.AsAdmin().RESTMapper()
+ mapping, err := restmapper.RESTMapping(operator.GroupVersionKind().GroupKind())
+ o.Expect(err).NotTo(o.HaveOccurred())
+ us, err := oc.KubeFramework().DynamicClient.Resource(mapping.Resource).Get(context.Background(), serviceMeshOperatorName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get Operator %q", serviceMeshOperatorName)
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(us.UnstructuredContent(), operator)
+ o.Expect(err).NotTo(o.HaveOccurred(), "Failed to convert Operator %q", serviceMeshOperatorName)
- restmapper := oc.AsAdmin().RESTMapper()
for _, ref := range operator.Status.Components.Refs {
mapping, err := restmapper.RESTMapping(ref.GroupVersionKind().GroupKind())
o.Expect(err).NotTo(o.HaveOccurred())
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
index 93d64cd8d5ff..f831735f840e 100644
--- a/vendor/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
- DefaultVersion = "1.47"
+ DefaultVersion = "1.46"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 7164e1eba53d..4a1b7087d8c0 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
-basePath: "/v1.47"
+basePath: "/v1.46"
info:
title: "Docker Engine API"
- version: "1.47"
+ version: "1.46"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
- If you omit the version-prefix, the current version of the API (v1.47) is used.
- For example, calling `/info` is the same as calling `/v1.47/info`. Using the
+ If you omit the version-prefix, the current version of the API (v1.46) is used.
+ For example, calling `/info` is the same as calling `/v1.46/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -393,7 +393,7 @@ definitions:
Make the mount non-recursively read-only, but still leave the mount recursive
(unless NonRecursive is set to `true` in conjunction).
- Added in v1.44, before that version all read-only mounts were
+ Addded in v1.44, before that version all read-only mounts were
non-recursive by default. To match the previous behaviour this
will default to `true` for clients on versions prior to v1.44.
type: "boolean"
@@ -1384,7 +1384,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
+ > always empty. It must not be used, and will be removed in API v1.47.
type: "string"
example: ""
Domainname:
@@ -1394,7 +1394,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
+ > always empty. It must not be used, and will be removed in API v1.47.
type: "string"
example: ""
User:
@@ -1408,7 +1408,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1419,7 +1419,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1430,7 +1430,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1457,7 +1457,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1468,7 +1468,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1479,7 +1479,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
+ > always false. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1516,7 +1516,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
+ > always empty. It must not be used, and will be removed in API v1.47.
type: "string"
default: ""
example: ""
@@ -1555,7 +1555,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
+ > always omitted. It must not be used, and will be removed in API v1.47.
type: "boolean"
default: false
example: false
@@ -1567,7 +1567,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
+ > always omitted. It must not be used, and will be removed in API v1.47.
type: "string"
default: ""
example: ""
@@ -1601,7 +1601,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
+ > always omitted. It must not be used, and will be removed in API v1.47.
type: "integer"
default: 10
x-nullable: true
@@ -2216,7 +2216,7 @@ definitions:
Created:
description: |
Date and time at which the image was created as a Unix timestamp
- (number of seconds since EPOCH).
+ (number of seconds sinds EPOCH).
type: "integer"
x-nullable: false
example: "1644009612"
@@ -2265,19 +2265,6 @@ definitions:
x-nullable: false
type: "integer"
example: 2
- Manifests:
- description: |
- Manifests is a list of manifests available in this image.
- It provides a more detailed view of the platform-specific image manifests
- or other image-attached data like build attestations.
-
- WARNING: This is experimental and may change at any time without any backward
- compatibility.
- type: "array"
- x-nullable: false
- x-omitempty: true
- items:
- $ref: "#/definitions/ImageManifestSummary"
AuthConfig:
type: "object"
@@ -2513,7 +2500,7 @@ definitions:
example: false
Attachable:
description: |
- Whether a global / swarm scope network is manually attachable by regular
+ Wheter a global / swarm scope network is manually attachable by regular
containers from workers in swarm mode.
type: "boolean"
default: false
@@ -3736,7 +3723,7 @@ definitions:
example: "json-file"
Options:
description: |
- Driver-specific options for the selected log driver, specified
+ Driver-specific options for the selectd log driver, specified
as key/value pairs.
type: "object"
additionalProperties:
@@ -5331,7 +5318,7 @@ definitions:
description: |
The default (and highest) API version that is supported by the daemon
type: "string"
- example: "1.47"
+ example: "1.46"
MinAPIVersion:
description: |
The minimum API version that is supported by the daemon
@@ -5347,7 +5334,7 @@ definitions:
The version Go used to compile the daemon, and the version of the Go
runtime in use.
type: "string"
- example: "go1.22.7"
+ example: "go1.21.13"
Os:
description: |
The operating system that the daemon is running on ("linux" or "windows")
@@ -6657,120 +6644,6 @@ definitions:
additionalProperties:
type: "string"
- ImageManifestSummary:
- x-go-name: "ManifestSummary"
- description: |
- ImageManifestSummary represents a summary of an image manifest.
- type: "object"
- required: ["ID", "Descriptor", "Available", "Size", "Kind"]
- properties:
- ID:
- description: |
- ID is the content-addressable ID of an image and is the same as the
- digest of the image manifest.
- type: "string"
- example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
- Descriptor:
- $ref: "#/definitions/OCIDescriptor"
- Available:
- description: Indicates whether all the child content (image config, layers) is fully available locally.
- type: "boolean"
- example: true
- Size:
- type: "object"
- x-nullable: false
- required: ["Content", "Total"]
- properties:
- Total:
- type: "integer"
- format: "int64"
- example: 8213251
- description: |
- Total is the total size (in bytes) of all the locally present
- data (both distributable and non-distributable) that's related to
- this manifest and its children.
- This equal to the sum of [Content] size AND all the sizes in the
- [Size] struct present in the Kind-specific data struct.
- For example, for an image kind (Kind == "image")
- this would include the size of the image content and unpacked
- image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
- Content:
- description: |
- Content is the size (in bytes) of all the locally present
- content in the content store (e.g. image config, layers)
- referenced by this manifest and its children.
- This only includes blobs in the content store.
- type: "integer"
- format: "int64"
- example: 3987495
- Kind:
- type: "string"
- example: "image"
- enum:
- - "image"
- - "attestation"
- - "unknown"
- description: |
- The kind of the manifest.
-
- kind | description
- -------------|-----------------------------------------------------------
- image | Image manifest that can be used to start a container.
- attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest.
- ImageData:
- description: |
- The image data for the image manifest.
- This field is only populated when Kind is "image".
- type: "object"
- x-nullable: true
- x-omitempty: true
- required: ["Platform", "Containers", "Size", "UnpackedSize"]
- properties:
- Platform:
- $ref: "#/definitions/OCIPlatform"
- description: |
- OCI platform of the image. This will be the platform specified in the
- manifest descriptor from the index/manifest list.
- If it's not available, it will be obtained from the image config.
- Containers:
- description: |
- The IDs of the containers that are using this image.
- type: "array"
- items:
- type: "string"
- example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"]
- Size:
- type: "object"
- x-nullable: false
- required: ["Unpacked"]
- properties:
- Unpacked:
- type: "integer"
- format: "int64"
- example: 3987495
- description: |
- Unpacked is the size (in bytes) of the locally unpacked
- (uncompressed) image content that's directly usable by the containers
- running this image.
- It's independent of the distributable content - e.g.
- the image might still have an unpacked data that's still used by
- some container even when the distributable/compressed content is
- already gone.
- AttestationData:
- description: |
- The image data for the attestation manifest.
- This field is only populated when Kind is "attestation".
- type: "object"
- x-nullable: true
- x-omitempty: true
- required: ["For"]
- properties:
- For:
- description: |
- The digest of the image manifest that this attestation is for.
- type: "string"
- example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
-
paths:
/containers/json:
get:
@@ -7712,7 +7585,7 @@ paths:
* Memory usage % = `(used_memory / available_memory) * 100.0`
* cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
* system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
- * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
+ * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
* CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
operationId: "ContainerStats"
produces: ["application/json"]
@@ -8749,11 +8622,6 @@ paths:
description: "Show digest information as a `RepoDigests` field on each image."
type: "boolean"
default: false
- - name: "manifests"
- in: "query"
- description: "Include `Manifests` in the image summary."
- type: "boolean"
- default: false
tags: ["Image"]
/build:
post:
@@ -9226,23 +9094,12 @@ paths:
parameters:
- name: "name"
in: "path"
- description: |
- Name of the image to push. For example, `registry.example.com/myimage`.
- The image must be present in the local image store with the same name.
-
- The name should be provided without tag; if a tag is provided, it
- is ignored. For example, `registry.example.com/myimage:latest` is
- considered equivalent to `registry.example.com/myimage`.
-
- Use the `tag` parameter to specify the tag to push.
+ description: "Image name or ID."
type: "string"
required: true
- name: "tag"
in: "query"
- description: |
- Tag of the image to push. For example, `latest`. If no tag is provided,
- all tags of the given image that are present in the local image store
- are pushed.
+ description: "The tag to associate with the image on the registry."
type: "string"
- name: "X-Registry-Auth"
in: "header"
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
index 03648fb7b5dc..727da8839cc2 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -1,7 +1,6 @@
package container // import "github.com/docker/docker/api/types/container"
import (
- "errors"
"fmt"
"strings"
@@ -326,12 +325,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error {
if policy.MaximumRetryCount < 0 {
msg += " and cannot be negative"
}
- return &errInvalidParameter{errors.New(msg)}
+ return &errInvalidParameter{fmt.Errorf(msg)}
}
return nil
case RestartPolicyOnFailure:
if policy.MaximumRetryCount < 0 {
- return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
+ return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
}
return nil
case "":
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index 0914b2a4410c..0c39ab5f18b5 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool {
}
// GetBoolOrDefault returns a boolean value of the key if the key is present
-// and is interpretable as a boolean value. Otherwise the default value is returned.
+// and is intepretable as a boolean value. Otherwise the default value is returned.
// Error is not nil only if the filter values are not valid boolean or are conflicting.
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
fieldValues, ok := args.fields[key]
diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go
deleted file mode 100644
index db8a00830e70..000000000000
--- a/vendor/github.com/docker/docker/api/types/image/manifest.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package image
-
-import (
- "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-type ManifestKind string
-
-const (
- ManifestKindImage ManifestKind = "image"
- ManifestKindAttestation ManifestKind = "attestation"
- ManifestKindUnknown ManifestKind = "unknown"
-)
-
-type ManifestSummary struct {
- // ID is the content-addressable ID of an image and is the same as the
- // digest of the image manifest.
- //
- // Required: true
- ID string `json:"ID"`
-
- // Descriptor is the OCI descriptor of the image.
- //
- // Required: true
- Descriptor ocispec.Descriptor `json:"Descriptor"`
-
- // Indicates whether all the child content (image config, layers) is
- // fully available locally
- //
- // Required: true
- Available bool `json:"Available"`
-
- // Size is the size information of the content related to this manifest.
- // Note: These sizes only take the locally available content into account.
- //
- // Required: true
- Size struct {
- // Content is the size (in bytes) of all the locally present
- // content in the content store (e.g. image config, layers)
- // referenced by this manifest and its children.
- // This only includes blobs in the content store.
- Content int64 `json:"Content"`
-
- // Total is the total size (in bytes) of all the locally present
- // data (both distributable and non-distributable) that's related to
- // this manifest and its children.
- // This equal to the sum of [Content] size AND all the sizes in the
- // [Size] struct present in the Kind-specific data struct.
- // For example, for an image kind (Kind == ManifestKindImage),
- // this would include the size of the image content and unpacked
- // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
- Total int64 `json:"Total"`
- } `json:"Size"`
-
- // Kind is the kind of the image manifest.
- //
- // Required: true
- Kind ManifestKind `json:"Kind"`
-
- // Fields below are specific to the kind of the image manifest.
-
- // Present only if Kind == ManifestKindImage.
- ImageData *ImageProperties `json:"ImageData,omitempty"`
-
- // Present only if Kind == ManifestKindAttestation.
- AttestationData *AttestationProperties `json:"AttestationData,omitempty"`
-}
-
-type ImageProperties struct {
- // Platform is the OCI platform object describing the platform of the image.
- //
- // Required: true
- Platform ocispec.Platform `json:"Platform"`
-
- Size struct {
- // Unpacked is the size (in bytes) of the locally unpacked
- // (uncompressed) image content that's directly usable by the containers
- // running this image.
- // It's independent of the distributable content - e.g.
- // the image might still have an unpacked data that's still used by
- // some container even when the distributable/compressed content is
- // already gone.
- //
- // Required: true
- Unpacked int64 `json:"Unpacked"`
- }
-
- // Containers is an array containing the IDs of the containers that are
- // using this image.
- //
- // Required: true
- Containers []string `json:"Containers"`
-}
-
-type AttestationProperties struct {
- // For is the digest of the image manifest that this attestation is for.
- For digest.Digest `json:"For"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
index 923ebe5a06a0..8e32c9af8689 100644
--- a/vendor/github.com/docker/docker/api/types/image/opts.go
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -76,9 +76,6 @@ type ListOptions struct {
// ContainerCount indicates whether container count should be computed.
ContainerCount bool
-
- // Manifests indicates whether the image manifests should be returned.
- Manifests bool
}
// RemoveOptions holds parameters to remove images.
diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go
index e87e216a28b3..f1e3e2ef018f 100644
--- a/vendor/github.com/docker/docker/api/types/image/summary.go
+++ b/vendor/github.com/docker/docker/api/types/image/summary.go
@@ -1,5 +1,10 @@
package image
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Summary summary
+// swagger:model Summary
type Summary struct {
// Number of containers using this image. Includes both stopped and running
@@ -12,7 +17,7 @@ type Summary struct {
Containers int64 `json:"Containers"`
// Date and time at which the image was created as a Unix timestamp
- // (number of seconds since EPOCH).
+ // (number of seconds sinds EPOCH).
//
// Required: true
Created int64 `json:"Created"`
@@ -42,14 +47,6 @@ type Summary struct {
// Required: true
ParentID string `json:"ParentId"`
- // Manifests is a list of image manifests available in this image. It
- // provides a more detailed view of the platform-specific image manifests or
- // other image-attached data like build attestations.
- //
- // WARNING: This is experimental and may change at any time without any backward
- // compatibility.
- Manifests []ManifestSummary `json:"Manifests,omitempty"`
-
// List of content-addressable digests of locally available image manifests
// that the image is referenced from. Multiple manifests can refer to the
// same image.
diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
index 8e383f6e60cb..97a924e37477 100644
--- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go
+++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
@@ -34,9 +34,10 @@ type AuthConfig struct {
}
// EncodeAuthConfig serializes the auth configuration as a base64url encoded
-// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header.
// (RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
//
-// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
+// For details on base64url encoding, see:
+// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
@@ -45,14 +46,15 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
return base64.URLEncoding.EncodeToString(buf), nil
}
-// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON
+// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
// authentication information as sent through the X-Registry-Auth header.
//
-// This function always returns an [AuthConfig], even if an error occurs. It is up
+// This function always returns an AuthConfig, even if an error occurs. It is up
// to the caller to decide if authentication is required, and if the error can
// be ignored.
//
-// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
+// For details on base64url encoding, see:
+// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
if authEncoded == "" {
return &AuthConfig{}, nil
@@ -67,7 +69,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
// clients and API versions. Current clients and API versions expect authentication
// to be provided through the X-Registry-Auth header.
//
-// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an
+// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an
// error occurs. It is up to the caller to decide if authentication is required,
// and if the error can be ignored.
func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
index 1b4be6fffbab..3eae4b9b297d 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -122,7 +122,7 @@ type CAConfig struct {
SigningCAKey string `json:",omitempty"`
// If this value changes, and there is no specified signing cert and key,
- // then the swarm is forced to generate a new root certificate and key.
	// then the swarm is forced to generate a new root certificate and key.
ForceRotate uint64 `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
index 618a4816209a..bbd9ff0b8f97 100644
--- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -414,7 +414,7 @@ type Info struct {
// the Volume has not been successfully created yet.
VolumeID string `json:",omitempty"`
- // AccessibleTopology is the topology this volume is actually accessible
	// AccessibleTopology is the topology this volume is actually accessible
// from.
AccessibleTopology []Topology `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
index bef679431dce..a9cc1e21e5dd 100644
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -11,11 +11,6 @@ import (
)
// ImageList returns a list of images in the docker host.
-//
-// Experimental: Setting the [options.Manifest] will populate
-// [image.Summary.Manifests] with information about image manifests.
-// This is experimental and might change in the future without any backward
-// compatibility.
func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
var images []image.Summary
@@ -52,9 +47,6 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]
if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
query.Set("shared-size", "1")
}
- if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") {
- query.Set("manifests", "1")
- }
serverResp, err := cli.get(ctx, "/images/json", query, nil)
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index 8d2c8857fb03..035160c834e4 100644
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -290,7 +290,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
}
// Stream is an io.Writer for output with utilities to get the output's file
-// descriptor and to detect whether it's a terminal.
// descriptor and to detect whether it's a terminal.
//
// it is subset of the streams.Out type in
// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
index 3ea3012b188b..3792c67a9e45 100644
--- a/vendor/github.com/docker/docker/pkg/pools/pools.go
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -124,7 +124,7 @@ func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
}
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
-// into the pool and closes the writer if it's an io.WriteCloser.
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
return ioutils.NewWriteCloserWrapper(w, func() error {
buf.Flush()
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
index b877ecc5a942..facfbb3126f1 100644
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -6,7 +6,7 @@ import (
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
-// It returns a nil slice and nil error if the xattr is not set.
// It will return a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
sysErr := func(err error) ([]byte, error) {
return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err}
diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
index f4e7dbf37b36..ffc7b992b3c7 100644
--- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
@@ -1,7 +1,7 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
- image_family: freebsd-14-1
+ image_family: freebsd-13-2
install_script:
- pkg update -f
- pkg install -y go
@@ -9,6 +9,5 @@ freebsd_task:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
+ - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 000000000000..fad895851e56
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 000000000000..32f1001be0a5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
index daea9dd6d6d2..391cc076b126 100644
--- a/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -5,6 +5,3 @@
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
-
-/test/kqueue
-/test/a.out
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index fa854785d0f5..e0e57575496c 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,36 +1,8 @@
# Changelog
-1.8.0 2023-10-31
-----------------
-
-### Additions
-
-- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
-
-### Changes and fixes
-
-- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
-
-- kqueue: ignore events with Ident=0 ([#590])
-
-- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
-
-- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
-
-- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
-
-- inotify: fix panic when calling Remove() in a goroutine ([#650])
-
-- fen: allow watching subdirectories of watched directories ([#621])
-
-[#590]: https://github.com/fsnotify/fsnotify/pull/590
-[#610]: https://github.com/fsnotify/fsnotify/pull/610
-[#617]: https://github.com/fsnotify/fsnotify/pull/617
-[#619]: https://github.com/fsnotify/fsnotify/pull/619
-[#620]: https://github.com/fsnotify/fsnotify/pull/620
-[#621]: https://github.com/fsnotify/fsnotify/pull/621
-[#625]: https://github.com/fsnotify/fsnotify/pull/625
-[#650]: https://github.com/fsnotify/fsnotify/pull/650
+Unreleased
+----------
+Nothing yet.
1.7.0 - 2023-10-22
------------------
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index e4ac2a2fffdc..ea379759d51a 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,7 +1,7 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:
-- To avoid "wasted" work, please discuss changes on the issue tracker first. You
-- To avoid "wasted" work, please discuss changes on the issue tracker first. You
can just send PRs, but they may end up being rejected for one reason or the
other.
@@ -20,124 +20,6 @@ platforms. Testing different platforms locally can be done with something like
Use the `-short` flag to make the "stress test" run faster.
-Writing new tests
------------------
-Scripts in the testdata directory allow creating test cases in a "shell-like"
-syntax. The basic format is:
-
- script
-
- Output:
- desired output
-
-For example:
-
- # Create a new empty file with some data.
- watch /
- echo data >/file
-
- Output:
- create /file
- write /file
-
-Just create a new file to add a new test; select which tests to run with
-`-run TestScript/[path]`.
-
-script
-------
-The script is a "shell-like" script:
-
- cmd arg arg
-
-Comments are supported with `#`:
-
- # Comment
- cmd arg arg # Comment
-
-All operations are done in a temp directory; a path like "/foo" is rewritten to
-"/tmp/TestFoo/foo".
-
-Arguments can be quoted with `"` or `'`; there are no escapes and they're
-functionally identical right now, but this may change in the future, so best to
-assume shell-like rules.
-
- touch "/file with spaces"
-
-End-of-line escapes with `\` are not supported.
-
-### Supported commands
-
- watch path [ops] # Watch the path, reporting events for it. Nothing is
- # watched by default. Optionally a list of ops can be
- # given, as with AddWith(path, WithOps(...)).
- unwatch path # Stop watching the path.
- watchlist n # Assert watchlist length.
-
- stop # Stop running the script; for debugging.
- debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
- parallel by default, so -parallel=1 is probably a good
- idea).
-
- touch path
- mkdir [-p] dir
- ln -s target link # Only ln -s supported.
- mkfifo path
- mknod dev path
- mv src dst
- rm [-r] path
- chmod mode path # Octal only
- sleep time-in-ms
-
- cat path # Read path (does nothing with the data; just reads it).
- echo str >>path # Append "str" to "path".
- echo str >path # Truncate "path" and write "str".
-
- require reason # Skip the test if "reason" is true; "skip" and
- skip reason # "require" behave identical; it supports both for
- # readability. Possible reasons are:
- #
- # always Always skip this test.
- # symlink Symlinks are supported (requires admin
- # permissions on Windows).
- # mkfifo Platform doesn't support FIFO named sockets.
- # mknod Platform doesn't support device nodes.
-
-
-output
-------
-After `Output:` the desired output is given; this is indented by convention, but
-that's not required.
-
-The format of that is:
-
- # Comment
- event path # Comment
-
- system:
- event path
- system2:
- event path
-
-Every event is one line, and any whitespace between the event and path are
-ignored. The path can optionally be surrounded in ". Anything after a "#" is
-ignored.
-
-Platform-specific tests can be added after GOOS; for example:
-
- watch /
- touch /file
-
- Output:
- # Tested if nothing else matches
- create /file
-
- # Windows-specific test.
- windows:
- write /file
-
-You can specify multiple platforms with a comma (e.g. "windows, linux:").
-"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
-
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index c349c326c718..28497f1dd8e6 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,8 +1,8 @@
//go:build solaris
+// +build solaris
-// FEN backend for illumos (supported) and Solaris (untested, but should work).
-//
-// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -12,33 +12,150 @@ import (
"os"
"path/filepath"
"sync"
- "time"
- "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-type fen struct {
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
mu sync.Mutex
port *unix.EventPort
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- dirs map[string]Op // Explicitly watched directories
- watches map[string]Op // Explicitly watched non-directories
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ dirs map[string]struct{} // Explicitly watched directories
+ watches map[string]struct{} // Explicitly watched non-directories
}
-func newBackend(ev chan Event, errs chan error) (backend, error) {
- return newBufferedBackend(0, ev, errs)
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(0)
}
-func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
- w := &fen{
- Events: ev,
- Errors: errs,
- dirs: make(map[string]Op),
- watches: make(map[string]Op),
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ w := &Watcher{
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
+ dirs: make(map[string]struct{}),
+ watches: make(map[string]struct{}),
done: make(chan struct{}),
}
@@ -54,30 +171,27 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
-func (w *fen) sendEvent(name string, op Op) (sent bool) {
+func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
select {
- case <-w.done:
- return false
case w.Events <- Event{Name: name, Op: op}:
return true
+ case <-w.done:
+ return false
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
-func (w *fen) sendError(err error) (sent bool) {
- if err == nil {
- return true
- }
+func (w *Watcher) sendError(err error) (sent bool) {
select {
- case <-w.done:
- return false
case w.Errors <- err:
return true
+ case <-w.done:
+ return false
}
}
-func (w *fen) isClosed() bool {
+func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
@@ -86,7 +200,8 @@ func (w *fen) isClosed() bool {
}
}
-func (w *fen) Close() error {
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error {
// Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
@@ -98,21 +213,60 @@ func (w *fen) Close() error {
return w.port.Close()
}
-func (w *fen) Add(name string) error { return w.AddWith(name) }
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
-func (w *fen) AddWith(name string, opts ...addOpt) error {
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
- time.Now().Format("15:04:05.000000000"), name)
+ if w.port.PathIsWatched(name) {
+ return nil
}
- with := getOptions(opts...)
- if !w.xSupports(with.op) {
- return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
- }
+ _ = getOptions(opts...)
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
@@ -129,7 +283,7 @@ func (w *fen) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.dirs[name] = with.op
+ w.dirs[name] = struct{}{}
w.mu.Unlock()
return nil
}
@@ -140,22 +294,26 @@ func (w *fen) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.watches[name] = with.op
+ w.watches[name] = struct{}{}
w.mu.Unlock()
return nil
}
-func (w *fen) Remove(name string) error {
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
- time.Now().Format("15:04:05.000000000"), name)
- }
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
@@ -188,7 +346,7 @@ func (w *fen) Remove(name string) error {
}
// readEvents contains the main loop that runs in a goroutine watching for events.
-func (w *fen) readEvents() {
+func (w *Watcher) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
@@ -224,19 +382,17 @@ func (w *fen) readEvents() {
continue
}
- if debug {
- internal.Debug(pevent.Path, pevent.Events)
- }
-
err = w.handleEvent(&pevent)
- if !w.sendError(err) {
- return
+ if err != nil {
+ if !w.sendError(err) {
+ return
+ }
}
}
}
}
-func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
@@ -262,7 +418,7 @@ func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handle
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
-func (w *fen) handleEvent(event *unix.PortEvent) error {
+func (w *Watcher) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
@@ -354,9 +510,15 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
}
if events&unix.FILE_MODIFIED != 0 {
- if fmode.IsDir() && watchedDir {
- if err := w.updateDirectory(path); err != nil {
- return err
+ if fmode.IsDir() {
+ if watchedDir {
+ if err := w.updateDirectory(path); err != nil {
+ return err
+ }
+ } else {
+ if !w.sendEvent(path, Write) {
+ return nil
+ }
}
} else {
if !w.sendEvent(path, Write) {
@@ -381,7 +543,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
return nil
}
-func (w *fen) updateDirectory(path string) error {
+func (w *Watcher) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
@@ -401,8 +563,10 @@ func (w *fen) updateDirectory(path string) error {
return err
}
err = w.associateFile(path, finfo, false)
- if !w.sendError(err) {
- return nil
+ if err != nil {
+ if !w.sendError(err) {
+ return nil
+ }
}
if !w.sendEvent(path, Create) {
return nil
@@ -411,7 +575,7 @@ func (w *fen) updateDirectory(path string) error {
return nil
}
-func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
+func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
@@ -429,34 +593,34 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
- if err != nil && !errors.Is(err, unix.ENOENT) {
+ if err != nil && err != unix.ENOENT {
return err
}
}
-
- var events int
- if !follow {
- // Watch symlinks themselves rather than their targets unless this entry
- // is explicitly watched.
- events |= unix.FILE_NOFOLLOW
- }
- if true { // TODO: implement withOps()
- events |= unix.FILE_MODIFIED
+ // FILE_NOFOLLOW means we watch symlinks themselves rather than their
+ // targets.
+ events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
+ if follow {
+ // We *DO* follow symlinks for explicitly watched entries.
+ events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
}
- if true {
- events |= unix.FILE_ATTRIB
- }
- return w.port.AssociatePath(path, stat, events, stat.Mode())
+ return w.port.AssociatePath(path, stat,
+ events,
+ stat.Mode())
}
-func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
}
-func (w *fen) WatchList() []string {
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -474,11 +638,3 @@ func (w *fen) WatchList() []string {
return entries
}
-
-func (w *fen) xSupports(op Op) bool {
- if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
- op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
index 36c311694cd5..921c1c1e4012 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -1,4 +1,8 @@
//go:build linux && !appengine
+// +build linux,!appengine
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -6,20 +10,127 @@ import (
"errors"
"fmt"
"io"
- "io/fs"
"os"
"path/filepath"
"strings"
"sync"
- "time"
"unsafe"
- "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-type inotify struct {
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
@@ -28,26 +139,8 @@ type inotify struct {
inotifyFile *os.File
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- doneMu sync.Mutex
+ closeMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
-
- // Store rename cookies in an array, with the index wrapping to 0. Almost
- // all of the time what we get is a MOVED_FROM to set the cookie and the
- // next event inotify sends will be MOVED_TO to read it. However, this is
- // not guaranteed – as described in inotify(7) – and we may get other events
- // between the two MOVED_* events (including other MOVED_* ones).
- //
- // A second issue is that moving a file outside the watched directory will
- // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
- // read and delete it. So just storing it in a map would slowly leak memory.
- //
- // Doing it like this gives us a simple fast LRU-cache that won't allocate.
- // Ten items should be more than enough for our purpose, and a loop over
- // such a short array is faster than a map access anyway (not that it hugely
- // matters since we're talking about hundreds of ns at the most, but still).
- cookies [10]koekje
- cookieIndex uint8
- cookiesMu sync.Mutex
}
type (
@@ -57,14 +150,9 @@ type (
path map[string]uint32 // pathname → wd
}
watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
- path string // Watch path.
- recurse bool // Recursion with ./...?
- }
- koekje struct {
- cookie uint32
- path string
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+ path string // Watch path.
}
)
@@ -91,45 +179,23 @@ func (w *watches) add(ww *watch) {
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
- watch := w.wd[wd] // Could have had Remove() called. See #616.
- if watch == nil {
- return
- }
- delete(w.path, watch.path)
+ delete(w.path, w.wd[wd].path)
delete(w.wd, wd)
}
-func (w *watches) removePath(path string) ([]uint32, error) {
+func (w *watches) removePath(path string) (uint32, bool) {
w.mu.Lock()
defer w.mu.Unlock()
- path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
- return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
- }
-
- watch := w.wd[wd]
- if recurse && !watch.recurse {
- return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
+ return 0, false
}
delete(w.path, path)
delete(w.wd, wd)
- if !watch.recurse {
- return []uint32{wd}, nil
- }
- wds := make([]uint32, 0, 8)
- wds = append(wds, wd)
- for p, rwd := range w.path {
- if filepath.HasPrefix(p, path) {
- delete(w.path, p)
- delete(w.wd, rwd)
- wds = append(wds, rwd)
- }
- }
- return wds, nil
+ return wd, true
}
func (w *watches) byPath(path string) *watch {
@@ -170,11 +236,20 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
return nil
}
-func newBackend(ev chan Event, errs chan error) (backend, error) {
- return newBufferedBackend(0, ev, errs)
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(0)
}
-func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
@@ -182,12 +257,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
return nil, errno
}
- w := &inotify{
- Events: ev,
- Errors: errs,
+ w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
@@ -197,29 +272,26 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
}
// Returns true if the event was sent, or false if watcher is closed.
-func (w *inotify) sendEvent(e Event) bool {
+func (w *Watcher) sendEvent(e Event) bool {
select {
- case <-w.done:
- return false
case w.Events <- e:
return true
+ case <-w.done:
+ return false
}
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *inotify) sendError(err error) bool {
- if err == nil {
- return true
- }
+func (w *Watcher) sendError(err error) bool {
select {
- case <-w.done:
- return false
case w.Errors <- err:
return true
+ case <-w.done:
+ return false
}
}
-func (w *inotify) isClosed() bool {
+func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
@@ -228,14 +300,15 @@ func (w *inotify) isClosed() bool {
}
}
-func (w *inotify) Close() error {
- w.doneMu.Lock()
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error {
+ w.closeMu.Lock()
if w.isClosed() {
- w.doneMu.Unlock()
+ w.closeMu.Unlock()
return nil
}
close(w.done)
- w.doneMu.Unlock()
+ w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -250,104 +323,78 @@ func (w *inotify) Close() error {
return nil
}
-func (w *inotify) Add(name string) error { return w.AddWith(name) }
-
-func (w *inotify) AddWith(path string, opts ...addOpt) error {
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
- time.Now().Format("15:04:05.000000000"), path)
- }
-
- with := getOptions(opts...)
- if !w.xSupports(with.op) {
- return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
- }
- path, recurse := recursivePath(path)
- if recurse {
- return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
- if err != nil {
- return err
- }
- if !d.IsDir() {
- if root == path {
- return fmt.Errorf("fsnotify: not a directory: %q", path)
- }
- return nil
- }
+ name = filepath.Clean(name)
+ _ = getOptions(opts...)
- // Send a Create event when adding new directory from a recursive
- // watch; this is for "mkdir -p one/two/three". Usually all those
- // directories will be created before we can set up watchers on the
- // subdirectories, so only "one" would be sent as a Create event and
- // not "one/two" and "one/two/three" (inotifywait -r has the same
- // problem).
- if with.sendCreate && root != path {
- w.sendEvent(Event{Name: root, Op: Create})
- }
-
- return w.add(root, with, true)
- })
- }
+ var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
- return w.add(path, with, false)
-}
-
-func (w *inotify) add(path string, with withOpts, recurse bool) error {
- var flags uint32
- if with.noFollow {
- flags |= unix.IN_DONT_FOLLOW
- }
- if with.op.Has(Create) {
- flags |= unix.IN_CREATE
- }
- if with.op.Has(Write) {
- flags |= unix.IN_MODIFY
- }
- if with.op.Has(Remove) {
- flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
- }
- if with.op.Has(Rename) {
- flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
- }
- if with.op.Has(Chmod) {
- flags |= unix.IN_ATTRIB
- }
- if with.op.Has(xUnportableOpen) {
- flags |= unix.IN_OPEN
- }
- if with.op.Has(xUnportableRead) {
- flags |= unix.IN_ACCESS
- }
- if with.op.Has(xUnportableCloseWrite) {
- flags |= unix.IN_CLOSE_WRITE
- }
- if with.op.Has(xUnportableCloseRead) {
- flags |= unix.IN_CLOSE_NOWRITE
- }
- return w.register(path, flags, recurse)
-}
-
-func (w *inotify) register(path string, flags uint32, recurse bool) error {
- return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
+ return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
- wd, err := unix.InotifyAddWatch(w.fd, path, flags)
+ wd, err := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return nil, err
}
if existing == nil {
return &watch{
- wd: uint32(wd),
- path: path,
- flags: flags,
- recurse: recurse,
+ wd: uint32(wd),
+ path: name,
+ flags: flags,
}, nil
}
@@ -357,44 +404,49 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error {
})
}
-func (w *inotify) Remove(name string) error {
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
- time.Now().Format("15:04:05.000000000"), name)
- }
return w.remove(filepath.Clean(name))
}
-func (w *inotify) remove(name string) error {
- wds, err := w.watches.removePath(name)
- if err != nil {
- return err
- }
-
- for _, wd := range wds {
- _, err := unix.InotifyRmWatch(w.fd, wd)
- if err != nil {
- // TODO: Perhaps it's not helpful to return an error here in every
- // case; the only two possible errors are:
- //
- // EBADF, which happens when w.fd is not a valid file descriptor of
- // any kind.
- //
- // EINVAL, which is when fd is not an inotify descriptor or wd is
- // not a valid watch descriptor. Watch descriptors are invalidated
- // when they are removed explicitly or implicitly; explicitly by
- // inotify_rm_watch, implicitly when the file they are watching is
- // deleted.
- return err
- }
+func (w *Watcher) remove(name string) error {
+ wd, ok := w.watches.removePath(name)
+ if !ok {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+ }
+
+ success, errno := unix.InotifyRmWatch(w.fd, wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case;
+ // The only two possible errors are:
+ //
+ // - EBADF, which happens when w.fd is not a valid file descriptor
+ // of any kind.
+ // - EINVAL, which is when fd is not an inotify descriptor or wd
+ // is not a valid watch descriptor. Watch descriptors are
+ // invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they
+ // are watching is deleted.
+ return errno
}
return nil
}
-func (w *inotify) WatchList() []string {
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -411,7 +463,7 @@ func (w *inotify) WatchList() []string {
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
-func (w *inotify) readEvents() {
+func (w *Watcher) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
@@ -454,17 +506,15 @@ func (w *inotify) readEvents() {
continue
}
+ var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
- var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
var (
// Point "raw" to the event in the buffer
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask = uint32(raw.Mask)
nameLen = uint32(raw.Len)
- // Move to the next event in the buffer
- next = func() { offset += unix.SizeofInotifyEvent + nameLen }
)
if mask&unix.IN_Q_OVERFLOW != 0 {
@@ -473,53 +523,21 @@ func (w *inotify) readEvents() {
}
}
- /// If the event happened to the watched directory or the watched
- /// file, the kernel doesn't append the filename to the event, but
- /// we would like to always fill the the "Name" field with a valid
- /// filename. We retrieve the path of the watch from the "paths"
- /// map.
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
watch := w.watches.byWd(uint32(raw.Wd))
- /// Can be nil if Remove() was called in another goroutine for this
- /// path inbetween reading the events from the kernel and reading
- /// the internal state. Not much we can do about it, so just skip.
- /// See #616.
- if watch == nil {
- next()
- continue
- }
-
- name := watch.path
- if nameLen > 0 {
- /// Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
- /// The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
- }
-
- if debug {
- internal.Debug(name, raw.Mask, raw.Cookie)
- }
-
- if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
- next()
- continue
- }
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
- if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
-
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
- if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
- if watch.recurse {
- next() // Do nothing
- continue
- }
-
+ if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
@@ -528,69 +546,34 @@ func (w *inotify) readEvents() {
}
}
- /// Skip if we're watching both this path and the parent; the parent
- /// will already send a delete so no need to do it twice.
- if mask&unix.IN_DELETE_SELF != 0 {
- if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
- next()
- continue
- }
+ var name string
+ if watch != nil {
+ name = watch.path
+ }
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
- ev := w.newEvent(name, mask, raw.Cookie)
- // Need to update watch path for recurse.
- if watch.recurse {
- isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
- /// New directory created: set up watch on it.
- if isDir && ev.Has(Create) {
- err := w.register(ev.Name, watch.flags, true)
- if !w.sendError(err) {
- return
- }
+ event := w.newEvent(name, mask)
- // This was a directory rename, so we need to update all
- // the children.
- //
- // TODO: this is of course pretty slow; we should use a
- // better data structure for storing all of this, e.g. store
- // children in the watch. I have some code for this in my
- // kqueue refactor we can use in the future. For now I'm
- // okay with this as it's not publicly available.
- // Correctness first, performance second.
- if ev.renamedFrom != "" {
- w.watches.mu.Lock()
- for k, ww := range w.watches.wd {
- if k == watch.wd || ww.path == ev.Name {
- continue
- }
- if strings.HasPrefix(ww.path, ev.renamedFrom) {
- ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
- w.watches.wd[k] = ww
- }
- }
- w.watches.mu.Unlock()
- }
+ // Send the events that are not ignored on the events channel
+ if mask&unix.IN_IGNORED == 0 {
+ if !w.sendEvent(event) {
+ return
}
}
- /// Send the events that are not ignored on the events channel
- if !w.sendEvent(ev) {
- return
- }
- next()
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
}
}
}
-func (w *inotify) isRecursive(path string) bool {
- ww := w.watches.byPath(path)
- if ww == nil { // path could be a file, so also check the Dir.
- ww = w.watches.byPath(filepath.Dir(path))
- }
- return ww != nil && ww.recurse
-}
-
-func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
+// newEvent returns an platform-independent Event based on an inotify mask.
+func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
@@ -601,58 +584,11 @@ func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
- if mask&unix.IN_OPEN == unix.IN_OPEN {
- e.Op |= xUnportableOpen
- }
- if mask&unix.IN_ACCESS == unix.IN_ACCESS {
- e.Op |= xUnportableRead
- }
- if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
- e.Op |= xUnportableCloseWrite
- }
- if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
- e.Op |= xUnportableCloseRead
- }
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
-
- if cookie != 0 {
- if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
- w.cookiesMu.Lock()
- w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
- w.cookieIndex++
- if w.cookieIndex > 9 {
- w.cookieIndex = 0
- }
- w.cookiesMu.Unlock()
- } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
- w.cookiesMu.Lock()
- var prev string
- for _, c := range w.cookies {
- if c.cookie == cookie {
- prev = c.path
- break
- }
- }
- w.cookiesMu.Unlock()
- e.renamedFrom = prev
- }
- }
return e
}
-
-func (w *inotify) xSupports(op Op) bool {
- return true // Supports everything.
-}
-
-func (w *inotify) state() {
- w.watches.mu.Lock()
- defer w.watches.mu.Unlock()
- for wd, ww := range w.watches.wd {
- fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
- }
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
index d8de5ab76fdd..063a0915a07a 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -1,4 +1,8 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -7,195 +11,174 @@ import (
"fmt"
"os"
"path/filepath"
- "runtime"
"sync"
- "time"
- "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-type kqueue struct {
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
- Errors chan error
-
- kq int // File descriptor (as returned by the kqueue() syscall).
- closepipe [2]int // Pipe used for closing kq.
- watches *watches
- done chan struct{}
- doneMu sync.Mutex
-}
-
-type (
- watches struct {
- mu sync.RWMutex
- wd map[int]watch // wd → watch
- path map[string]int // pathname → wd
- byDir map[string]map[int]struct{} // dirname(path) → wd
- seen map[string]struct{} // Keep track of if we know this file exists.
- byUser map[string]struct{} // Watches added with Watcher.Add()
- }
- watch struct {
- wd int
- name string
- linkName string // In case of links; name is the target, and this is the link.
- isDir bool
- dirFlags uint32
- }
-)
-
-func newWatches() *watches {
- return &watches{
- wd: make(map[int]watch),
- path: make(map[string]int),
- byDir: make(map[string]map[int]struct{}),
- seen: make(map[string]struct{}),
- byUser: make(map[string]struct{}),
- }
-}
-
-func (w *watches) listPaths(userOnly bool) []string {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- if userOnly {
- l := make([]string, 0, len(w.byUser))
- for p := range w.byUser {
- l = append(l, p)
- }
- return l
- }
-
- l := make([]string, 0, len(w.path))
- for p := range w.path {
- l = append(l, p)
- }
- return l
-}
-
-func (w *watches) watchesInDir(path string) []string {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- l := make([]string, 0, 4)
- for fd := range w.byDir[path] {
- info := w.wd[fd]
- if _, ok := w.byUser[info.name]; !ok {
- l = append(l, info.name)
- }
- }
- return l
-}
-
-// Mark path as added by the user.
-func (w *watches) addUserWatch(path string) {
- w.mu.Lock()
- defer w.mu.Unlock()
- w.byUser[path] = struct{}{}
-}
-
-func (w *watches) addLink(path string, fd int) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.path[path] = fd
- w.seen[path] = struct{}{}
-}
-
-func (w *watches) add(path, linkPath string, fd int, isDir bool) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.path[path] = fd
- w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
-
- parent := filepath.Dir(path)
- byDir, ok := w.byDir[parent]
- if !ok {
- byDir = make(map[int]struct{}, 1)
- w.byDir[parent] = byDir
- }
- byDir[fd] = struct{}{}
-}
-
-func (w *watches) byWd(fd int) (watch, bool) {
- w.mu.RLock()
- defer w.mu.RUnlock()
- info, ok := w.wd[fd]
- return info, ok
-}
-
-func (w *watches) byPath(path string) (watch, bool) {
- w.mu.RLock()
- defer w.mu.RUnlock()
- info, ok := w.wd[w.path[path]]
- return info, ok
-}
-
-func (w *watches) updateDirFlags(path string, flags uint32) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- fd := w.path[path]
- info := w.wd[fd]
- info.dirFlags = flags
- w.wd[fd] = info
-}
-
-func (w *watches) remove(fd int, path string) bool {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- isDir := w.wd[fd].isDir
- delete(w.path, path)
- delete(w.byUser, path)
-
- parent := filepath.Dir(path)
- delete(w.byDir[parent], fd)
-
- if len(w.byDir[parent]) == 0 {
- delete(w.byDir, parent)
- }
- delete(w.wd, fd)
- delete(w.seen, path)
- return isDir
-}
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
+ Errors chan error
-func (w *watches) markSeen(path string, exists bool) {
- w.mu.Lock()
- defer w.mu.Unlock()
- if exists {
- w.seen[path] = struct{}{}
- } else {
- delete(w.seen, path)
- }
+ done chan struct{}
+ kq int // File descriptor (as returned by the kqueue() syscall).
+ closepipe [2]int // Pipe used for closing.
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Watched file descriptors (key: path).
+ watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
+ userWatches map[string]struct{} // Watches added with Watcher.Add()
+ dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
+ fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
}
-func (w *watches) seenBefore(path string) bool {
- w.mu.RLock()
- defer w.mu.RUnlock()
- _, ok := w.seen[path]
- return ok
+type pathInfo struct {
+ name string
+ isDir bool
}
-func newBackend(ev chan Event, errs chan error) (backend, error) {
- return newBufferedBackend(0, ev, errs)
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(0)
}
-func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
- w := &kqueue{
- Events: ev,
- Errors: errs,
- kq: kq,
- closepipe: closepipe,
- done: make(chan struct{}),
- watches: newWatches(),
+ w := &Watcher{
+ kq: kq,
+ closepipe: closepipe,
+ watches: make(map[string]int),
+ watchesByDir: make(map[string]map[int]struct{}),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]struct{}),
+ userWatches: make(map[string]struct{}),
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
+ done: make(chan struct{}),
}
go w.readEvents()
@@ -220,8 +203,6 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
unix.Close(kq)
return kq, closepipe, err
}
- unix.CloseOnExec(closepipe[0])
- unix.CloseOnExec(closepipe[1])
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
@@ -240,108 +221,166 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
}
// Returns true if the event was sent, or false if watcher is closed.
-func (w *kqueue) sendEvent(e Event) bool {
+func (w *Watcher) sendEvent(e Event) bool {
select {
- case <-w.done:
- return false
case w.Events <- e:
return true
+ case <-w.done:
+ return false
}
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *kqueue) sendError(err error) bool {
- if err == nil {
- return true
- }
+func (w *Watcher) sendError(err error) bool {
select {
- case <-w.done:
- return false
case w.Errors <- err:
return true
- }
-}
-
-func (w *kqueue) isClosed() bool {
- select {
case <-w.done:
- return true
- default:
return false
}
}
-func (w *kqueue) Close() error {
- w.doneMu.Lock()
- if w.isClosed() {
- w.doneMu.Unlock()
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
return nil
}
- close(w.done)
- w.doneMu.Unlock()
+ w.isClosed = true
- pathsToRemove := w.watches.listPaths(false)
+ // copy paths to remove while locked
+ pathsToRemove := make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock() // Unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
+ close(w.done)
+
return nil
}
-func (w *kqueue) Add(name string) error { return w.AddWith(name) }
-
-func (w *kqueue) AddWith(name string, opts ...addOpt) error {
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
- time.Now().Format("15:04:05.000000000"), name)
- }
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
- with := getOptions(opts...)
- if !w.xSupports(with.op) {
- return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
- }
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ _ = getOptions(opts...)
+ w.mu.Lock()
+ w.userWatches[name] = struct{}{}
+ w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
- if err != nil {
- return err
- }
- w.watches.addUserWatch(name)
- return nil
+ return err
}
-func (w *kqueue) Remove(name string) error {
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
- time.Now().Format("15:04:05.000000000"), name)
- }
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error {
return w.remove(name, true)
}
-func (w *kqueue) remove(name string, unwatchFiles bool) error {
- if w.isClosed() {
+func (w *Watcher) remove(name string, unwatchFiles bool) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
return nil
}
-
- name = filepath.Clean(name)
- info, ok := w.watches.byPath(name)
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
+ err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
- unix.Close(info.wd)
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.userWatches, name)
+
+ parentName := filepath.Dir(name)
+ delete(w.watchesByDir[parentName], watchfd)
+
+ if len(w.watchesByDir[parentName]) == 0 {
+ delete(w.watchesByDir, parentName)
+ }
- isDir := w.watches.remove(info.wd, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ delete(w.fileExists, name)
+ w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if unwatchFiles && isDir {
- pathsToRemove := w.watches.watchesInDir(name)
+ var pathsToRemove []string
+ w.mu.Lock()
+ for fd := range w.watchesByDir[name] {
+ path := w.paths[fd]
+ if _, ok := w.userWatches[path.name]; !ok {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
@@ -352,11 +391,23 @@ func (w *kqueue) remove(name string, unwatchFiles bool) error {
return nil
}
-func (w *kqueue) WatchList() []string {
- if w.isClosed() {
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.isClosed {
return nil
}
- return w.watches.listPaths(true)
+
+ entries := make([]string, 0, len(w.userWatches))
+ for pathname := range w.userWatches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
@@ -366,26 +417,34 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
-func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
- if w.isClosed() {
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
return "", ErrClosed
}
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
- name = filepath.Clean(name)
-
- info, alreadyWatching := w.watches.byPath(name)
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
- // Don't watch sockets or named pipes.
+ // Don't watch sockets or named pipes
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
- // Follow symlinks.
+ // Follow Symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
@@ -396,15 +455,18 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
- _, alreadyWatching = w.watches.byPath(link)
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[link]
+ w.mu.Unlock()
+
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
- w.watches.addLink(name, 0)
+ w.watches[name] = 0
+ w.fileExists[name] = struct{}{}
return link, nil
}
- info.linkName = name
name = link
fi, err = os.Lstat(name)
if err != nil {
@@ -415,7 +477,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
- info.wd, err = unix.Open(name, openMode, 0)
+ watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
@@ -426,25 +488,40 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
return "", err
}
- info.isDir = fi.IsDir()
+ isDir = fi.IsDir()
}
- err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
+ err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
- unix.Close(info.wd)
+ unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
- w.watches.add(name, info.linkName, info.wd, info.isDir)
+ w.mu.Lock()
+ parentName := filepath.Dir(name)
+ w.watches[name] = watchfd
+
+ watchesByDir, ok := w.watchesByDir[parentName]
+ if !ok {
+ watchesByDir = make(map[int]struct{}, 1)
+ w.watchesByDir[parentName] = watchesByDir
+ }
+ watchesByDir[watchfd] = struct{}{}
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
}
- // Watch the directory if it has not been watched before, or if it was
- // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
- if info.isDir {
+ if isDir {
+ // Watch the directory if it has not been watched before, or if it was
+ // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
- (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
- w.watches.updateDirFlags(name, flags)
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
@@ -457,7 +534,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
-func (w *kqueue) readEvents() {
+func (w *Watcher) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
@@ -466,65 +543,50 @@ func (w *kqueue) readEvents() {
}()
eventBuffer := make([]unix.Kevent_t, 10)
- for {
+ for closed := false; !closed; {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
- return
+ closed = true
}
+ continue
}
+ // Flush the events we received to the Events channel
for _, kevent := range kevents {
var (
- wd = int(kevent.Ident)
- mask = uint32(kevent.Fflags)
+ watchfd = int(kevent.Ident)
+ mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
- if wd == w.closepipe[0] {
- return
- }
-
- path, ok := w.watches.byWd(wd)
- if debug {
- internal.Debug(path.name, &kevent)
- }
-
- // On macOS it seems that sometimes an event with Ident=0 is
- // delivered, and no other flags/information beyond that, even
- // though we never saw such a file descriptor. For example in
- // TestWatchSymlink/277 (usually at the end, but sometimes sooner):
- //
- // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
- // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
- // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
- //
- // The first is a normal event, the second with Ident 0. No error
- // flag, no data, no ... nothing.
- //
- // I read a bit through bsd/kern_event.c from the xnu source, but I
- // don't really see an obvious location where this is triggered –
- // this doesn't seem intentional, but idk...
- //
- // Technically fd 0 is a valid descriptor, so only skip it if
- // there's no path, and if we're on macOS.
- if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
+ if watchfd == w.closepipe[0] {
+ closed = true
continue
}
- event := w.newEvent(path.name, path.linkName, mask)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+
+ event := w.newEvent(path.name, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
- w.watches.markSeen(event.Name, false)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
- w.dirChange(event.Name)
- } else if !w.sendEvent(event) {
- return
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ if !w.sendEvent(event) {
+ closed = true
+ continue
+ }
}
if event.Has(Remove) {
@@ -532,34 +594,25 @@ func (w *kqueue) readEvents() {
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
- _, found := w.watches.byPath(fileDir)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
if found {
- // TODO: this branch is never triggered in any test.
- // Added in d6220df (2012).
- // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
- //
- // I don't really get how this can be triggered either.
- // And it wasn't triggered in the patch that added it,
- // either.
- //
- // Original also had a comment:
- // make sure the directory exists before we watch for
- // changes. When we do a recursive watch and perform
- // rm -rf, the parent directory might have gone
- // missing, ignore the missing directory and let the
- // upcoming delete event remove the watch from the
- // parent directory.
- err := w.dirChange(fileDir)
- if !w.sendError(err) {
- return
+ err := w.sendDirectoryChangeEvents(fileDir)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
}
}
} else {
- path := filepath.Clean(event.Name)
- if fi, err := os.Lstat(path); err == nil {
- err := w.sendCreateIfNew(path, fi)
- if !w.sendError(err) {
- return
+ filePath := filepath.Clean(event.Name)
+ if fi, err := os.Lstat(filePath); err == nil {
+ err := w.sendFileCreatedEventIfNew(filePath, fi)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
}
}
}
@@ -569,14 +622,8 @@ func (w *kqueue) readEvents() {
}
// newEvent returns an platform-independent Event based on kqueue Fflags.
-func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
+func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
- if linkName != "" {
- // If the user watched "/path/link" then emit events as "/path/link"
- // rather than "/path/target".
- e.Name = linkName
- }
-
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
@@ -598,7 +645,8 @@ func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
-func (w *kqueue) watchDirectoryFiles(dirPath string) error {
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
files, err := os.ReadDir(dirPath)
if err != nil {
return err
@@ -626,7 +674,9 @@ func (w *kqueue) watchDirectoryFiles(dirPath string) error {
}
}
- w.watches.markSeen(cleanPath, true)
+ w.mu.Lock()
+ w.fileExists[cleanPath] = struct{}{}
+ w.mu.Unlock()
}
return nil
@@ -636,7 +686,7 @@ func (w *kqueue) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
-func (w *kqueue) dirChange(dir string) error {
+func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
@@ -644,51 +694,61 @@ func (w *kqueue) dirChange(dir string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
- return fmt.Errorf("fsnotify.dirChange: %w", err)
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
- return fmt.Errorf("fsnotify.dirChange: %w", err)
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
- err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
+ err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
return nil
}
- return fmt.Errorf("fsnotify.dirChange: %w", err)
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
}
return nil
}
-// Send a create event if the file isn't already being tracked, and start
-// watching this file.
-func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
- if !w.watches.seenBefore(path) {
- if !w.sendEvent(Event{Name: path, Op: Create}) {
- return nil
+// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ if !w.sendEvent(Event{Name: filePath, Op: Create}) {
+ return
}
}
- // Like watchDirectoryFiles, but without doing another ReadDir.
- path, err := w.internalWatch(path, fi)
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fi)
if err != nil {
return err
}
- w.watches.markSeen(path, true)
+
+ w.mu.Lock()
+ w.fileExists[filePath] = struct{}{}
+ w.mu.Unlock()
+
return nil
}
-func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
+func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
- info, _ := w.watches.byPath(name)
- return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME)
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
@@ -696,7 +756,7 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
}
// Register events with the queue.
-func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
+func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
@@ -713,21 +773,10 @@ func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
}
// read retrieves pending events, or waits until an event occurs.
-func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
+func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
-
-func (w *kqueue) xSupports(op Op) bool {
- if runtime.GOOS == "freebsd" {
- //return true // Supports everything.
- }
- if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
- op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go
index 5eb5dbc66f26..d34a23c015f8 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -1,23 +1,205 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
import "errors"
-type other struct {
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
}
-func newBackend(ev chan Event, errs chan error) (backend, error) {
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
-func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
- return newBackend(ev, errs)
-}
-func (w *other) Close() error { return nil }
-func (w *other) WatchList() []string { return nil }
-func (w *other) Add(name string) error { return nil }
-func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
-func (w *other) Remove(name string) error { return nil }
-func (w *other) xSupports(op Op) bool { return false }
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return nil }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return nil }
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return nil }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error { return nil }
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
index c54a63083835..9bc91e5d613f 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -1,8 +1,12 @@
//go:build windows
+// +build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+//
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -15,15 +19,123 @@ import (
"runtime"
"strings"
"sync"
- "time"
"unsafe"
- "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/windows"
)
-type readDirChangesW struct {
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
@@ -35,40 +147,48 @@ type readDirChangesW struct {
closed bool // Set to true when Close() is first called
}
-func newBackend(ev chan Event, errs chan error) (backend, error) {
- return newBufferedBackend(50, ev, errs)
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(50)
}
-func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
- w := &readDirChangesW{
- Events: ev,
- Errors: errs,
+ w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
-func (w *readDirChangesW) isClosed() bool {
+func (w *Watcher) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
-func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
- event.renamedFrom = renamedFrom
select {
case ch := <-w.quit:
w.quit <- ch
@@ -78,19 +198,17 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *readDirChangesW) sendError(err error) bool {
- if err == nil {
- return true
- }
+func (w *Watcher) sendError(err error) bool {
select {
case w.Errors <- err:
return true
case <-w.quit:
- return false
}
+ return false
}
-func (w *readDirChangesW) Close() error {
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
@@ -108,21 +226,57 @@ func (w *readDirChangesW) Close() error {
return <-ch
}
-func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
-func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
- time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
- }
with := getOptions(opts...)
- if !w.xSupports(with.op) {
- return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
- }
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
@@ -141,14 +295,18 @@ func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
return <-in.reply
}
-func (w *readDirChangesW) Remove(name string) error {
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
- if debug {
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
- time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
- }
in := &input{
op: opRemoveWatch,
@@ -162,7 +320,11 @@ func (w *readDirChangesW) Remove(name string) error {
return <-in.reply
}
-func (w *readDirChangesW) WatchList() []string {
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -173,13 +335,7 @@ func (w *readDirChangesW) WatchList() []string {
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
- for name := range watchEntry.names {
- entries = append(entries, filepath.Join(watchEntry.path, name))
- }
- // the directory itself is being watched
- if watchEntry.mask != 0 {
- entries = append(entries, watchEntry.path)
- }
+ entries = append(entries, watchEntry.path)
}
}
@@ -205,7 +361,7 @@ const (
sysFSIGNORED = 0x8000
)
-func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
+func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
@@ -261,7 +417,7 @@ type (
watchMap map[uint32]indexMap
)
-func (w *readDirChangesW) wakeupReader() error {
+func (w *Watcher) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
@@ -269,7 +425,7 @@ func (w *readDirChangesW) wakeupReader() error {
return nil
}
-func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
+func (w *Watcher) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
@@ -283,7 +439,7 @@ func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
return
}
-func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
+func (w *Watcher) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
@@ -326,8 +482,9 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
-func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
- pathname, recurse := recursivePath(pathname)
+func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
+ //pathname, recurse := recursivePath(pathname)
+ recurse := false
dir, err := w.getDir(pathname)
if err != nil {
@@ -381,7 +538,7 @@ func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) e
}
// Must run within the I/O thread.
-func (w *readDirChangesW) remWatch(pathname string) error {
+func (w *Watcher) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
@@ -409,11 +566,11 @@ func (w *readDirChangesW) remWatch(pathname string) error {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if pathname == dir {
- w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
- w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
@@ -421,23 +578,23 @@ func (w *readDirChangesW) remWatch(pathname string) error {
}
// Must run within the I/O thread.
-func (w *readDirChangesW) deleteWatch(watch *watch) {
+func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
- w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
-func (w *readDirChangesW) startRead(watch *watch) error {
+func (w *Watcher) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
@@ -467,7 +624,7 @@ func (w *readDirChangesW) startRead(watch *watch) error {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
- w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
@@ -480,7 +637,7 @@ func (w *readDirChangesW) startRead(watch *watch) error {
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
-func (w *readDirChangesW) readEvents() {
+func (w *Watcher) readEvents() {
var (
n uint32
key uintptr
@@ -543,7 +700,7 @@ func (w *readDirChangesW) readEvents() {
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
- w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
@@ -576,10 +733,6 @@ func (w *readDirChangesW) readEvents() {
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
- if debug {
- internal.Debug(fullname, raw.Action)
- }
-
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
@@ -608,22 +761,21 @@ func (w *readDirChangesW) readEvents() {
}
}
+ sendNameEvent := func() {
+ w.sendEvent(fullname, watch.names[name]&mask)
+ }
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
- w.sendEvent(fullname, "", watch.names[name]&mask)
+ sendNameEvent()
}
if raw.Action == windows.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
- if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
- w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
- } else {
- w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
- }
-
+ w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
- w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
}
// Move to the next event in the buffer
@@ -635,7 +787,8 @@ func (w *readDirChangesW) readEvents() {
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
- w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
+ w.sendError(errors.New(
+ "Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -646,7 +799,7 @@ func (w *readDirChangesW) readEvents() {
}
}
-func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
+func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
@@ -657,7 +810,7 @@ func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
return m
}
-func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
+func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
@@ -672,11 +825,3 @@ func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
}
return 0
}
-
-func (w *readDirChangesW) xSupports(op Op) bool {
- if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
- op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 0760efe91600..24c99cc4999e 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -3,146 +3,19 @@
//
// Currently supported systems:
//
-// - Linux via inotify
-// - BSD, macOS via kqueue
-// - Windows via ReadDirectoryChangesW
-// - illumos via FEN
-//
-// # FSNOTIFY_DEBUG
-//
-// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
-// stderr. This can be useful to track down some problems, especially in cases
-// where fsnotify is used as an indirect dependency.
-//
-// Every event will be printed as soon as there's something useful to print,
-// with as little processing from fsnotify.
-//
-// Example output:
-//
-// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
-// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
-// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
+// Linux 2.6.32+ via inotify
+// BSD, macOS via kqueue
+// Windows via ReadDirectoryChangesW
+// illumos via FEN
package fsnotify
import (
"errors"
"fmt"
- "os"
"path/filepath"
"strings"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\\path\\to\\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all files, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- b backend
-
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
- Events chan Event
-
- // Errors sends any errors.
- Errors chan error
-}
-
// Event represents a file system notification.
type Event struct {
// Path to the file or directory.
@@ -157,16 +30,6 @@ type Event struct {
// This is a bitmask and some systems may send multiple operations at once.
// Use the Event.Has() method instead of comparing with ==.
Op Op
-
- // Create events will have this set to the old path if it's a rename. This
- // only works when both the source and destination are watched. It's not
- // reliable when watching individual files, only directories.
- //
- // For example "mv /tmp/file /tmp/rename" will emit:
- //
- // Event{Op: Rename, Name: "/tmp/file"}
- // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
- renamedFrom string
}
// Op describes a set of file operations.
@@ -187,7 +50,7 @@ const (
// example "remove to trash" is often a rename).
Remove
- // The path was renamed to something else; any watches on it will be
+ // The path was renamed to something else; any watched on it will be
// removed.
Rename
@@ -197,155 +60,15 @@ const (
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
-
- // File descriptor was opened.
- //
- // Only works on Linux and FreeBSD.
- xUnportableOpen
-
- // File was read from.
- //
- // Only works on Linux and FreeBSD.
- xUnportableRead
-
- // File opened for writing was closed.
- //
- // Only works on Linux and FreeBSD.
- //
- // The advantage of using this over Write is that it's more reliable than
- // waiting for Write events to stop. It's also faster (if you're not
- // listening to Write events): copying a file of a few GB can easily
- // generate tens of thousands of Write events in a short span of time.
- xUnportableCloseWrite
-
- // File opened for reading was closed.
- //
- // Only works on Linux and FreeBSD.
- xUnportableCloseRead
)
+// Common errors that can be reported.
var (
- // ErrNonExistentWatch is used when Remove() is called on a path that's not
- // added.
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
-
- // ErrClosed is used when trying to operate on a closed Watcher.
- ErrClosed = errors.New("fsnotify: watcher already closed")
-
- // ErrEventOverflow is reported from the Errors channel when there are too
- // many events:
- //
- // - inotify: inotify returns IN_Q_OVERFLOW – because there are too
- // many queued events (the fs.inotify.max_queued_events
- // sysctl can be used to increase this).
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
- ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
-
- // ErrUnsupported is returned by AddWith() when WithOps() specified an
- // Unportable event that's not supported on this platform.
- xErrUnsupported = errors.New("fsnotify: not supported with this backend")
+ ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
+ ErrClosed = errors.New("fsnotify: watcher already closed")
)
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- ev, errs := make(chan Event), make(chan error)
- b, err := newBackend(ev, errs)
- if err != nil {
- return nil, err
- }
- return &Watcher{b: b, Events: ev, Errors: errs}, nil
-}
-
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
- ev, errs := make(chan Event), make(chan error)
- b, err := newBufferedBackend(sz, ev, errs)
- if err != nil {
- return nil, err
- }
- return &Watcher{b: b, Events: ev, Errors: errs}, nil
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(path string) error { return w.b.Add(path) }
-
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error { return w.b.Close() }
-
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string { return w.b.WatchList() }
-
-// Supports reports if all the listed operations are supported by this platform.
-//
-// Create, Write, Remove, Rename, and Chmod are always supported. It can only
-// return false for an Op starting with Unportable.
-func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
-
func (o Op) String() string {
var b strings.Builder
if o.Has(Create) {
@@ -357,18 +80,6 @@ func (o Op) String() string {
if o.Has(Write) {
b.WriteString("|WRITE")
}
- if o.Has(xUnportableOpen) {
- b.WriteString("|OPEN")
- }
- if o.Has(xUnportableRead) {
- b.WriteString("|READ")
- }
- if o.Has(xUnportableCloseWrite) {
- b.WriteString("|CLOSE_WRITE")
- }
- if o.Has(xUnportableCloseRead) {
- b.WriteString("|CLOSE_READ")
- }
if o.Has(Rename) {
b.WriteString("|RENAME")
}
@@ -389,48 +100,24 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// String returns a string representation of the event with their path.
func (e Event) String() string {
- if e.renamedFrom != "" {
- return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
- }
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
type (
- backend interface {
- Add(string) error
- AddWith(string, ...addOpt) error
- Remove(string) error
- WatchList() []string
- Close() error
- xSupports(Op) bool
- }
addOpt func(opt *withOpts)
withOpts struct {
- bufsize int
- op Op
- noFollow bool
- sendCreate bool
+ bufsize int
}
)
-var debug = func() bool {
- // Check for exactly "1" (rather than mere existence) so we can add
- // options/flags in the future. I don't know if we ever want that, but it's
- // nice to leave the option open.
- return os.Getenv("FSNOTIFY_DEBUG") == "1"
-}()
-
var defaultOpts = withOpts{
bufsize: 65536, // 64K
- op: Create | Write | Remove | Rename | Chmod,
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
- if o != nil {
- o(&with)
- }
+ o(&with)
}
return with
}
@@ -449,44 +136,9 @@ func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
-// WithOps sets which operations to listen for. The default is [Create],
-// [Write], [Remove], [Rename], and [Chmod].
-//
-// Excluding operations you're not interested in can save quite a bit of CPU
-// time; in some use cases there may be hundreds of thousands of useless Write
-// or Chmod operations per second.
-//
-// This can also be used to add unportable operations not supported by all
-// platforms; unportable operations all start with "Unportable":
-// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
-// [UnportableCloseRead].
-//
-// AddWith returns an error when using an unportable operation that's not
-// supported. Use [Watcher.Support] to check for support.
-func withOps(op Op) addOpt {
- return func(opt *withOpts) { opt.op = op }
-}
-
-// WithNoFollow disables following symlinks, so the symlinks themselves are
-// watched.
-func withNoFollow() addOpt {
- return func(opt *withOpts) { opt.noFollow = true }
-}
-
-// "Internal" option for recursive watches on inotify.
-func withCreate() addOpt {
- return func(opt *withOpts) { opt.sendCreate = true }
-}
-
-var enableRecurse = false
-
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
- path = filepath.Clean(path)
- if !enableRecurse { // Only enabled in tests for now.
- return path, false
- }
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
deleted file mode 100644
index b0eab10090d3..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build darwin
-
-package internal
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-var (
- SyscallEACCES = syscall.EACCES
- UnixEACCES = unix.EACCES
-)
-
-var maxfiles uint64
-
-// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
-func SetRlimit() {
- var l syscall.Rlimit
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
- if err == nil && l.Cur != l.Max {
- l.Cur = l.Max
- syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
- }
- maxfiles = l.Cur
-
- if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
- maxfiles = uint64(n)
- }
-
- if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
- maxfiles = uint64(n)
- }
-}
-
-func Maxfiles() uint64 { return maxfiles }
-func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
-func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
deleted file mode 100644
index 928319fb09ab..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package internal
-
-import "golang.org/x/sys/unix"
-
-var names = []struct {
- n string
- m uint32
-}{
- {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
- {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
- {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
- {"NOTE_CHILD", unix.NOTE_CHILD},
- {"NOTE_CRITICAL", unix.NOTE_CRITICAL},
- {"NOTE_DELETE", unix.NOTE_DELETE},
- {"NOTE_EXEC", unix.NOTE_EXEC},
- {"NOTE_EXIT", unix.NOTE_EXIT},
- {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
- {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
- {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
- {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
- {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
- {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
- {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
- {"NOTE_EXTEND", unix.NOTE_EXTEND},
- {"NOTE_FFAND", unix.NOTE_FFAND},
- {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
- {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
- {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
- {"NOTE_FFNOP", unix.NOTE_FFNOP},
- {"NOTE_FFOR", unix.NOTE_FFOR},
- {"NOTE_FORK", unix.NOTE_FORK},
- {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
- {"NOTE_LEEWAY", unix.NOTE_LEEWAY},
- {"NOTE_LINK", unix.NOTE_LINK},
- {"NOTE_LOWAT", unix.NOTE_LOWAT},
- {"NOTE_MACHTIME", unix.NOTE_MACHTIME},
- {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
- {"NOTE_NONE", unix.NOTE_NONE},
- {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
- {"NOTE_OOB", unix.NOTE_OOB},
- //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
- {"NOTE_REAP", unix.NOTE_REAP},
- {"NOTE_RENAME", unix.NOTE_RENAME},
- {"NOTE_REVOKE", unix.NOTE_REVOKE},
- {"NOTE_SECONDS", unix.NOTE_SECONDS},
- {"NOTE_SIGNAL", unix.NOTE_SIGNAL},
- {"NOTE_TRACK", unix.NOTE_TRACK},
- {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
- {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
- {"NOTE_USECONDS", unix.NOTE_USECONDS},
- {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
- {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
- {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
- {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
- {"NOTE_WRITE", unix.NOTE_WRITE},
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
deleted file mode 100644
index 3186b0c3491d..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package internal
-
-import "golang.org/x/sys/unix"
-
-var names = []struct {
- n string
- m uint32
-}{
- {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
- {"NOTE_CHILD", unix.NOTE_CHILD},
- {"NOTE_DELETE", unix.NOTE_DELETE},
- {"NOTE_EXEC", unix.NOTE_EXEC},
- {"NOTE_EXIT", unix.NOTE_EXIT},
- {"NOTE_EXTEND", unix.NOTE_EXTEND},
- {"NOTE_FFAND", unix.NOTE_FFAND},
- {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
- {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
- {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
- {"NOTE_FFNOP", unix.NOTE_FFNOP},
- {"NOTE_FFOR", unix.NOTE_FFOR},
- {"NOTE_FORK", unix.NOTE_FORK},
- {"NOTE_LINK", unix.NOTE_LINK},
- {"NOTE_LOWAT", unix.NOTE_LOWAT},
- {"NOTE_OOB", unix.NOTE_OOB},
- {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
- {"NOTE_RENAME", unix.NOTE_RENAME},
- {"NOTE_REVOKE", unix.NOTE_REVOKE},
- {"NOTE_TRACK", unix.NOTE_TRACK},
- {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
- {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
- {"NOTE_WRITE", unix.NOTE_WRITE},
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
deleted file mode 100644
index f69fdb930f5f..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package internal
-
-import "golang.org/x/sys/unix"
-
-var names = []struct {
- n string
- m uint32
-}{
- {"NOTE_ABSTIME", unix.NOTE_ABSTIME},
- {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
- {"NOTE_CHILD", unix.NOTE_CHILD},
- {"NOTE_CLOSE", unix.NOTE_CLOSE},
- {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
- {"NOTE_DELETE", unix.NOTE_DELETE},
- {"NOTE_EXEC", unix.NOTE_EXEC},
- {"NOTE_EXIT", unix.NOTE_EXIT},
- {"NOTE_EXTEND", unix.NOTE_EXTEND},
- {"NOTE_FFAND", unix.NOTE_FFAND},
- {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
- {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
- {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
- {"NOTE_FFNOP", unix.NOTE_FFNOP},
- {"NOTE_FFOR", unix.NOTE_FFOR},
- {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
- {"NOTE_FORK", unix.NOTE_FORK},
- {"NOTE_LINK", unix.NOTE_LINK},
- {"NOTE_LOWAT", unix.NOTE_LOWAT},
- {"NOTE_MSECONDS", unix.NOTE_MSECONDS},
- {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
- {"NOTE_OPEN", unix.NOTE_OPEN},
- {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
- {"NOTE_READ", unix.NOTE_READ},
- {"NOTE_RENAME", unix.NOTE_RENAME},
- {"NOTE_REVOKE", unix.NOTE_REVOKE},
- {"NOTE_SECONDS", unix.NOTE_SECONDS},
- {"NOTE_TRACK", unix.NOTE_TRACK},
- {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
- {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
- {"NOTE_USECONDS", unix.NOTE_USECONDS},
- {"NOTE_WRITE", unix.NOTE_WRITE},
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
deleted file mode 100644
index 607e683bd731..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
+++ /dev/null
@@ -1,32 +0,0 @@
-//go:build freebsd || openbsd || netbsd || dragonfly || darwin
-
-package internal
-
-import (
- "fmt"
- "os"
- "strings"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-func Debug(name string, kevent *unix.Kevent_t) {
- mask := uint32(kevent.Fflags)
-
- var (
- l []string
- unknown = mask
- )
- for _, n := range names {
- if mask&n.m == n.m {
- l = append(l, n.n)
- unknown ^= n.m
- }
- }
- if unknown > 0 {
- l = append(l, fmt.Sprintf("0x%x", unknown))
- }
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
- time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
deleted file mode 100644
index 35c734be4311..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package internal
-
-import (
- "fmt"
- "os"
- "strings"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-func Debug(name string, mask, cookie uint32) {
- names := []struct {
- n string
- m uint32
- }{
- {"IN_ACCESS", unix.IN_ACCESS},
- {"IN_ATTRIB", unix.IN_ATTRIB},
- {"IN_CLOSE", unix.IN_CLOSE},
- {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
- {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
- {"IN_CREATE", unix.IN_CREATE},
- {"IN_DELETE", unix.IN_DELETE},
- {"IN_DELETE_SELF", unix.IN_DELETE_SELF},
- {"IN_IGNORED", unix.IN_IGNORED},
- {"IN_ISDIR", unix.IN_ISDIR},
- {"IN_MODIFY", unix.IN_MODIFY},
- {"IN_MOVE", unix.IN_MOVE},
- {"IN_MOVED_FROM", unix.IN_MOVED_FROM},
- {"IN_MOVED_TO", unix.IN_MOVED_TO},
- {"IN_MOVE_SELF", unix.IN_MOVE_SELF},
- {"IN_OPEN", unix.IN_OPEN},
- {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
- {"IN_UNMOUNT", unix.IN_UNMOUNT},
- }
-
- var (
- l []string
- unknown = mask
- )
- for _, n := range names {
- if mask&n.m == n.m {
- l = append(l, n.n)
- unknown ^= n.m
- }
- }
- if unknown > 0 {
- l = append(l, fmt.Sprintf("0x%x", unknown))
- }
- var c string
- if cookie > 0 {
- c = fmt.Sprintf("(cookie: %d) ", cookie)
- }
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
- time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
deleted file mode 100644
index e5b3b6f69433..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package internal
-
-import "golang.org/x/sys/unix"
-
-var names = []struct {
- n string
- m uint32
-}{
- {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
- {"NOTE_CHILD", unix.NOTE_CHILD},
- {"NOTE_DELETE", unix.NOTE_DELETE},
- {"NOTE_EXEC", unix.NOTE_EXEC},
- {"NOTE_EXIT", unix.NOTE_EXIT},
- {"NOTE_EXTEND", unix.NOTE_EXTEND},
- {"NOTE_FORK", unix.NOTE_FORK},
- {"NOTE_LINK", unix.NOTE_LINK},
- {"NOTE_LOWAT", unix.NOTE_LOWAT},
- {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
- {"NOTE_RENAME", unix.NOTE_RENAME},
- {"NOTE_REVOKE", unix.NOTE_REVOKE},
- {"NOTE_TRACK", unix.NOTE_TRACK},
- {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
- {"NOTE_WRITE", unix.NOTE_WRITE},
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
deleted file mode 100644
index 1dd455bc5a4e..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package internal
-
-import "golang.org/x/sys/unix"
-
-var names = []struct {
- n string
- m uint32
-}{
- {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
- // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
- {"NOTE_CHILD", unix.NOTE_CHILD},
- {"NOTE_DELETE", unix.NOTE_DELETE},
- {"NOTE_EOF", unix.NOTE_EOF},
- {"NOTE_EXEC", unix.NOTE_EXEC},
- {"NOTE_EXIT", unix.NOTE_EXIT},
- {"NOTE_EXTEND", unix.NOTE_EXTEND},
- {"NOTE_FORK", unix.NOTE_FORK},
- {"NOTE_LINK", unix.NOTE_LINK},
- {"NOTE_LOWAT", unix.NOTE_LOWAT},
- {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
- {"NOTE_RENAME", unix.NOTE_RENAME},
- {"NOTE_REVOKE", unix.NOTE_REVOKE},
- {"NOTE_TRACK", unix.NOTE_TRACK},
- {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
- {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
- {"NOTE_WRITE", unix.NOTE_WRITE},
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
deleted file mode 100644
index f1b2e73bd5ba..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package internal
-
-import (
- "fmt"
- "os"
- "strings"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-func Debug(name string, mask int32) {
- names := []struct {
- n string
- m int32
- }{
- {"FILE_ACCESS", unix.FILE_ACCESS},
- {"FILE_MODIFIED", unix.FILE_MODIFIED},
- {"FILE_ATTRIB", unix.FILE_ATTRIB},
- {"FILE_TRUNC", unix.FILE_TRUNC},
- {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
- {"FILE_DELETE", unix.FILE_DELETE},
- {"FILE_RENAME_TO", unix.FILE_RENAME_TO},
- {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
- {"UNMOUNTED", unix.UNMOUNTED},
- {"MOUNTEDOVER", unix.MOUNTEDOVER},
- {"FILE_EXCEPTION", unix.FILE_EXCEPTION},
- }
-
- var (
- l []string
- unknown = mask
- )
- for _, n := range names {
- if mask&n.m == n.m {
- l = append(l, n.n)
- unknown ^= n.m
- }
- }
- if unknown > 0 {
- l = append(l, fmt.Sprintf("0x%x", unknown))
- }
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
- time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
deleted file mode 100644
index 52bf4ce53b56..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package internal
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "golang.org/x/sys/windows"
-)
-
-func Debug(name string, mask uint32) {
- names := []struct {
- n string
- m uint32
- }{
- {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
- {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
- {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
- {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
- {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
- }
-
- var (
- l []string
- unknown = mask
- )
- for _, n := range names {
- if mask&n.m == n.m {
- l = append(l, n.n)
- unknown ^= n.m
- }
- }
- if unknown > 0 {
- l = append(l, fmt.Sprintf("0x%x", unknown))
- }
- fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
- time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
deleted file mode 100644
index 547df1df84b5..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build freebsd
-
-package internal
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-var (
- SyscallEACCES = syscall.EACCES
- UnixEACCES = unix.EACCES
-)
-
-var maxfiles uint64
-
-func SetRlimit() {
- // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
- var l syscall.Rlimit
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
- if err == nil && l.Cur != l.Max {
- l.Cur = l.Max
- syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
- }
- maxfiles = uint64(l.Cur)
-}
-
-func Maxfiles() uint64 { return maxfiles }
-func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
-func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go
deleted file mode 100644
index 7daa45e19eec..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/internal.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package internal contains some helpers.
-package internal
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go
deleted file mode 100644
index 30976ce97395..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/unix.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build !windows && !darwin && !freebsd
-
-package internal
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-var (
- SyscallEACCES = syscall.EACCES
- UnixEACCES = unix.EACCES
-)
-
-var maxfiles uint64
-
-func SetRlimit() {
- // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
- var l syscall.Rlimit
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
- if err == nil && l.Cur != l.Max {
- l.Cur = l.Max
- syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
- }
- maxfiles = uint64(l.Cur)
-}
-
-func Maxfiles() uint64 { return maxfiles }
-func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
-func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
deleted file mode 100644
index 37dfeddc2896..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !windows
-
-package internal
-
-func HasPrivilegesForSymlink() bool {
- return true
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go
deleted file mode 100644
index a72c64954905..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/internal/windows.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//go:build windows
-
-package internal
-
-import (
- "errors"
-
- "golang.org/x/sys/windows"
-)
-
-// Just a dummy.
-var (
- SyscallEACCES = errors.New("dummy")
- UnixEACCES = errors.New("dummy")
-)
-
-func SetRlimit() {}
-func Maxfiles() uint64 { return 1<<64 - 1 }
-func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
-func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
-
-func HasPrivilegesForSymlink() bool {
- var sid *windows.SID
- err := windows.AllocateAndInitializeSid(
- &windows.SECURITY_NT_AUTHORITY,
- 2,
- windows.SECURITY_BUILTIN_DOMAIN_RID,
- windows.DOMAIN_ALIAS_RID_ADMINS,
- 0, 0, 0, 0, 0, 0,
- &sid)
- if err != nil {
- return false
- }
- defer windows.FreeSid(sid)
- token := windows.Token(0)
- member, err := token.IsMember(sid)
- if err != nil {
- return false
- }
- return member || token.IsElevated()
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
new file mode 100644
index 000000000000..99012ae6539e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
@@ -0,0 +1,259 @@
+#!/usr/bin/env zsh
+[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
+setopt err_exit no_unset pipefail extended_glob
+
+# Simple script to update the godoc comments on all watchers so you don't need
+# to update the same comment 5 times.
+
+watcher=$(</tmp/x
+ print -r -- $cmt >>/tmp/x
+ tail -n+$(( end + 1 )) $file >>/tmp/x
+ mv /tmp/x $file
+ done
+}
+
+set-cmt '^type Watcher struct ' $watcher
+set-cmt '^func NewWatcher(' $new
+set-cmt '^func NewBufferedWatcher(' $newbuffered
+set-cmt '^func (w \*Watcher) Add(' $add
+set-cmt '^func (w \*Watcher) AddWith(' $addwith
+set-cmt '^func (w \*Watcher) Remove(' $remove
+set-cmt '^func (w \*Watcher) Close(' $close
+set-cmt '^func (w \*Watcher) WatchList(' $watchlist
+set-cmt '^[[:space:]]*Events *chan Event$' $events
+set-cmt '^[[:space:]]*Errors *chan error$' $errors
diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
index f65e8fe3edce..4322b0b88557 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
@@ -1,4 +1,5 @@
//go:build freebsd || openbsd || netbsd || dragonfly
+// +build freebsd openbsd netbsd dragonfly
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
index a29fc7aab620..5da5ffa78fe7 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
@@ -1,4 +1,5 @@
//go:build darwin
+// +build darwin
package fsnotify
diff --git a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
deleted file mode 100644
index a0ffb9fcbe0a..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package version
-
-import (
- "encoding/json"
-
- semver "github.com/blang/semver/v4"
-)
-
-// +k8s:openapi-gen=true
-// OperatorVersion is a wrapper around semver.Version which supports correct
-// marshaling to YAML and JSON.
-// +kubebuilder:validation:Type=string
-type OperatorVersion struct {
- semver.Version `json:"-"`
-}
-
-// DeepCopyInto creates a deep-copy of the Version value.
-func (v *OperatorVersion) DeepCopyInto(out *OperatorVersion) {
- out.Major = v.Major
- out.Minor = v.Minor
- out.Patch = v.Patch
-
- if v.Pre != nil {
- pre := make([]semver.PRVersion, len(v.Pre))
- copy(pre, v.Pre)
- out.Pre = pre
- }
-
- if v.Build != nil {
- build := make([]string, len(v.Build))
- copy(build, v.Build)
- out.Build = build
- }
-}
-
-// MarshalJSON implements the encoding/json.Marshaler interface.
-func (v OperatorVersion) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (v *OperatorVersion) UnmarshalJSON(data []byte) (err error) {
- var versionString string
-
- if err = json.Unmarshal(data, &versionString); err != nil {
- return
- }
-
- version := semver.Version{}
- version, err = semver.ParseTolerant(versionString)
- if err != nil {
- return err
- }
- v.Version = version
- return
-}
-
-// OpenAPISchemaType is used by the kube-openapi generator when constructing
-// the OpenAPI spec of this type.
-//
-// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
-func (_ OperatorVersion) OpenAPISchemaType() []string { return []string{"string"} }
-
-// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
-// the OpenAPI spec of this type.
-// "semver" is not a standard openapi format but tooling may use the value regardless
-func (_ OperatorVersion) OpenAPISchemaFormat() string { return "semver" }
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/doc.go
deleted file mode 100644
index 7eba794488ba..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// +kubebuilder:skip
-
-// Package operators contains all resource types of the operators.coreos.com API group.
-package operators
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/register.go b/vendor/github.com/operator-framework/api/pkg/operators/register.go
deleted file mode 100644
index e3c31d51ac22..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/register.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package operators
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-const (
- // GroupName is the group name used in this package.
- GroupName = "operators.coreos.com"
- // GroupVersion is the group version used in this package.
- GroupVersion = runtime.APIVersionInternal
-
- // LEGACY: Exported kind names, remove after major version bump
-
- // ClusterServiceVersionKind is the kind name for ClusterServiceVersion resources.
- ClusterServiceVersionKind = "ClusterServiceVersion"
- // CatalogSourceKind is the kind name for CatalogSource resources.
- CatalogSourceKind = "CatalogSource"
- // InstallPlanKind is the kind name for InstallPlan resources.
- InstallPlanKind = "InstallPlan"
- // SubscriptionKind is the kind name for Subscription resources.
- SubscriptionKind = "Subscription"
- // OperatorKind is the kind name for Operator resources.
- OperatorKind = "Operator"
- // OperatorGroupKind is the kind name for OperatorGroup resources.
- OperatorGroupKind = "OperatorGroup"
-)
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
deleted file mode 100644
index b5f5e3b7e5a5..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
+++ /dev/null
@@ -1,364 +0,0 @@
-package v1alpha1
-
-import (
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/sirupsen/logrus"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
-)
-
-const (
- CatalogSourceCRDAPIVersion = GroupName + "/" + GroupVersion
- CatalogSourceKind = "CatalogSource"
- DefaultRegistryPollDuration = 15 * time.Minute
-)
-
-// SourceType indicates the type of backing store for a CatalogSource
-type SourceType string
-
-const (
- // SourceTypeInternal (deprecated) specifies a CatalogSource of type SourceTypeConfigmap
- SourceTypeInternal SourceType = "internal"
-
- // SourceTypeConfigmap specifies a CatalogSource that generates a configmap-server registry
- SourceTypeConfigmap SourceType = "configmap"
-
- // SourceTypeGrpc specifies a CatalogSource that can use an operator registry image to generate a
- // registry-server or connect to a pre-existing registry at an address.
- SourceTypeGrpc SourceType = "grpc"
-)
-
-const (
- // CatalogSourceSpecInvalidError denotes when fields on the spec of the CatalogSource are not valid.
- CatalogSourceSpecInvalidError ConditionReason = "SpecInvalidError"
- // CatalogSourceConfigMapError denotes when there is an issue extracting manifests from the specified ConfigMap.
- CatalogSourceConfigMapError ConditionReason = "ConfigMapError"
- // CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server.
- CatalogSourceRegistryServerError ConditionReason = "RegistryServerError"
- // CatalogSourceIntervalInvalidError denotes if the registry polling interval is invalid.
- CatalogSourceIntervalInvalidError ConditionReason = "InvalidIntervalError"
-)
-
-type CatalogSourceSpec struct {
- // SourceType is the type of source
- SourceType SourceType `json:"sourceType"`
-
- // Priority field assigns a weight to the catalog source to prioritize them so that it can be consumed by the dependency resolver.
- // Usage:
- // Higher weight indicates that this catalog source is preferred over lower weighted catalog sources during dependency resolution.
- // The range of the priority value can go from positive to negative in the range of int32.
- // The default value to a catalog source with unassigned priority would be 0.
- // The catalog source with the same priority values will be ranked lexicographically based on its name.
- // +optional
- Priority int `json:"priority,omitempty"`
-
- // ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry.
- // Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal.
- // +optional
- ConfigMap string `json:"configMap,omitempty"`
-
- // Address is a host that OLM can use to connect to a pre-existing registry.
- // Format: :
- // Only used when SourceType = SourceTypeGrpc.
- // Ignored when the Image field is set.
- // +optional
- Address string `json:"address,omitempty"`
-
- // Image is an operator-registry container image to instantiate a registry-server with.
- // Only used when SourceType = SourceTypeGrpc.
- // If present, the address field is ignored.
- // +optional
- Image string `json:"image,omitempty"`
-
- // GrpcPodConfig exposes different overrides for the pod spec of the CatalogSource Pod.
- // Only used when SourceType = SourceTypeGrpc and Image is set.
- // +optional
- GrpcPodConfig *GrpcPodConfig `json:"grpcPodConfig,omitempty"`
-
- // UpdateStrategy defines how updated catalog source images can be discovered
- // Consists of an interval that defines polling duration and an embedded strategy type
- // +optional
- UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
-
- // Secrets represent set of secrets that can be used to access the contents of the catalog.
- // It is best to keep this list small, since each will need to be tried for every catalog entry.
- // +optional
- Secrets []string `json:"secrets,omitempty"`
-
- // Metadata
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- Publisher string `json:"publisher,omitempty"`
- Icon Icon `json:"icon,omitempty"`
-}
-
-type SecurityConfig string
-
-const (
- Legacy SecurityConfig = "legacy"
- Restricted SecurityConfig = "restricted"
-)
-
-// GrpcPodConfig contains configuration specified for a catalog source
-type GrpcPodConfig struct {
- // NodeSelector is a selector which must be true for the pod to fit on a node.
- // Selector which must match a node's labels for the pod to be scheduled on that node.
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Tolerations are the catalog source's pod's tolerations.
- // +optional
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-
- // Affinity is the catalog source's pod's affinity.
- // +optional
- Affinity *corev1.Affinity `json:"affinity,omitempty"`
-
- // If specified, indicates the pod's priority.
- // If not specified, the pod priority will be default or zero if there is no
- // default.
- // +optional
- PriorityClassName *string `json:"priorityClassName,omitempty"`
-
- // SecurityContextConfig can be one of `legacy` or `restricted`. The CatalogSource's pod is either injected with the
- // right pod.spec.securityContext and pod.spec.container[*].securityContext values to allow the pod to run in Pod
- // Security Admission (PSA) `restricted` mode, or doesn't set these values at all, in which case the pod can only be
- // run in PSA `baseline` or `privileged` namespaces. If the SecurityContextConfig is unspecified, the mode will be
- // determined by the namespace's PSA configuration. If the namespace is enforcing `restricted` mode, then the pod
- // will be configured as if `restricted` was specified. Otherwise, it will be configured as if `legacy` was
- // specified. Specifying a value other than `legacy` or `restricted` result in a validation error. When using older
- // catalog images, which can not run in `restricted` mode, the SecurityContextConfig should be set to `legacy`.
- //
- // More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/'
- // +optional
- // +kubebuilder:validation:Enum=legacy;restricted
- SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"`
-
- // MemoryTarget configures the $GOMEMLIMIT value for the gRPC catalog Pod. This is a soft memory limit for the server,
- // which the runtime will attempt to meet but makes no guarantees that it will do so. If this value is set, the Pod
- // will have the following modifications made to the container running the server:
- // - the $GOMEMLIMIT environment variable will be set to this value in bytes
- // - the memory request will be set to this value
- //
- // This field should be set if it's desired to reduce the footprint of a catalog server as much as possible, or if
- // a catalog being served is very large and needs more than the default allocation. If your index image has a file-
- // system cache, determine a good approximation for this value by doubling the size of the package cache at
- // /tmp/cache/cache/packages.json in the index image.
- //
- // This field is best-effort; if unset, no default will be used and no Pod memory limit or $GOMEMLIMIT value will be set.
- // +optional
- MemoryTarget *resource.Quantity `json:"memoryTarget,omitempty"`
-
- // ExtractContent configures the gRPC catalog Pod to extract catalog metadata from the provided index image and
- // use a well-known version of the `opm` server to expose it. The catalog index image that this CatalogSource is
- // configured to use *must* be using the file-based catalogs in order to utilize this feature.
- // +optional
- ExtractContent *ExtractContentConfig `json:"extractContent,omitempty"`
-}
-
-// ExtractContentConfig configures context extraction from a file-based catalog index image.
-type ExtractContentConfig struct {
- // CacheDir is the directory storing the pre-calculated API cache.
- CacheDir string `json:"cacheDir"`
- // CatalogDir is the directory storing the file-based catalog contents.
- CatalogDir string `json:"catalogDir"`
-}
-
-// UpdateStrategy holds all the different types of catalog source update strategies
-// Currently only registry polling strategy is implemented
-type UpdateStrategy struct {
- *RegistryPoll `json:"registryPoll,omitempty"`
-}
-
-type RegistryPoll struct {
- // Interval is used to determine the time interval between checks of the latest catalog source version.
- // The catalog operator polls to see if a new version of the catalog source is available.
- // If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source.
- RawInterval string `json:"interval,omitempty"`
- Interval *metav1.Duration `json:"-"`
- ParsingError string `json:"-"`
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (u *UpdateStrategy) UnmarshalJSON(data []byte) (err error) {
- type alias struct {
- *RegistryPoll `json:"registryPoll,omitempty"`
- }
- us := alias{}
- if err = json.Unmarshal(data, &us); err != nil {
- return err
- }
- registryPoll := &RegistryPoll{
- RawInterval: us.RegistryPoll.RawInterval,
- }
- duration, err := time.ParseDuration(registryPoll.RawInterval)
- if err != nil {
- registryPoll.ParsingError = fmt.Sprintf("error parsing spec.updateStrategy.registryPoll.interval. Using the default value of %s instead. Error: %s", DefaultRegistryPollDuration, err)
- registryPoll.Interval = &metav1.Duration{Duration: DefaultRegistryPollDuration}
- } else {
- registryPoll.Interval = &metav1.Duration{Duration: duration}
- }
- u.RegistryPoll = registryPoll
- return nil
-}
-
-type RegistryServiceStatus struct {
- Protocol string `json:"protocol,omitempty"`
- ServiceName string `json:"serviceName,omitempty"`
- ServiceNamespace string `json:"serviceNamespace,omitempty"`
- Port string `json:"port,omitempty"`
- CreatedAt metav1.Time `json:"createdAt,omitempty"`
-}
-
-func (s *RegistryServiceStatus) Address() string {
- return fmt.Sprintf("%s.%s.svc:%s", s.ServiceName, s.ServiceNamespace, s.Port)
-}
-
-type GRPCConnectionState struct {
- Address string `json:"address,omitempty"`
- LastObservedState string `json:"lastObservedState"`
- LastConnectTime metav1.Time `json:"lastConnect,omitempty"`
-}
-
-type CatalogSourceStatus struct {
- // A human readable message indicating details about why the CatalogSource is in this condition.
- // +optional
- Message string `json:"message,omitempty"`
- // Reason is the reason the CatalogSource was transitioned to its current state.
- // +optional
- Reason ConditionReason `json:"reason,omitempty"`
-
- // The last time the CatalogSource image registry has been polled to ensure the image is up-to-date
- LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"`
-
- // ConfigMapReference (deprecated) is the reference to the ConfigMap containing the catalog source's configuration, when the catalog source is a ConfigMap
- ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"`
- // RegistryService represents the current state of the GRPC service used to serve the catalog
- RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"`
- // ConnectionState represents the current state of the CatalogSource's connection to the registry
- GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"`
-
- // Represents the state of a CatalogSource. Note that Message and Reason represent the original
- // status information, which may be migrated to be conditions based in the future. Any new features
- // introduced will use conditions.
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
-type ConfigMapResourceReference struct {
- Name string `json:"name"`
- Namespace string `json:"namespace"`
- UID types.UID `json:"uid,omitempty"`
- ResourceVersion string `json:"resourceVersion,omitempty"`
- LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
-}
-
-func (r *ConfigMapResourceReference) IsAMatch(object *metav1.ObjectMeta) bool {
- return r.UID == object.GetUID() && r.ResourceVersion == object.GetResourceVersion()
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:resource:shortName=catsrc,categories=olm
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The pretty name of the catalog"
-// +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.sourceType`,description="The type of the catalog"
-// +kubebuilder:printcolumn:name="Publisher",type=string,JSONPath=`.spec.publisher`,description="The publisher of the catalog"
-// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
-
-// CatalogSource is a repository of CSVs, CRDs, and operator packages.
-type CatalogSource struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec CatalogSourceSpec `json:"spec"`
- // +optional
- Status CatalogSourceStatus `json:"status"`
-}
-
-func (c *CatalogSource) Address() string {
- if c.Spec.Address != "" {
- return c.Spec.Address
- }
- return c.Status.RegistryServiceStatus.Address()
-}
-
-func (c *CatalogSource) SetError(reason ConditionReason, err error) {
- c.Status.Reason = reason
- c.Status.Message = ""
- if err != nil {
- c.Status.Message = err.Error()
- }
-}
-
-func (c *CatalogSource) SetLastUpdateTime() {
- now := metav1.Now()
- c.Status.LatestImageRegistryPoll = &now
-}
-
-// Check if it is time to update based on polling setting
-func (c *CatalogSource) Update() bool {
- if !c.Poll() {
- return false
- }
- interval := c.Spec.UpdateStrategy.Interval.Duration
- latest := c.Status.LatestImageRegistryPoll
- if latest == nil {
- logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", latest)
- } else {
- logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", *c.Status.LatestImageRegistryPoll)
- }
-
- if c.Status.LatestImageRegistryPoll.IsZero() {
- logrus.WithField("CatalogSource", c.Name).Debugf("creation timestamp plus interval before now %t", c.CreationTimestamp.Add(interval).Before(time.Now()))
- if c.CreationTimestamp.Add(interval).Before(time.Now()) {
- return true
- }
- } else {
- logrus.WithField("CatalogSource", c.Name).Debugf("latest poll plus interval before now %t", c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()))
- if c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()) {
- return true
- }
- }
-
- return false
-}
-
-// Poll determines whether the polling feature is enabled on the particular catalog source
-func (c *CatalogSource) Poll() bool {
- if c.Spec.UpdateStrategy == nil {
- return false
- }
- // if polling interval is zero polling will not be done
- if c.Spec.UpdateStrategy.RegistryPoll == nil {
- return false
- }
- // if catalog source is not backed by an image polling will not be done
- if c.Spec.Image == "" {
- return false
- }
- // if image is not type gRPC polling will not be done
- if c.Spec.SourceType != SourceTypeGrpc {
- return false
- }
- return true
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// CatalogSourceList is a repository of CSVs, CRDs, and operator packages.
-type CatalogSourceList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []CatalogSource `json:"items"`
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
deleted file mode 100644
index a4c8d1746960..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package v1alpha1
-
-import (
- "fmt"
-
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/record"
-)
-
-const (
- CopiedLabelKey = "olm.copiedFrom"
-
- // ConditionsLengthLimit is the maximum length of Status.Conditions of a
- // given ClusterServiceVersion object. The oldest condition(s) are removed
- // from the list as it grows over time to keep it at limit.
- ConditionsLengthLimit = 20
-)
-
-// obsoleteReasons are the set of reasons that mean a CSV should no longer be processed as active
-var obsoleteReasons = map[ConditionReason]struct{}{
- CSVReasonReplaced: {},
- CSVReasonBeingReplaced: {},
-}
-
-// uncopiableReasons are the set of reasons that should prevent a CSV from being copied to target namespaces
-var uncopiableReasons = map[ConditionReason]struct{}{
- CSVReasonCopied: {},
- CSVReasonInvalidInstallModes: {},
- CSVReasonNoTargetNamespaces: {},
- CSVReasonUnsupportedOperatorGroup: {},
- CSVReasonNoOperatorGroup: {},
- CSVReasonTooManyOperatorGroups: {},
- CSVReasonInterOperatorGroupOwnerConflict: {},
- CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {},
-}
-
-// safeToAnnotateOperatorGroupReasons are the set of reasons that it's safe to attempt to update the operatorgroup
-// annotations
-var safeToAnnotateOperatorGroupReasons = map[ConditionReason]struct{}{
- CSVReasonOwnerConflict: {},
- CSVReasonInstallSuccessful: {},
- CSVReasonInvalidInstallModes: {},
- CSVReasonNoTargetNamespaces: {},
- CSVReasonUnsupportedOperatorGroup: {},
- CSVReasonNoOperatorGroup: {},
- CSVReasonTooManyOperatorGroups: {},
- CSVReasonInterOperatorGroupOwnerConflict: {},
- CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {},
-}
-
-// SetPhaseWithEventIfChanged emits a Kubernetes event with details of a phase change and sets the current phase if phase, reason, or message would changed
-func (c *ClusterServiceVersion) SetPhaseWithEventIfChanged(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) {
- if c.Status.Phase == phase && c.Status.Reason == reason && c.Status.Message == message {
- return
- }
-
- c.SetPhaseWithEvent(phase, reason, message, now, recorder)
-}
-
-// SetPhaseWithEvent generates a Kubernetes event with details about the phase change and sets the current phase
-func (c *ClusterServiceVersion) SetPhaseWithEvent(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) {
- var eventtype string
- if phase == CSVPhaseFailed {
- eventtype = v1.EventTypeWarning
- } else {
- eventtype = v1.EventTypeNormal
- }
- go recorder.Event(c, eventtype, string(reason), message)
- c.SetPhase(phase, reason, message, now)
-}
-
-// SetPhase sets the current phase and adds a condition if necessary
-func (c *ClusterServiceVersion) SetPhase(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time) {
- newCondition := func() ClusterServiceVersionCondition {
- return ClusterServiceVersionCondition{
- Phase: c.Status.Phase,
- LastTransitionTime: c.Status.LastTransitionTime,
- LastUpdateTime: c.Status.LastUpdateTime,
- Message: message,
- Reason: reason,
- }
- }
-
- defer c.TrimConditionsIfLimitExceeded()
-
- c.Status.LastUpdateTime = now
- if c.Status.Phase != phase {
- c.Status.Phase = phase
- c.Status.LastTransitionTime = now
- }
- c.Status.Message = message
- c.Status.Reason = reason
- if len(c.Status.Conditions) == 0 {
- c.Status.Conditions = append(c.Status.Conditions, newCondition())
- return
- }
-
- previousCondition := c.Status.Conditions[len(c.Status.Conditions)-1]
- if previousCondition.Phase != c.Status.Phase || previousCondition.Reason != c.Status.Reason {
- c.Status.Conditions = append(c.Status.Conditions, newCondition())
- }
-}
-
-// SetRequirementStatus adds the status of all requirements to the CSV status
-func (c *ClusterServiceVersion) SetRequirementStatus(statuses []RequirementStatus) {
- c.Status.RequirementStatus = statuses
-}
-
-// IsObsolete returns if this CSV is being replaced or is marked for deletion
-func (c *ClusterServiceVersion) IsObsolete() bool {
- for _, condition := range c.Status.Conditions {
- _, ok := obsoleteReasons[condition.Reason]
- if ok {
- return true
- }
- }
- return false
-}
-
-// IsCopied returns true if the CSV has been copied and false otherwise.
-func (c *ClusterServiceVersion) IsCopied() bool {
- return c.Status.Reason == CSVReasonCopied || IsCopied(c)
-}
-
-func IsCopied(o metav1.Object) bool {
- annotations := o.GetAnnotations()
- if annotations != nil {
- operatorNamespace, ok := annotations[OperatorGroupNamespaceAnnotationKey]
- if ok && o.GetNamespace() != operatorNamespace {
- return true
- }
- }
-
- if labels := o.GetLabels(); labels != nil {
- if _, ok := labels[CopiedLabelKey]; ok {
- return true
- }
- }
- return false
-}
-
-func (c *ClusterServiceVersion) IsUncopiable() bool {
- if c.Status.Phase == CSVPhaseNone {
- return true
- }
- _, ok := uncopiableReasons[c.Status.Reason]
- return ok
-}
-
-func (c *ClusterServiceVersion) IsSafeToUpdateOperatorGroupAnnotations() bool {
- _, ok := safeToAnnotateOperatorGroupReasons[c.Status.Reason]
- return ok
-}
-
-// NewInstallModeSet returns an InstallModeSet instantiated from the given list of InstallModes.
-// If the given list is not a set, an error is returned.
-func NewInstallModeSet(modes []InstallMode) (InstallModeSet, error) {
- set := InstallModeSet{}
- for _, mode := range modes {
- if _, exists := set[mode.Type]; exists {
- return nil, fmt.Errorf("InstallMode list contains duplicates, cannot make set: %v", modes)
- }
- set[mode.Type] = mode.Supported
- }
-
- return set, nil
-}
-
-// Supports returns an error if the InstallModeSet does not support configuration for
-// the given operatorNamespace and list of target namespaces.
-func (set InstallModeSet) Supports(operatorNamespace string, namespaces []string) error {
- numNamespaces := len(namespaces)
- switch {
- case numNamespaces == 0:
- return fmt.Errorf("operatorgroup has invalid selected namespaces, cannot configure to watch zero namespaces")
- case numNamespaces == 1:
- switch namespaces[0] {
- case operatorNamespace:
- if !set[InstallModeTypeOwnNamespace] {
- return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace)
- }
- case v1.NamespaceAll:
- if !set[InstallModeTypeAllNamespaces] {
- return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch all namespaces", InstallModeTypeAllNamespaces)
- }
- default:
- if !set[InstallModeTypeSingleNamespace] {
- return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch one namespace", InstallModeTypeSingleNamespace)
- }
- }
- case numNamespaces > 1 && !set[InstallModeTypeMultiNamespace]:
- return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch %d namespaces", InstallModeTypeMultiNamespace, numNamespaces)
- case numNamespaces > 1:
- for _, namespace := range namespaces {
- if namespace == operatorNamespace && !set[InstallModeTypeOwnNamespace] {
- return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace)
- }
- if namespace == v1.NamespaceAll {
- return fmt.Errorf("operatorgroup has invalid selected namespaces, NamespaceAll found when |selected namespaces| > 1")
- }
- }
- }
-
- return nil
-}
-
-func (c *ClusterServiceVersion) TrimConditionsIfLimitExceeded() {
- if len(c.Status.Conditions) <= ConditionsLengthLimit {
- return
- }
-
- firstIndex := len(c.Status.Conditions) - ConditionsLengthLimit
- c.Status.Conditions = c.Status.Conditions[firstIndex:len(c.Status.Conditions)]
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go
deleted file mode 100644
index 3e6d3248037e..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go
+++ /dev/null
@@ -1,737 +0,0 @@
-package v1alpha1
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strings"
-
- admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- appsv1 "k8s.io/api/apps/v1"
- rbac "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/util/intstr"
-
- "github.com/operator-framework/api/pkg/lib/version"
-)
-
-const (
- ClusterServiceVersionAPIVersion = GroupName + "/" + GroupVersion
- ClusterServiceVersionKind = "ClusterServiceVersion"
- OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace"
- InstallStrategyNameDeployment = "deployment"
- SkipRangeAnnotationKey = "olm.skipRange"
-)
-
-// InstallModeType is a supported type of install mode for CSV installation
-type InstallModeType string
-
-const (
- // InstallModeTypeOwnNamespace indicates that the operator can be a member of an `OperatorGroup` that selects its own namespace.
- InstallModeTypeOwnNamespace InstallModeType = "OwnNamespace"
- // InstallModeTypeSingleNamespace indicates that the operator can be a member of an `OperatorGroup` that selects one namespace.
- InstallModeTypeSingleNamespace InstallModeType = "SingleNamespace"
- // InstallModeTypeMultiNamespace indicates that the operator can be a member of an `OperatorGroup` that selects more than one namespace.
- InstallModeTypeMultiNamespace InstallModeType = "MultiNamespace"
- // InstallModeTypeAllNamespaces indicates that the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string "").
- InstallModeTypeAllNamespaces InstallModeType = "AllNamespaces"
-)
-
-// InstallMode associates an InstallModeType with a flag representing if the CSV supports it
-// +k8s:openapi-gen=true
-type InstallMode struct {
- Type InstallModeType `json:"type"`
- Supported bool `json:"supported"`
-}
-
-// InstallModeSet is a mapping of unique InstallModeTypes to whether they are supported.
-type InstallModeSet map[InstallModeType]bool
-
-// NamedInstallStrategy represents the block of an ClusterServiceVersion resource
-// where the install strategy is specified.
-// +k8s:openapi-gen=true
-type NamedInstallStrategy struct {
- StrategyName string `json:"strategy"`
- StrategySpec StrategyDetailsDeployment `json:"spec,omitempty"`
-}
-
-// StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy
-// +k8s:openapi-gen=true
-type StrategyDeploymentPermissions struct {
- ServiceAccountName string `json:"serviceAccountName"`
- Rules []rbac.PolicyRule `json:"rules"`
-}
-
-// StrategyDeploymentSpec contains the name, spec and labels for the deployment ALM should create
-// +k8s:openapi-gen=true
-type StrategyDeploymentSpec struct {
- Name string `json:"name"`
- Spec appsv1.DeploymentSpec `json:"spec"`
- Label labels.Set `json:"label,omitempty"`
-}
-
-// StrategyDetailsDeployment represents the parsed details of a Deployment
-// InstallStrategy.
-// +k8s:openapi-gen=true
-type StrategyDetailsDeployment struct {
- DeploymentSpecs []StrategyDeploymentSpec `json:"deployments"`
- Permissions []StrategyDeploymentPermissions `json:"permissions,omitempty"`
- ClusterPermissions []StrategyDeploymentPermissions `json:"clusterPermissions,omitempty"`
-}
-
-func (d *StrategyDetailsDeployment) GetStrategyName() string {
- return InstallStrategyNameDeployment
-}
-
-// StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it
-// +k8s:openapi-gen=true
-type StatusDescriptor struct {
- Path string `json:"path"`
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- XDescriptors []string `json:"x-descriptors,omitempty"`
- Value json.RawMessage `json:"value,omitempty"`
-}
-
-// SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it
-// +k8s:openapi-gen=true
-type SpecDescriptor struct {
- Path string `json:"path"`
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- XDescriptors []string `json:"x-descriptors,omitempty"`
- Value json.RawMessage `json:"value,omitempty"`
-}
-
-// ActionDescriptor describes a declarative action that can be performed on a custom resource instance
-// +k8s:openapi-gen=true
-type ActionDescriptor struct {
- Path string `json:"path"`
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- XDescriptors []string `json:"x-descriptors,omitempty"`
- Value json.RawMessage `json:"value,omitempty"`
-}
-
-// CRDDescription provides details to OLM about the CRDs
-// +k8s:openapi-gen=true
-type CRDDescription struct {
- Name string `json:"name"`
- Version string `json:"version"`
- Kind string `json:"kind"`
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- Resources []APIResourceReference `json:"resources,omitempty"`
- StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"`
- SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"`
- ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"`
-}
-
-// APIServiceDescription provides details to OLM about apis provided via aggregation
-// +k8s:openapi-gen=true
-type APIServiceDescription struct {
- Name string `json:"name"`
- Group string `json:"group"`
- Version string `json:"version"`
- Kind string `json:"kind"`
- DeploymentName string `json:"deploymentName,omitempty"`
- ContainerPort int32 `json:"containerPort,omitempty"`
- DisplayName string `json:"displayName,omitempty"`
- Description string `json:"description,omitempty"`
- Resources []APIResourceReference `json:"resources,omitempty"`
- StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"`
- SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"`
- ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"`
-}
-
-// APIResourceReference is a reference to a Kubernetes resource type that the referrer utilizes.
-// +k8s:openapi-gen=true
-type APIResourceReference struct {
- // Plural name of the referenced resource type (CustomResourceDefinition.Spec.Names[].Plural). Empty string if the referenced resource type is not a custom resource.
- Name string `json:"name"`
- // Kind of the referenced resource type.
- Kind string `json:"kind"`
- // API Version of the referenced resource type.
- Version string `json:"version"`
-}
-
-// GetName returns the name of an APIService as derived from its group and version.
-func (d APIServiceDescription) GetName() string {
- return fmt.Sprintf("%s.%s", d.Version, d.Group)
-}
-
-// WebhookAdmissionType is the type of admission webhooks supported by OLM
-type WebhookAdmissionType string
-
-const (
- // ValidatingAdmissionWebhook is for validating admission webhooks
- ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook"
- // MutatingAdmissionWebhook is for mutating admission webhooks
- MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook"
- // ConversionWebhook is for conversion webhooks
- ConversionWebhook WebhookAdmissionType = "ConversionWebhook"
-)
-
-// WebhookDescription provides details to OLM about required webhooks
-// +k8s:openapi-gen=true
-type WebhookDescription struct {
- GenerateName string `json:"generateName"`
- // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook
- Type WebhookAdmissionType `json:"type"`
- DeploymentName string `json:"deploymentName,omitempty"`
- // +kubebuilder:validation:Maximum=65535
- // +kubebuilder:validation:Minimum=1
- // +kubebuilder:default=443
- ContainerPort int32 `json:"containerPort,omitempty"`
- TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
- Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"`
- FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
- ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty"`
- SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- AdmissionReviewVersions []string `json:"admissionReviewVersions"`
- ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
- WebhookPath *string `json:"webhookPath,omitempty"`
- ConversionCRDs []string `json:"conversionCRDs,omitempty"`
-}
-
-// GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription
-func (w *WebhookDescription) GetValidatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.ValidatingWebhook {
- return admissionregistrationv1.ValidatingWebhook{
- Name: w.GenerateName,
- Rules: w.Rules,
- FailurePolicy: w.FailurePolicy,
- MatchPolicy: w.MatchPolicy,
- NamespaceSelector: namespaceSelector,
- ObjectSelector: w.ObjectSelector,
- SideEffects: w.SideEffects,
- TimeoutSeconds: w.TimeoutSeconds,
- AdmissionReviewVersions: w.AdmissionReviewVersions,
- ClientConfig: admissionregistrationv1.WebhookClientConfig{
- Service: &admissionregistrationv1.ServiceReference{
- Name: w.DomainName() + "-service",
- Namespace: namespace,
- Path: w.WebhookPath,
- Port: &w.ContainerPort,
- },
- CABundle: caBundle,
- },
- }
-}
-
-// GetMutatingWebhook returns a MutatingWebhook generated from the WebhookDescription
-func (w *WebhookDescription) GetMutatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.MutatingWebhook {
- return admissionregistrationv1.MutatingWebhook{
- Name: w.GenerateName,
- Rules: w.Rules,
- FailurePolicy: w.FailurePolicy,
- MatchPolicy: w.MatchPolicy,
- NamespaceSelector: namespaceSelector,
- ObjectSelector: w.ObjectSelector,
- SideEffects: w.SideEffects,
- TimeoutSeconds: w.TimeoutSeconds,
- AdmissionReviewVersions: w.AdmissionReviewVersions,
- ClientConfig: admissionregistrationv1.WebhookClientConfig{
- Service: &admissionregistrationv1.ServiceReference{
- Name: w.DomainName() + "-service",
- Namespace: namespace,
- Path: w.WebhookPath,
- Port: &w.ContainerPort,
- },
- CABundle: caBundle,
- },
- ReinvocationPolicy: w.ReinvocationPolicy,
- }
-}
-
-// DomainName returns the result of replacing all periods in the given Webhook name with hyphens
-func (w *WebhookDescription) DomainName() string {
- // Replace all '.'s with "-"s to convert to a DNS-1035 label
- return strings.Replace(w.DeploymentName, ".", "-", -1)
-}
-
-// CustomResourceDefinitions declares all of the CRDs managed or required by
-// an operator being ran by ClusterServiceVersion.
-//
-// If the CRD is present in the Owned list, it is implicitly required.
-// +k8s:openapi-gen=true
-type CustomResourceDefinitions struct {
- Owned []CRDDescription `json:"owned,omitempty"`
- Required []CRDDescription `json:"required,omitempty"`
-}
-
-// APIServiceDefinitions declares all of the extension apis managed or required by
-// an operator being ran by ClusterServiceVersion.
-// +k8s:openapi-gen=true
-type APIServiceDefinitions struct {
- Owned []APIServiceDescription `json:"owned,omitempty"`
- Required []APIServiceDescription `json:"required,omitempty"`
-}
-
-// ClusterServiceVersionSpec declarations tell OLM how to install an operator
-// that can manage apps for a given version.
-// +k8s:openapi-gen=true
-type ClusterServiceVersionSpec struct {
- InstallStrategy NamedInstallStrategy `json:"install"`
- Version version.OperatorVersion `json:"version,omitempty"`
- Maturity string `json:"maturity,omitempty"`
- CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"`
- APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"`
- WebhookDefinitions []WebhookDescription `json:"webhookdefinitions,omitempty"`
- NativeAPIs []metav1.GroupVersionKind `json:"nativeAPIs,omitempty"`
- MinKubeVersion string `json:"minKubeVersion,omitempty"`
-
- // The name of the operator in display format.
- DisplayName string `json:"displayName"`
-
- // Description of the operator. Can include the features, limitations or use-cases of the
- // operator.
- // +optional
- Description string `json:"description,omitempty"`
-
- // A list of keywords describing the operator.
- // +optional
- Keywords []string `json:"keywords,omitempty"`
-
- // A list of organizational entities maintaining the operator.
- // +optional
- Maintainers []Maintainer `json:"maintainers,omitempty"`
-
- // The publishing entity behind the operator.
- // +optional
- Provider AppLink `json:"provider,omitempty"`
-
- // A list of links related to the operator.
- // +optional
- Links []AppLink `json:"links,omitempty"`
-
- // The icon for this operator.
- // +optional
- Icon []Icon `json:"icon,omitempty"`
-
- // InstallModes specify supported installation types
- // +optional
- InstallModes []InstallMode `json:"installModes,omitempty"`
-
- // The name of a CSV this one replaces. Should match the `metadata.Name` field of the old CSV.
- // +optional
- Replaces string `json:"replaces,omitempty"`
-
- // Map of string keys and values that can be used to organize and categorize
- // (scope and select) objects.
- // +optional
- Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
-
- // Annotations is an unstructured key value map stored with a resource that may be
- // set by external tools to store and retrieve arbitrary metadata.
- // +optional
- Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
-
- // Label selector for related resources.
- // +optional
- Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
-
- // Cleanup specifies the cleanup behaviour when the CSV gets deleted
- // +optional
- Cleanup CleanupSpec `json:"cleanup,omitempty"`
-
- // The name(s) of one or more CSV(s) that should be skipped in the upgrade graph.
- // Should match the `metadata.Name` field of the CSV that should be skipped.
- // This field is only used during catalog creation and plays no part in cluster runtime.
- // +optional
- Skips []string `json:"skips,omitempty"`
-
- // List any related images, or other container images that your Operator might require to perform their functions.
- // This list should also include operand images as well. All image references should be specified by
- // digest (SHA) and not by tag. This field is only used during catalog creation and plays no part in cluster runtime.
- // +optional
- RelatedImages []RelatedImage `json:"relatedImages,omitempty"`
-}
-
-// +k8s:openapi-gen=true
-type CleanupSpec struct {
- Enabled bool `json:"enabled"`
-}
-
-// +k8s:openapi-gen=true
-type Maintainer struct {
- Name string `json:"name,omitempty"`
- Email string `json:"email,omitempty"`
-}
-
-// +k8s:openapi-gen=true
-type AppLink struct {
- Name string `json:"name,omitempty"`
- URL string `json:"url,omitempty"`
-}
-
-// +k8s:openapi-gen=true
-type Icon struct {
- Data string `json:"base64data"`
- MediaType string `json:"mediatype"`
-}
-
-// +k8s:openapi-gen=true
-type RelatedImage struct {
- Name string `json:"name"`
- Image string `json:"image"`
-}
-
-// ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time.
-type ClusterServiceVersionPhase string
-
-// These are the valid phases of ClusterServiceVersion
-const (
- CSVPhaseNone = ""
- // CSVPhasePending means the csv has been accepted by the system, but the install strategy has not been attempted.
- // This is likely because there are unmet requirements.
- CSVPhasePending ClusterServiceVersionPhase = "Pending"
- // CSVPhaseInstallReady means that the requirements are met but the install strategy has not been run.
- CSVPhaseInstallReady ClusterServiceVersionPhase = "InstallReady"
- // CSVPhaseInstalling means that the install strategy has been initiated but not completed.
- CSVPhaseInstalling ClusterServiceVersionPhase = "Installing"
- // CSVPhaseSucceeded means that the resources in the CSV were created successfully.
- CSVPhaseSucceeded ClusterServiceVersionPhase = "Succeeded"
- // CSVPhaseFailed means that the install strategy could not be successfully completed.
- CSVPhaseFailed ClusterServiceVersionPhase = "Failed"
- // CSVPhaseUnknown means that for some reason the state of the csv could not be obtained.
- CSVPhaseUnknown ClusterServiceVersionPhase = "Unknown"
- // CSVPhaseReplacing means that a newer CSV has been created and the csv's resources will be transitioned to a new owner.
- CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing"
- // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted
- CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting"
- // CSVPhaseAny matches all other phases in CSV queries
- CSVPhaseAny ClusterServiceVersionPhase = ""
-)
-
-// ConditionReason is a camelcased reason for the state transition
-type ConditionReason string
-
-const (
- CSVReasonRequirementsUnknown ConditionReason = "RequirementsUnknown"
- CSVReasonRequirementsNotMet ConditionReason = "RequirementsNotMet"
- CSVReasonRequirementsMet ConditionReason = "AllRequirementsMet"
- CSVReasonOwnerConflict ConditionReason = "OwnerConflict"
- CSVReasonComponentFailed ConditionReason = "InstallComponentFailed"
- CSVReasonComponentFailedNoRetry ConditionReason = "InstallComponentFailedNoRetry"
- CSVReasonInvalidStrategy ConditionReason = "InvalidInstallStrategy"
- CSVReasonWaiting ConditionReason = "InstallWaiting"
- CSVReasonInstallSuccessful ConditionReason = "InstallSucceeded"
- CSVReasonInstallCheckFailed ConditionReason = "InstallCheckFailed"
- CSVReasonComponentUnhealthy ConditionReason = "ComponentUnhealthy"
- CSVReasonBeingReplaced ConditionReason = "BeingReplaced"
- CSVReasonReplaced ConditionReason = "Replaced"
- CSVReasonNeedsReinstall ConditionReason = "NeedsReinstall"
- CSVReasonNeedsCertRotation ConditionReason = "NeedsCertRotation"
- CSVReasonAPIServiceResourceIssue ConditionReason = "APIServiceResourceIssue"
- CSVReasonAPIServiceResourcesNeedReinstall ConditionReason = "APIServiceResourcesNeedReinstall"
- CSVReasonAPIServiceInstallFailed ConditionReason = "APIServiceInstallFailed"
- CSVReasonCopied ConditionReason = "Copied"
- CSVReasonInvalidInstallModes ConditionReason = "InvalidInstallModes"
- CSVReasonNoTargetNamespaces ConditionReason = "NoTargetNamespaces"
- CSVReasonUnsupportedOperatorGroup ConditionReason = "UnsupportedOperatorGroup"
- CSVReasonNoOperatorGroup ConditionReason = "NoOperatorGroup"
- CSVReasonTooManyOperatorGroups ConditionReason = "TooManyOperatorGroups"
- CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict"
- CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs"
- CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange"
- CSVReasonInvalidWebhookDescription ConditionReason = "InvalidWebhookDescription"
- CSVReasonOperatorConditionNotUpgradeable ConditionReason = "OperatorConditionNotUpgradeable"
- CSVReasonWaitingForCleanupToComplete ConditionReason = "WaitingOnCleanup"
-)
-
-// HasCaResources returns true if the CSV has owned APIServices or Webhooks.
-func (c *ClusterServiceVersion) HasCAResources() bool {
- // Return early if there are no owned APIServices
- if len(c.Spec.APIServiceDefinitions.Owned)+len(c.Spec.WebhookDefinitions) == 0 {
- return false
- }
- return true
-}
-
-// Conditions appear in the status as a record of state transitions on the ClusterServiceVersion
-// +k8s:openapi-gen=true
-type ClusterServiceVersionCondition struct {
- // Condition of the ClusterServiceVersion
- Phase ClusterServiceVersionPhase `json:"phase,omitempty"`
- // A human readable message indicating details about why the ClusterServiceVersion is in this condition.
- // +optional
- Message string `json:"message,omitempty"`
- // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state.
- // e.g. 'RequirementsNotMet'
- // +optional
- Reason ConditionReason `json:"reason,omitempty"`
- // Last time we updated the status
- // +optional
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- // Last time the status transitioned from one status to another.
- // +optional
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
-}
-
-// OwnsCRD determines whether the current CSV owns a particular CRD.
-func (csv ClusterServiceVersion) OwnsCRD(name string) bool {
- for _, desc := range csv.Spec.CustomResourceDefinitions.Owned {
- if desc.Name == name {
- return true
- }
- }
-
- return false
-}
-
-// OwnsAPIService determines whether the current CSV owns a particular APIService.
-func (csv ClusterServiceVersion) OwnsAPIService(name string) bool {
- for _, desc := range csv.Spec.APIServiceDefinitions.Owned {
- apiServiceName := fmt.Sprintf("%s.%s", desc.Version, desc.Group)
- if apiServiceName == name {
- return true
- }
- }
-
- return false
-}
-
-// StatusReason is a camelcased reason for the status of a RequirementStatus or DependentStatus
-type StatusReason string
-
-const (
- RequirementStatusReasonPresent StatusReason = "Present"
- RequirementStatusReasonNotPresent StatusReason = "NotPresent"
- RequirementStatusReasonPresentNotSatisfied StatusReason = "PresentNotSatisfied"
- // The CRD is present but the Established condition is False (not available)
- RequirementStatusReasonNotAvailable StatusReason = "PresentNotAvailable"
- DependentStatusReasonSatisfied StatusReason = "Satisfied"
- DependentStatusReasonNotSatisfied StatusReason = "NotSatisfied"
-)
-
-// DependentStatus is the status for a dependent requirement (to prevent infinite nesting)
-// +k8s:openapi-gen=true
-type DependentStatus struct {
- Group string `json:"group"`
- Version string `json:"version"`
- Kind string `json:"kind"`
- Status StatusReason `json:"status"`
- UUID string `json:"uuid,omitempty"`
- Message string `json:"message,omitempty"`
-}
-
-// +k8s:openapi-gen=true
-type RequirementStatus struct {
- Group string `json:"group"`
- Version string `json:"version"`
- Kind string `json:"kind"`
- Name string `json:"name"`
- Status StatusReason `json:"status"`
- Message string `json:"message"`
- UUID string `json:"uuid,omitempty"`
- Dependents []DependentStatus `json:"dependents,omitempty"`
-}
-
-// ClusterServiceVersionStatus represents information about the status of a CSV. Status may trail the actual
-// state of a system.
-// +k8s:openapi-gen=true
-type ClusterServiceVersionStatus struct {
- // Current condition of the ClusterServiceVersion
- Phase ClusterServiceVersionPhase `json:"phase,omitempty"`
- // A human readable message indicating details about why the ClusterServiceVersion is in this condition.
- // +optional
- Message string `json:"message,omitempty"`
- // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state.
- // e.g. 'RequirementsNotMet'
- // +optional
- Reason ConditionReason `json:"reason,omitempty"`
- // Last time we updated the status
- // +optional
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- // Last time the status transitioned from one status to another.
- // +optional
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- // List of conditions, a history of state transitions
- Conditions []ClusterServiceVersionCondition `json:"conditions,omitempty"`
- // The status of each requirement for this CSV
- RequirementStatus []RequirementStatus `json:"requirementStatus,omitempty"`
- // Last time the owned APIService certs were updated
- // +optional
- CertsLastUpdated *metav1.Time `json:"certsLastUpdated,omitempty"`
- // Time the owned APIService certs will rotate next
- // +optional
- CertsRotateAt *metav1.Time `json:"certsRotateAt,omitempty"`
- // CleanupStatus represents information about the status of cleanup while a CSV is pending deletion
- // +optional
- Cleanup CleanupStatus `json:"cleanup,omitempty"`
-}
-
-// CleanupStatus represents information about the status of cleanup while a CSV is pending deletion
-// +k8s:openapi-gen=true
-type CleanupStatus struct {
- // PendingDeletion is the list of custom resource objects that are pending deletion and blocked on finalizers.
- // This indicates the progress of cleanup that is blocking CSV deletion or operator uninstall.
- // +optional
- PendingDeletion []ResourceList `json:"pendingDeletion,omitempty"`
-}
-
-// ResourceList represents a list of resources which are of the same Group/Kind
-// +k8s:openapi-gen=true
-type ResourceList struct {
- Group string `json:"group"`
- Kind string `json:"kind"`
- Instances []ResourceInstance `json:"instances"`
-}
-
-// +k8s:openapi-gen=true
-type ResourceInstance struct {
- Name string `json:"name"`
- // Namespace can be empty for cluster-scoped resources
- Namespace string `json:"namespace,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:storageversion
-// +kubebuilder:resource:shortName={csv, csvs},categories=olm
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV"
-// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV"
-// +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces"
-// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
-
-// ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`.
-type ClusterServiceVersion struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec ClusterServiceVersionSpec `json:"spec"`
- // +optional
- Status ClusterServiceVersionStatus `json:"status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterServiceVersionList represents a list of ClusterServiceVersions.
-type ClusterServiceVersionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterServiceVersion `json:"items"`
-}
-
-// GetAllCRDDescriptions returns a deduplicated set of CRDDescriptions that is
-// the union of the owned and required CRDDescriptions.
-//
-// Descriptions with the same name prefer the value in Owned.
-// Descriptions are returned in alphabetical order.
-func (csv ClusterServiceVersion) GetAllCRDDescriptions() []CRDDescription {
- set := make(map[string]CRDDescription)
- for _, required := range csv.Spec.CustomResourceDefinitions.Required {
- set[required.Name] = required
- }
-
- for _, owned := range csv.Spec.CustomResourceDefinitions.Owned {
- set[owned.Name] = owned
- }
-
- keys := make([]string, 0)
- for key := range set {
- keys = append(keys, key)
- }
- sort.StringSlice(keys).Sort()
-
- descs := make([]CRDDescription, 0)
- for _, key := range keys {
- descs = append(descs, set[key])
- }
-
- return descs
-}
-
-// GetAllAPIServiceDescriptions returns a deduplicated set of APIServiceDescriptions that is
-// the union of the owned and required APIServiceDescriptions.
-//
-// Descriptions with the same name prefer the value in Owned.
-// Descriptions are returned in alphabetical order.
-func (csv ClusterServiceVersion) GetAllAPIServiceDescriptions() []APIServiceDescription {
- set := make(map[string]APIServiceDescription)
- for _, required := range csv.Spec.APIServiceDefinitions.Required {
- name := fmt.Sprintf("%s.%s", required.Version, required.Group)
- set[name] = required
- }
-
- for _, owned := range csv.Spec.APIServiceDefinitions.Owned {
- name := fmt.Sprintf("%s.%s", owned.Version, owned.Group)
- set[name] = owned
- }
-
- keys := make([]string, 0)
- for key := range set {
- keys = append(keys, key)
- }
- sort.StringSlice(keys).Sort()
-
- descs := make([]APIServiceDescription, 0)
- for _, key := range keys {
- descs = append(descs, set[key])
- }
-
- return descs
-}
-
-// GetRequiredAPIServiceDescriptions returns a deduplicated set of required APIServiceDescriptions
-// with the intersection of required and owned removed
-// Equivalent to the set subtraction required - owned
-//
-// Descriptions are returned in alphabetical order.
-func (csv ClusterServiceVersion) GetRequiredAPIServiceDescriptions() []APIServiceDescription {
- set := make(map[string]APIServiceDescription)
- for _, required := range csv.Spec.APIServiceDefinitions.Required {
- name := fmt.Sprintf("%s.%s", required.Version, required.Group)
- set[name] = required
- }
-
- // Remove any shared owned from the set
- for _, owned := range csv.Spec.APIServiceDefinitions.Owned {
- name := fmt.Sprintf("%s.%s", owned.Version, owned.Group)
- if _, ok := set[name]; ok {
- delete(set, name)
- }
- }
-
- keys := make([]string, 0)
- for key := range set {
- keys = append(keys, key)
- }
- sort.StringSlice(keys).Sort()
-
- descs := make([]APIServiceDescription, 0)
- for _, key := range keys {
- descs = append(descs, set[key])
- }
-
- return descs
-}
-
-// GetOwnedAPIServiceDescriptions returns a deduplicated set of owned APIServiceDescriptions
-//
-// Descriptions are returned in alphabetical order.
-func (csv ClusterServiceVersion) GetOwnedAPIServiceDescriptions() []APIServiceDescription {
- set := make(map[string]APIServiceDescription)
- for _, owned := range csv.Spec.APIServiceDefinitions.Owned {
- name := owned.GetName()
- set[name] = owned
- }
-
- keys := make([]string, 0)
- for key := range set {
- keys = append(keys, key)
- }
- sort.StringSlice(keys).Sort()
-
- descs := make([]APIServiceDescription, 0)
- for _, key := range keys {
- descs = append(descs, set[key])
- }
-
- return descs
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go
deleted file mode 100644
index 74bc9b819a40..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +groupName=operators.coreos.com
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators
-
-// Package v1alpha1 contains resources types for version v1alpha1 of the operators.coreos.com API group.
-package v1alpha1
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go
deleted file mode 100644
index 09deba525b7c..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go
+++ /dev/null
@@ -1,389 +0,0 @@
-package v1alpha1
-
-import (
- "errors"
- "fmt"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- InstallPlanKind = "InstallPlan"
- InstallPlanAPIVersion = GroupName + "/" + GroupVersion
-)
-
-// Approval is the user approval policy for an InstallPlan.
-// It must be one of "Automatic" or "Manual".
-type Approval string
-
-const (
- ApprovalAutomatic Approval = "Automatic"
- ApprovalManual Approval = "Manual"
-)
-
-// InstallPlanSpec defines a set of Application resources to be installed
-type InstallPlanSpec struct {
- CatalogSource string `json:"source,omitempty"`
- CatalogSourceNamespace string `json:"sourceNamespace,omitempty"`
- ClusterServiceVersionNames []string `json:"clusterServiceVersionNames"`
- Approval Approval `json:"approval"`
- Approved bool `json:"approved"`
- Generation int `json:"generation,omitempty"`
-}
-
-// InstallPlanPhase is the current status of a InstallPlan as a whole.
-type InstallPlanPhase string
-
-const (
- InstallPlanPhaseNone InstallPlanPhase = ""
- InstallPlanPhasePlanning InstallPlanPhase = "Planning"
- InstallPlanPhaseRequiresApproval InstallPlanPhase = "RequiresApproval"
- InstallPlanPhaseInstalling InstallPlanPhase = "Installing"
- InstallPlanPhaseComplete InstallPlanPhase = "Complete"
- InstallPlanPhaseFailed InstallPlanPhase = "Failed"
-)
-
-// InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole.
-type InstallPlanConditionType string
-
-const (
- InstallPlanResolved InstallPlanConditionType = "Resolved"
- InstallPlanInstalled InstallPlanConditionType = "Installed"
-)
-
-// ConditionReason is a camelcased reason for the state transition.
-type InstallPlanConditionReason string
-
-const (
- InstallPlanReasonPlanUnknown InstallPlanConditionReason = "PlanUnknown"
- InstallPlanReasonInstallCheckFailed InstallPlanConditionReason = "InstallCheckFailed"
- InstallPlanReasonDependencyConflict InstallPlanConditionReason = "DependenciesConflict"
- InstallPlanReasonComponentFailed InstallPlanConditionReason = "InstallComponentFailed"
-)
-
-// StepStatus is the current status of a particular resource an in
-// InstallPlan
-type StepStatus string
-
-const (
- StepStatusUnknown StepStatus = "Unknown"
- StepStatusNotPresent StepStatus = "NotPresent"
- StepStatusPresent StepStatus = "Present"
- StepStatusCreated StepStatus = "Created"
- StepStatusNotCreated StepStatus = "NotCreated"
- StepStatusWaitingForAPI StepStatus = "WaitingForApi"
- StepStatusUnsupportedResource StepStatus = "UnsupportedResource"
-)
-
-// ErrInvalidInstallPlan is the error returned by functions that operate on
-// InstallPlans when the InstallPlan does not contain totally valid data.
-var ErrInvalidInstallPlan = errors.New("the InstallPlan contains invalid data")
-
-// InstallPlanStatus represents the information about the status of
-// steps required to complete installation.
-//
-// Status may trail the actual state of a system.
-type InstallPlanStatus struct {
- Phase InstallPlanPhase `json:"phase"`
- Conditions []InstallPlanCondition `json:"conditions,omitempty"`
- CatalogSources []string `json:"catalogSources"`
- Plan []*Step `json:"plan,omitempty"`
- // BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster.
- // +optional
- BundleLookups []BundleLookup `json:"bundleLookups,omitempty"`
- // AttenuatedServiceAccountRef references the service account that is used
- // to do scoped operator install.
- AttenuatedServiceAccountRef *corev1.ObjectReference `json:"attenuatedServiceAccountRef,omitempty"`
-
- // StartTime is the time when the controller began applying
- // the resources listed in the plan to the cluster.
- // +optional
- StartTime *metav1.Time `json:"startTime,omitempty"`
-
- // Message is a human-readable message containing detailed
- // information that may be important to understanding why the
- // plan has its current status.
- // +optional
- Message string `json:"message,omitempty"`
-}
-
-// InstallPlanCondition represents the overall status of the execution of
-// an InstallPlan.
-type InstallPlanCondition struct {
- Type InstallPlanConditionType `json:"type,omitempty"`
- Status corev1.ConditionStatus `json:"status,omitempty"` // True, False, or Unknown
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason InstallPlanConditionReason `json:"reason,omitempty"`
- Message string `json:"message,omitempty"`
-}
-
-// allow overwriting `now` function for deterministic tests
-var now = metav1.Now
-
-// GetCondition returns the InstallPlanCondition of the given type if it exists in the InstallPlanStatus' Conditions.
-// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
-func (s InstallPlanStatus) GetCondition(conditionType InstallPlanConditionType) InstallPlanCondition {
- for _, cond := range s.Conditions {
- if cond.Type == conditionType {
- return cond
- }
- }
-
- return InstallPlanCondition{
- Type: conditionType,
- Status: corev1.ConditionUnknown,
- }
-}
-
-// SetCondition adds or updates a condition, using `Type` as merge key.
-func (s *InstallPlanStatus) SetCondition(cond InstallPlanCondition) InstallPlanCondition {
- for i, existing := range s.Conditions {
- if existing.Type != cond.Type {
- continue
- }
- if existing.Status == cond.Status {
- cond.LastTransitionTime = existing.LastTransitionTime
- }
- s.Conditions[i] = cond
- return cond
- }
- s.Conditions = append(s.Conditions, cond)
- return cond
-}
-
-func OrderSteps(steps []*Step) []*Step {
- // CSVs must be applied first
- csvList := []*Step{}
-
- // CRDs must be applied second
- crdList := []*Step{}
-
- // Other resources may be applied in any order
- remainingResources := []*Step{}
- for _, step := range steps {
- switch step.Resource.Kind {
- case crdKind:
- crdList = append(crdList, step)
- case ClusterServiceVersionKind:
- csvList = append(csvList, step)
- default:
- remainingResources = append(remainingResources, step)
- }
- }
-
- result := make([]*Step, len(steps))
- i := 0
-
- for j := range csvList {
- result[i] = csvList[j]
- i++
- }
-
- for j := range crdList {
- result[i] = crdList[j]
- i++
- }
-
- for j := range remainingResources {
- result[i] = remainingResources[j]
- i++
- }
-
- return result
-}
-
-func (s InstallPlanStatus) NeedsRequeue() bool {
- for _, step := range s.Plan {
- switch step.Status {
- case StepStatusWaitingForAPI:
- return true
- }
- }
-
- return false
-}
-func ConditionFailed(cond InstallPlanConditionType, reason InstallPlanConditionReason, message string, now *metav1.Time) InstallPlanCondition {
- return InstallPlanCondition{
- Type: cond,
- Status: corev1.ConditionFalse,
- Reason: reason,
- Message: message,
- LastUpdateTime: now,
- LastTransitionTime: now,
- }
-}
-
-func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCondition {
- return InstallPlanCondition{
- Type: cond,
- Status: corev1.ConditionTrue,
- LastUpdateTime: now,
- LastTransitionTime: now,
- }
-}
-
-// Step represents the status of an individual step in an InstallPlan.
-type Step struct {
- Resolving string `json:"resolving"`
- Resource StepResource `json:"resource"`
- Optional bool `json:"optional,omitempty"`
- Status StepStatus `json:"status"`
-}
-
-// BundleLookupConditionType is a category of the overall state of a BundleLookup.
-type BundleLookupConditionType string
-
-const (
- // BundleLookupPending describes BundleLookups that are not complete.
- BundleLookupPending BundleLookupConditionType = "BundleLookupPending"
-
- // BundleLookupFailed describes conditions types for when BundleLookups fail
- BundleLookupFailed BundleLookupConditionType = "BundleLookupFailed"
-
- crdKind = "CustomResourceDefinition"
-)
-
-type BundleLookupCondition struct {
- // Type of condition.
- Type BundleLookupConditionType `json:"type"`
- // Status of the condition, one of True, False, Unknown.
- Status corev1.ConditionStatus `json:"status"`
- // The reason for the condition's last transition.
- // +optional
- Reason string `json:"reason,omitempty"`
- // A human readable message indicating details about the transition.
- // +optional
- Message string `json:"message,omitempty"`
- // Last time the condition was probed.
- // +optional
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- // Last time the condition transitioned from one status to another.
- // +optional
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
-}
-
-// BundleLookup is a request to pull and unpackage the content of a bundle to the cluster.
-type BundleLookup struct {
- // Path refers to the location of a bundle to pull.
- // It's typically an image reference.
- Path string `json:"path"`
- // Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs)
- Identifier string `json:"identifier"`
- // Replaces is the name of the bundle to replace with the one found at Path.
- Replaces string `json:"replaces"`
- // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from.
- CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
- // Conditions represents the overall state of a BundleLookup.
- // +optional
- Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
- // The effective properties of the unpacked bundle.
- // +optional
- Properties string `json:"properties,omitempty"`
-}
-
-// GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions.
-// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
-func (b BundleLookup) GetCondition(conditionType BundleLookupConditionType) BundleLookupCondition {
- for _, cond := range b.Conditions {
- if cond.Type == conditionType {
- return cond
- }
- }
-
- return BundleLookupCondition{
- Type: conditionType,
- Status: corev1.ConditionUnknown,
- }
-}
-
-// RemoveCondition removes the BundleLookupCondition of the given type from the BundleLookup's Conditions if it exists.
-func (b *BundleLookup) RemoveCondition(conditionType BundleLookupConditionType) {
- for i, cond := range b.Conditions {
- if cond.Type == conditionType {
- b.Conditions = append(b.Conditions[:i], b.Conditions[i+1:]...)
- if len(b.Conditions) == 0 {
- b.Conditions = nil
- }
- return
- }
- }
-}
-
-// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found.
-func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCondition {
- for i, existing := range b.Conditions {
- if existing.Type != cond.Type {
- continue
- }
- if existing.Status == cond.Status {
- cond.LastTransitionTime = existing.LastTransitionTime
- }
- b.Conditions[i] = cond
- return cond
- }
- b.Conditions = append(b.Conditions, cond)
-
- return cond
-}
-
-func (s *Step) String() string {
- return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status)
-}
-
-// StepResource represents the status of a resource to be tracked by an
-// InstallPlan.
-type StepResource struct {
- CatalogSource string `json:"sourceName"`
- CatalogSourceNamespace string `json:"sourceNamespace"`
- Group string `json:"group"`
- Version string `json:"version"`
- Kind string `json:"kind"`
- Name string `json:"name"`
- Manifest string `json:"manifest,omitempty"`
-}
-
-func (r StepResource) String() string {
- return fmt.Sprintf("%s[%s/%s/%s (%s/%s)]", r.Name, r.Group, r.Version, r.Kind, r.CatalogSource, r.CatalogSourceNamespace)
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:resource:shortName=ip,categories=olm
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="CSV",type=string,JSONPath=`.spec.clusterServiceVersionNames[0]`,description="The first CSV in the list of clusterServiceVersionNames"
-// +kubebuilder:printcolumn:name="Approval",type=string,JSONPath=`.spec.approval`,description="The approval mode"
-// +kubebuilder:printcolumn:name="Approved",type=boolean,JSONPath=`.spec.approved`
-
-// InstallPlan defines the installation of a set of operators.
-type InstallPlan struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec InstallPlanSpec `json:"spec"`
- // +optional
- Status InstallPlanStatus `json:"status"`
-}
-
-// EnsureCatalogSource ensures that a CatalogSource is present in the Status
-// block of an InstallPlan.
-func (p *InstallPlan) EnsureCatalogSource(sourceName string) {
- for _, srcName := range p.Status.CatalogSources {
- if srcName == sourceName {
- return
- }
- }
-
- p.Status.CatalogSources = append(p.Status.CatalogSources, sourceName)
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// InstallPlanList is a list of InstallPlan resources.
-type InstallPlanList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []InstallPlan `json:"items"`
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go
deleted file mode 100644
index f1cd86f1a372..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/operator-framework/api/pkg/operators"
-)
-
-const (
- // GroupName is the group name used in this package.
- GroupName = operators.GroupName
- // GroupVersion is the group version used in this package.
- GroupVersion = "v1alpha1"
-)
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
-
-// Kind takes an unqualified kind and returns back a Group qualified GroupKind
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- // SchemeBuilder initializes a scheme builder
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- // AddToScheme is a global function that registers this API group & version to a scheme
- AddToScheme = SchemeBuilder.AddToScheme
-
- // localSchemeBuilder is expected by generated conversion functions
- localSchemeBuilder = &SchemeBuilder
-)
-
-// addKnownTypes adds the list of known types to Scheme
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &CatalogSource{},
- &CatalogSourceList{},
- &InstallPlan{},
- &InstallPlanList{},
- &Subscription{},
- &SubscriptionList{},
- &ClusterServiceVersion{},
- &ClusterServiceVersionList{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
deleted file mode 100644
index 292fedf9b989..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package v1alpha1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
-)
-
-const (
- SubscriptionKind = "Subscription"
- SubscriptionCRDAPIVersion = GroupName + "/" + GroupVersion
-)
-
-// SubscriptionState tracks when updates are available, installing, or service is up to date
-type SubscriptionState string
-
-const (
- SubscriptionStateNone = ""
- SubscriptionStateFailed = "UpgradeFailed"
- SubscriptionStateUpgradeAvailable = "UpgradeAvailable"
- SubscriptionStateUpgradePending = "UpgradePending"
- SubscriptionStateAtLatest = "AtLatestKnown"
-)
-
-const (
- SubscriptionReasonInvalidCatalog ConditionReason = "InvalidCatalog"
- SubscriptionReasonUpgradeSucceeded ConditionReason = "UpgradeSucceeded"
-)
-
-// SubscriptionSpec defines an Application that can be installed
-type SubscriptionSpec struct {
- CatalogSource string `json:"source"`
- CatalogSourceNamespace string `json:"sourceNamespace"`
- Package string `json:"name"`
- Channel string `json:"channel,omitempty"`
- StartingCSV string `json:"startingCSV,omitempty"`
- InstallPlanApproval Approval `json:"installPlanApproval,omitempty"`
- Config *SubscriptionConfig `json:"config,omitempty"`
-}
-
-// SubscriptionConfig contains configuration specified for a subscription.
-type SubscriptionConfig struct {
- // Selector is the label selector for pods to be configured.
- // Existing ReplicaSets whose pods are
- // selected by this will be the ones affected by this deployment.
- // It must match the pod template's labels.
- Selector *metav1.LabelSelector `json:"selector,omitempty"`
-
- // NodeSelector is a selector which must be true for the pod to fit on a node.
- // Selector which must match a node's labels for the pod to be scheduled on that node.
- // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Tolerations are the pod's tolerations.
- // +optional
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-
- // Resources represents compute resources required by this container.
- // Immutable.
- // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
- // +optional
- Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
-
- // EnvFrom is a list of sources to populate environment variables in the container.
- // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
- // will be reported as an event when the container is starting. When a key exists in multiple
- // sources, the value associated with the last source will take precedence.
- // Values defined by an Env with a duplicate key will take precedence.
- // Immutable.
- // +optional
- EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
- // Env is a list of environment variables to set in the container.
- // Cannot be updated.
- // +patchMergeKey=name
- // +patchStrategy=merge
- // +optional
- Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge"`
-
- // List of Volumes to set in the podSpec.
- // +optional
- Volumes []corev1.Volume `json:"volumes,omitempty"`
-
- // List of VolumeMounts to set in the container.
- // +optional
- VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
-
- // If specified, overrides the pod's scheduling constraints.
- // nil sub-attributes will *not* override the original values in the pod.spec for those sub-attributes.
- // Use empty object ({}) to erase original sub-attribute values.
- // +optional
- Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
-
- // Annotations is an unstructured key value map stored with each Deployment, Pod, APIService in the Operator.
- // Typically, annotations may be set by external tools to store and retrieve arbitrary metadata.
- // Use this field to pre-define annotations that OLM should add to each of the Subscription's
- // deployments, pods, and apiservices.
- // +optional
- Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
-}
-
-// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true"
-// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties).
-type SubscriptionConditionType string
-
-const (
- // SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy.
- SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy"
-
- // SubscriptionInstallPlanMissing indicates that a Subscription's InstallPlan is missing.
- SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing"
-
- // SubscriptionInstallPlanPending indicates that a Subscription's InstallPlan is pending installation.
- SubscriptionInstallPlanPending SubscriptionConditionType = "InstallPlanPending"
-
- // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed.
- SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed"
-
- // SubscriptionResolutionFailed indicates that the dependency resolution in the namespace in which the subscription is created has failed
- SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed"
-
- // SubscriptionBundleUnpacking indicates that the unpack job is currently running
- SubscriptionBundleUnpacking SubscriptionConditionType = "BundleUnpacking"
-
- // SubscriptionBundleUnpackFailed indicates that the unpack job failed
- SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed"
-
- // SubscriptionDeprecated is a roll-up condition which indicates that the Operator currently installed with this Subscription
- //has been deprecated. It will be present when any of the three deprecation types (Package, Channel, Bundle) are present.
- SubscriptionDeprecated SubscriptionConditionType = "Deprecated"
-
- // SubscriptionOperatorDeprecated indicates that the Package currently installed with this Subscription has been deprecated.
- SubscriptionPackageDeprecated SubscriptionConditionType = "PackageDeprecated"
-
- // SubscriptionOperatorDeprecated indicates that the Channel used with this Subscription has been deprecated.
- SubscriptionChannelDeprecated SubscriptionConditionType = "ChannelDeprecated"
-
- // SubscriptionOperatorDeprecated indicates that the Bundle currently installed with this Subscription has been deprecated.
- SubscriptionBundleDeprecated SubscriptionConditionType = "BundleDeprecated"
-)
-
-const (
- // NoCatalogSourcesFound is a reason string for Subscriptions with unhealthy CatalogSources due to none being available.
- NoCatalogSourcesFound = "NoCatalogSourcesFound"
-
- // AllCatalogSourcesHealthy is a reason string for Subscriptions that transitioned due to all CatalogSources being healthy.
- AllCatalogSourcesHealthy = "AllCatalogSourcesHealthy"
-
- // CatalogSourcesAdded is a reason string for Subscriptions that transitioned due to CatalogSources being added.
- CatalogSourcesAdded = "CatalogSourcesAdded"
-
- // CatalogSourcesUpdated is a reason string for Subscriptions that transitioned due to CatalogSource being updated.
- CatalogSourcesUpdated = "CatalogSourcesUpdated"
-
- // CatalogSourcesDeleted is a reason string for Subscriptions that transitioned due to CatalogSources being removed.
- CatalogSourcesDeleted = "CatalogSourcesDeleted"
-
- // UnhealthyCatalogSourceFound is a reason string for Subscriptions that transitioned because an unhealthy CatalogSource was found.
- UnhealthyCatalogSourceFound = "UnhealthyCatalogSourceFound"
-
- // ReferencedInstallPlanNotFound is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being found.
- ReferencedInstallPlanNotFound = "ReferencedInstallPlanNotFound"
-
- // InstallPlanNotYetReconciled is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being reconciled yet.
- InstallPlanNotYetReconciled = "InstallPlanNotYetReconciled"
-
- // InstallPlanFailed is a reason string for Subscriptions that transitioned due to a referenced InstallPlan failing without setting an explicit failure condition.
- InstallPlanFailed = "InstallPlanFailed"
-)
-
-// SubscriptionCondition represents the latest available observations of a Subscription's state.
-type SubscriptionCondition struct {
- // Type is the type of Subscription condition.
- Type SubscriptionConditionType `json:"type" description:"type of Subscription condition"`
-
- // Status is the status of the condition, one of True, False, Unknown.
- Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
-
- // Reason is a one-word CamelCase reason for the condition's last transition.
- // +optional
- Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
-
- // Message is a human-readable message indicating details about last transition.
- // +optional
- Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
-
- // LastHeartbeatTime is the last time we got an update on a given condition
- // +optional
- LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"`
-
- // LastTransitionTime is the last time the condition transit from one status to another
- // +optional
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another" hash:"ignore"`
-}
-
-// Equals returns true if a SubscriptionCondition equals the one given, false otherwise.
-// Equality is determined by the equality of the type, status, reason, and message fields ONLY.
-func (s SubscriptionCondition) Equals(condition SubscriptionCondition) bool {
- return s.Type == condition.Type && s.Status == condition.Status && s.Reason == condition.Reason && s.Message == condition.Message
-}
-
-type SubscriptionStatus struct {
- // CurrentCSV is the CSV the Subscription is progressing to.
- // +optional
- CurrentCSV string `json:"currentCSV,omitempty"`
-
- // InstalledCSV is the CSV currently installed by the Subscription.
- // +optional
- InstalledCSV string `json:"installedCSV,omitempty"`
-
- // Install is a reference to the latest InstallPlan generated for the Subscription.
- // DEPRECATED: InstallPlanRef
- // +optional
- Install *InstallPlanReference `json:"installplan,omitempty"`
-
- // State represents the current state of the Subscription
- // +optional
- State SubscriptionState `json:"state,omitempty"`
-
- // Reason is the reason the Subscription was transitioned to its current state.
- // +optional
- Reason ConditionReason `json:"reason,omitempty"`
-
- // InstallPlanGeneration is the current generation of the installplan
- // +optional
- InstallPlanGeneration int `json:"installPlanGeneration,omitempty"`
-
- // InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV.
- // +optional
- InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"`
-
- // CatalogHealth contains the Subscription's view of its relevant CatalogSources' status.
- // It is used to determine SubscriptionStatusConditions related to CatalogSources.
- // +optional
- CatalogHealth []SubscriptionCatalogHealth `json:"catalogHealth,omitempty"`
-
- // Conditions is a list of the latest available observations about a Subscription's current state.
- // +optional
- Conditions []SubscriptionCondition `json:"conditions,omitempty" hash:"set"`
-
- // LastUpdated represents the last time that the Subscription status was updated.
- LastUpdated metav1.Time `json:"lastUpdated"`
-}
-
-// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions.
-// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
-func (s SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition {
- for _, cond := range s.Conditions {
- if cond.Type == conditionType {
- return cond
- }
- }
-
- return SubscriptionCondition{
- Type: conditionType,
- Status: corev1.ConditionUnknown,
- }
-}
-
-// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus' Conditions.
-func (s *SubscriptionStatus) SetCondition(condition SubscriptionCondition) {
- for i, cond := range s.Conditions {
- if cond.Type == condition.Type {
- s.Conditions[i] = condition
- return
- }
- }
-
- s.Conditions = append(s.Conditions, condition)
-}
-
-// RemoveConditions removes any conditions of the given types from the SubscriptionStatus' Conditions.
-func (s *SubscriptionStatus) RemoveConditions(remove ...SubscriptionConditionType) {
- exclusions := map[SubscriptionConditionType]struct{}{}
- for _, r := range remove {
- exclusions[r] = struct{}{}
- }
-
- var filtered []SubscriptionCondition
- for _, cond := range s.Conditions {
- if _, ok := exclusions[cond.Type]; ok {
- // Skip excluded condition types
- continue
- }
- filtered = append(filtered, cond)
- }
-
- s.Conditions = filtered
-}
-
-type InstallPlanReference struct {
- APIVersion string `json:"apiVersion"`
- Kind string `json:"kind"`
- Name string `json:"name"`
- UID types.UID `json:"uuid"`
-}
-
-// SubscriptionCatalogHealth describes the health of a CatalogSource the Subscription knows about.
-type SubscriptionCatalogHealth struct {
- // CatalogSourceRef is a reference to a CatalogSource.
- CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
-
- // LastUpdated represents the last time that the CatalogSourceHealth changed
- LastUpdated *metav1.Time `json:"lastUpdated"`
-
- // Healthy is true if the CatalogSource is healthy; false otherwise.
- Healthy bool `json:"healthy"`
-}
-
-// Equals returns true if a SubscriptionCatalogHealth equals the one given, false otherwise.
-// Equality is based SOLEY on health and UID.
-func (s SubscriptionCatalogHealth) Equals(health SubscriptionCatalogHealth) bool {
- return s.Healthy == health.Healthy && s.CatalogSourceRef.UID == health.CatalogSourceRef.UID
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:resource:shortName={sub, subs},categories=olm
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Package",type=string,JSONPath=`.spec.name`,description="The package subscribed to"
-// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.source`,description="The catalog source for the specified package"
-// +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel`,description="The channel of updates to subscribe to"
-
-// Subscription keeps operators up to date by tracking changes to Catalogs.
-type Subscription struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec *SubscriptionSpec `json:"spec"`
- // +optional
- Status SubscriptionStatus `json:"status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// SubscriptionList is a list of Subscription resources.
-type SubscriptionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Subscription `json:"items"`
-}
-
-// GetInstallPlanApproval gets the configured install plan approval or the default
-func (s *Subscription) GetInstallPlanApproval() Approval {
- if s.Spec.InstallPlanApproval == ApprovalManual {
- return ApprovalManual
- }
- return ApprovalAutomatic
-}
-
-// NewInstallPlanReference returns an InstallPlanReference for the given ObjectReference.
-func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference {
- return &InstallPlanReference{
- APIVersion: ref.APIVersion,
- Kind: ref.Kind,
- Name: ref.Name,
- UID: ref.UID,
- }
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index 684a7432a6e5..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,1632 +0,0 @@
-//go:build !ignore_autogenerated
-
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- "encoding/json"
- admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/intstr"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference.
-func (in *APIResourceReference) DeepCopy() *APIResourceReference {
- if in == nil {
- return nil
- }
- out := new(APIResourceReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) {
- *out = *in
- if in.Owned != nil {
- in, out := &in.Owned, &out.Owned
- *out = make([]APIServiceDescription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Required != nil {
- in, out := &in.Required, &out.Required
- *out = make([]APIServiceDescription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions.
-func (in *APIServiceDefinitions) DeepCopy() *APIServiceDefinitions {
- if in == nil {
- return nil
- }
- out := new(APIServiceDefinitions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) {
- *out = *in
- if in.Resources != nil {
- in, out := &in.Resources, &out.Resources
- *out = make([]APIResourceReference, len(*in))
- copy(*out, *in)
- }
- if in.StatusDescriptors != nil {
- in, out := &in.StatusDescriptors, &out.StatusDescriptors
- *out = make([]StatusDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.SpecDescriptors != nil {
- in, out := &in.SpecDescriptors, &out.SpecDescriptors
- *out = make([]SpecDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ActionDescriptor != nil {
- in, out := &in.ActionDescriptor, &out.ActionDescriptor
- *out = make([]ActionDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription.
-func (in *APIServiceDescription) DeepCopy() *APIServiceDescription {
- if in == nil {
- return nil
- }
- out := new(APIServiceDescription)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) {
- *out = *in
- if in.XDescriptors != nil {
- in, out := &in.XDescriptors, &out.XDescriptors
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Value != nil {
- in, out := &in.Value, &out.Value
- *out = make(json.RawMessage, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor.
-func (in *ActionDescriptor) DeepCopy() *ActionDescriptor {
- if in == nil {
- return nil
- }
- out := new(ActionDescriptor)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AppLink) DeepCopyInto(out *AppLink) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink.
-func (in *AppLink) DeepCopy() *AppLink {
- if in == nil {
- return nil
- }
- out := new(AppLink)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BundleLookup) DeepCopyInto(out *BundleLookup) {
- *out = *in
- if in.CatalogSourceRef != nil {
- in, out := &in.CatalogSourceRef, &out.CatalogSourceRef
- *out = new(v1.ObjectReference)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]BundleLookupCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup.
-func (in *BundleLookup) DeepCopy() *BundleLookup {
- if in == nil {
- return nil
- }
- out := new(BundleLookup)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) {
- *out = *in
- if in.LastUpdateTime != nil {
- in, out := &in.LastUpdateTime, &out.LastUpdateTime
- *out = (*in).DeepCopy()
- }
- if in.LastTransitionTime != nil {
- in, out := &in.LastTransitionTime, &out.LastTransitionTime
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition.
-func (in *BundleLookupCondition) DeepCopy() *BundleLookupCondition {
- if in == nil {
- return nil
- }
- out := new(BundleLookupCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CRDDescription) DeepCopyInto(out *CRDDescription) {
- *out = *in
- if in.Resources != nil {
- in, out := &in.Resources, &out.Resources
- *out = make([]APIResourceReference, len(*in))
- copy(*out, *in)
- }
- if in.StatusDescriptors != nil {
- in, out := &in.StatusDescriptors, &out.StatusDescriptors
- *out = make([]StatusDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.SpecDescriptors != nil {
- in, out := &in.SpecDescriptors, &out.SpecDescriptors
- *out = make([]SpecDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ActionDescriptor != nil {
- in, out := &in.ActionDescriptor, &out.ActionDescriptor
- *out = make([]ActionDescriptor, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription.
-func (in *CRDDescription) DeepCopy() *CRDDescription {
- if in == nil {
- return nil
- }
- out := new(CRDDescription)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CatalogSource) DeepCopyInto(out *CatalogSource) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource.
-func (in *CatalogSource) DeepCopy() *CatalogSource {
- if in == nil {
- return nil
- }
- out := new(CatalogSource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *CatalogSource) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]CatalogSource, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList.
-func (in *CatalogSourceList) DeepCopy() *CatalogSourceList {
- if in == nil {
- return nil
- }
- out := new(CatalogSourceList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *CatalogSourceList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) {
- *out = *in
- if in.GrpcPodConfig != nil {
- in, out := &in.GrpcPodConfig, &out.GrpcPodConfig
- *out = new(GrpcPodConfig)
- (*in).DeepCopyInto(*out)
- }
- if in.UpdateStrategy != nil {
- in, out := &in.UpdateStrategy, &out.UpdateStrategy
- *out = new(UpdateStrategy)
- (*in).DeepCopyInto(*out)
- }
- if in.Secrets != nil {
- in, out := &in.Secrets, &out.Secrets
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.Icon = in.Icon
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec.
-func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec {
- if in == nil {
- return nil
- }
- out := new(CatalogSourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) {
- *out = *in
- if in.LatestImageRegistryPoll != nil {
- in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll
- *out = (*in).DeepCopy()
- }
- if in.ConfigMapResource != nil {
- in, out := &in.ConfigMapResource, &out.ConfigMapResource
- *out = new(ConfigMapResourceReference)
- (*in).DeepCopyInto(*out)
- }
- if in.RegistryServiceStatus != nil {
- in, out := &in.RegistryServiceStatus, &out.RegistryServiceStatus
- *out = new(RegistryServiceStatus)
- (*in).DeepCopyInto(*out)
- }
- if in.GRPCConnectionState != nil {
- in, out := &in.GRPCConnectionState, &out.GRPCConnectionState
- *out = new(GRPCConnectionState)
- (*in).DeepCopyInto(*out)
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]metav1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus.
-func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus {
- if in == nil {
- return nil
- }
- out := new(CatalogSourceStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CleanupSpec) DeepCopyInto(out *CleanupSpec) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupSpec.
-func (in *CleanupSpec) DeepCopy() *CleanupSpec {
- if in == nil {
- return nil
- }
- out := new(CleanupSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CleanupStatus) DeepCopyInto(out *CleanupStatus) {
- *out = *in
- if in.PendingDeletion != nil {
- in, out := &in.PendingDeletion, &out.PendingDeletion
- *out = make([]ResourceList, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupStatus.
-func (in *CleanupStatus) DeepCopy() *CleanupStatus {
- if in == nil {
- return nil
- }
- out := new(CleanupStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion.
-func (in *ClusterServiceVersion) DeepCopy() *ClusterServiceVersion {
- if in == nil {
- return nil
- }
- out := new(ClusterServiceVersion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterServiceVersion) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersionCondition) {
- *out = *in
- if in.LastUpdateTime != nil {
- in, out := &in.LastUpdateTime, &out.LastUpdateTime
- *out = (*in).DeepCopy()
- }
- if in.LastTransitionTime != nil {
- in, out := &in.LastTransitionTime, &out.LastTransitionTime
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition.
-func (in *ClusterServiceVersionCondition) DeepCopy() *ClusterServiceVersionCondition {
- if in == nil {
- return nil
- }
- out := new(ClusterServiceVersionCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterServiceVersion, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList.
-func (in *ClusterServiceVersionList) DeepCopy() *ClusterServiceVersionList {
- if in == nil {
- return nil
- }
- out := new(ClusterServiceVersionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterServiceVersionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec) {
- *out = *in
- in.InstallStrategy.DeepCopyInto(&out.InstallStrategy)
- in.Version.DeepCopyInto(&out.Version)
- in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions)
- in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions)
- if in.WebhookDefinitions != nil {
- in, out := &in.WebhookDefinitions, &out.WebhookDefinitions
- *out = make([]WebhookDescription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.NativeAPIs != nil {
- in, out := &in.NativeAPIs, &out.NativeAPIs
- *out = make([]metav1.GroupVersionKind, len(*in))
- copy(*out, *in)
- }
- if in.Keywords != nil {
- in, out := &in.Keywords, &out.Keywords
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Maintainers != nil {
- in, out := &in.Maintainers, &out.Maintainers
- *out = make([]Maintainer, len(*in))
- copy(*out, *in)
- }
- out.Provider = in.Provider
- if in.Links != nil {
- in, out := &in.Links, &out.Links
- *out = make([]AppLink, len(*in))
- copy(*out, *in)
- }
- if in.Icon != nil {
- in, out := &in.Icon, &out.Icon
- *out = make([]Icon, len(*in))
- copy(*out, *in)
- }
- if in.InstallModes != nil {
- in, out := &in.InstallModes, &out.InstallModes
- *out = make([]InstallMode, len(*in))
- copy(*out, *in)
- }
- if in.Labels != nil {
- in, out := &in.Labels, &out.Labels
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Selector != nil {
- in, out := &in.Selector, &out.Selector
- *out = new(metav1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
- out.Cleanup = in.Cleanup
- if in.Skips != nil {
- in, out := &in.Skips, &out.Skips
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.RelatedImages != nil {
- in, out := &in.RelatedImages, &out.RelatedImages
- *out = make([]RelatedImage, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec.
-func (in *ClusterServiceVersionSpec) DeepCopy() *ClusterServiceVersionSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterServiceVersionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionStatus) {
- *out = *in
- if in.LastUpdateTime != nil {
- in, out := &in.LastUpdateTime, &out.LastUpdateTime
- *out = (*in).DeepCopy()
- }
- if in.LastTransitionTime != nil {
- in, out := &in.LastTransitionTime, &out.LastTransitionTime
- *out = (*in).DeepCopy()
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]ClusterServiceVersionCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.RequirementStatus != nil {
- in, out := &in.RequirementStatus, &out.RequirementStatus
- *out = make([]RequirementStatus, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.CertsLastUpdated != nil {
- in, out := &in.CertsLastUpdated, &out.CertsLastUpdated
- *out = (*in).DeepCopy()
- }
- if in.CertsRotateAt != nil {
- in, out := &in.CertsRotateAt, &out.CertsRotateAt
- *out = (*in).DeepCopy()
- }
- in.Cleanup.DeepCopyInto(&out.Cleanup)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus.
-func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus {
- if in == nil {
- return nil
- }
- out := new(ClusterServiceVersionStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) {
- *out = *in
- in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference.
-func (in *ConfigMapResourceReference) DeepCopy() *ConfigMapResourceReference {
- if in == nil {
- return nil
- }
- out := new(ConfigMapResourceReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions) {
- *out = *in
- if in.Owned != nil {
- in, out := &in.Owned, &out.Owned
- *out = make([]CRDDescription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Required != nil {
- in, out := &in.Required, &out.Required
- *out = make([]CRDDescription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions.
-func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions {
- if in == nil {
- return nil
- }
- out := new(CustomResourceDefinitions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependentStatus) DeepCopyInto(out *DependentStatus) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus.
-func (in *DependentStatus) DeepCopy() *DependentStatus {
- if in == nil {
- return nil
- }
- out := new(DependentStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExtractContentConfig) DeepCopyInto(out *ExtractContentConfig) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtractContentConfig.
-func (in *ExtractContentConfig) DeepCopy() *ExtractContentConfig {
- if in == nil {
- return nil
- }
- out := new(ExtractContentConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) {
- *out = *in
- in.LastConnectTime.DeepCopyInto(&out.LastConnectTime)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState.
-func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState {
- if in == nil {
- return nil
- }
- out := new(GRPCConnectionState)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) {
- *out = *in
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Affinity != nil {
- in, out := &in.Affinity, &out.Affinity
- *out = new(v1.Affinity)
- (*in).DeepCopyInto(*out)
- }
- if in.PriorityClassName != nil {
- in, out := &in.PriorityClassName, &out.PriorityClassName
- *out = new(string)
- **out = **in
- }
- if in.MemoryTarget != nil {
- in, out := &in.MemoryTarget, &out.MemoryTarget
- x := (*in).DeepCopy()
- *out = &x
- }
- if in.ExtractContent != nil {
- in, out := &in.ExtractContent, &out.ExtractContent
- *out = new(ExtractContentConfig)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig.
-func (in *GrpcPodConfig) DeepCopy() *GrpcPodConfig {
- if in == nil {
- return nil
- }
- out := new(GrpcPodConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Icon) DeepCopyInto(out *Icon) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon.
-func (in *Icon) DeepCopy() *Icon {
- if in == nil {
- return nil
- }
- out := new(Icon)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallMode) DeepCopyInto(out *InstallMode) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode.
-func (in *InstallMode) DeepCopy() *InstallMode {
- if in == nil {
- return nil
- }
- out := new(InstallMode)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) {
- {
- in := &in
- *out = make(InstallModeSet, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallModeSet.
-func (in InstallModeSet) DeepCopy() InstallModeSet {
- if in == nil {
- return nil
- }
- out := new(InstallModeSet)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlan) DeepCopyInto(out *InstallPlan) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan.
-func (in *InstallPlan) DeepCopy() *InstallPlan {
- if in == nil {
- return nil
- }
- out := new(InstallPlan)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *InstallPlan) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) {
- *out = *in
- if in.LastUpdateTime != nil {
- in, out := &in.LastUpdateTime, &out.LastUpdateTime
- *out = (*in).DeepCopy()
- }
- if in.LastTransitionTime != nil {
- in, out := &in.LastTransitionTime, &out.LastTransitionTime
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition.
-func (in *InstallPlanCondition) DeepCopy() *InstallPlanCondition {
- if in == nil {
- return nil
- }
- out := new(InstallPlanCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]InstallPlan, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList.
-func (in *InstallPlanList) DeepCopy() *InstallPlanList {
- if in == nil {
- return nil
- }
- out := new(InstallPlanList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *InstallPlanList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference.
-func (in *InstallPlanReference) DeepCopy() *InstallPlanReference {
- if in == nil {
- return nil
- }
- out := new(InstallPlanReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) {
- *out = *in
- if in.ClusterServiceVersionNames != nil {
- in, out := &in.ClusterServiceVersionNames, &out.ClusterServiceVersionNames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec.
-func (in *InstallPlanSpec) DeepCopy() *InstallPlanSpec {
- if in == nil {
- return nil
- }
- out := new(InstallPlanSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]InstallPlanCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.CatalogSources != nil {
- in, out := &in.CatalogSources, &out.CatalogSources
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Plan != nil {
- in, out := &in.Plan, &out.Plan
- *out = make([]*Step, len(*in))
- for i := range *in {
- if (*in)[i] != nil {
- in, out := &(*in)[i], &(*out)[i]
- *out = new(Step)
- **out = **in
- }
- }
- }
- if in.BundleLookups != nil {
- in, out := &in.BundleLookups, &out.BundleLookups
- *out = make([]BundleLookup, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.AttenuatedServiceAccountRef != nil {
- in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef
- *out = new(v1.ObjectReference)
- **out = **in
- }
- if in.StartTime != nil {
- in, out := &in.StartTime, &out.StartTime
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus.
-func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus {
- if in == nil {
- return nil
- }
- out := new(InstallPlanStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Maintainer) DeepCopyInto(out *Maintainer) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer.
-func (in *Maintainer) DeepCopy() *Maintainer {
- if in == nil {
- return nil
- }
- out := new(Maintainer)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) {
- *out = *in
- in.StrategySpec.DeepCopyInto(&out.StrategySpec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy.
-func (in *NamedInstallStrategy) DeepCopy() *NamedInstallStrategy {
- if in == nil {
- return nil
- }
- out := new(NamedInstallStrategy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) {
- *out = *in
- if in.Interval != nil {
- in, out := &in.Interval, &out.Interval
- *out = new(metav1.Duration)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll.
-func (in *RegistryPoll) DeepCopy() *RegistryPoll {
- if in == nil {
- return nil
- }
- out := new(RegistryPoll)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) {
- *out = *in
- in.CreatedAt.DeepCopyInto(&out.CreatedAt)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus.
-func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus {
- if in == nil {
- return nil
- }
- out := new(RegistryServiceStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RelatedImage) DeepCopyInto(out *RelatedImage) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedImage.
-func (in *RelatedImage) DeepCopy() *RelatedImage {
- if in == nil {
- return nil
- }
- out := new(RelatedImage)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) {
- *out = *in
- if in.Dependents != nil {
- in, out := &in.Dependents, &out.Dependents
- *out = make([]DependentStatus, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus.
-func (in *RequirementStatus) DeepCopy() *RequirementStatus {
- if in == nil {
- return nil
- }
- out := new(RequirementStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceInstance) DeepCopyInto(out *ResourceInstance) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInstance.
-func (in *ResourceInstance) DeepCopy() *ResourceInstance {
- if in == nil {
- return nil
- }
- out := new(ResourceInstance)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceList) DeepCopyInto(out *ResourceList) {
- *out = *in
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]ResourceInstance, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
-func (in *ResourceList) DeepCopy() *ResourceList {
- if in == nil {
- return nil
- }
- out := new(ResourceList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) {
- *out = *in
- if in.XDescriptors != nil {
- in, out := &in.XDescriptors, &out.XDescriptors
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Value != nil {
- in, out := &in.Value, &out.Value
- *out = make(json.RawMessage, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor.
-func (in *SpecDescriptor) DeepCopy() *SpecDescriptor {
- if in == nil {
- return nil
- }
- out := new(SpecDescriptor)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) {
- *out = *in
- if in.XDescriptors != nil {
- in, out := &in.XDescriptors, &out.XDescriptors
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Value != nil {
- in, out := &in.Value, &out.Value
- *out = make(json.RawMessage, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor.
-func (in *StatusDescriptor) DeepCopy() *StatusDescriptor {
- if in == nil {
- return nil
- }
- out := new(StatusDescriptor)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Step) DeepCopyInto(out *Step) {
- *out = *in
- out.Resource = in.Resource
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step.
-func (in *Step) DeepCopy() *Step {
- if in == nil {
- return nil
- }
- out := new(Step)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepResource) DeepCopyInto(out *StepResource) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource.
-func (in *StepResource) DeepCopy() *StepResource {
- if in == nil {
- return nil
- }
- out := new(StepResource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPermissions) {
- *out = *in
- if in.Rules != nil {
- in, out := &in.Rules, &out.Rules
- *out = make([]rbacv1.PolicyRule, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions.
-func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissions {
- if in == nil {
- return nil
- }
- out := new(StrategyDeploymentPermissions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) {
- *out = *in
- in.Spec.DeepCopyInto(&out.Spec)
- if in.Label != nil {
- in, out := &in.Label, &out.Label
- *out = make(labels.Set, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec.
-func (in *StrategyDeploymentSpec) DeepCopy() *StrategyDeploymentSpec {
- if in == nil {
- return nil
- }
- out := new(StrategyDeploymentSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment) {
- *out = *in
- if in.DeploymentSpecs != nil {
- in, out := &in.DeploymentSpecs, &out.DeploymentSpecs
- *out = make([]StrategyDeploymentSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Permissions != nil {
- in, out := &in.Permissions, &out.Permissions
- *out = make([]StrategyDeploymentPermissions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ClusterPermissions != nil {
- in, out := &in.ClusterPermissions, &out.ClusterPermissions
- *out = make([]StrategyDeploymentPermissions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment.
-func (in *StrategyDetailsDeployment) DeepCopy() *StrategyDetailsDeployment {
- if in == nil {
- return nil
- }
- out := new(StrategyDetailsDeployment)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Subscription) DeepCopyInto(out *Subscription) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- if in.Spec != nil {
- in, out := &in.Spec, &out.Spec
- *out = new(SubscriptionSpec)
- (*in).DeepCopyInto(*out)
- }
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription.
-func (in *Subscription) DeepCopy() *Subscription {
- if in == nil {
- return nil
- }
- out := new(Subscription)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Subscription) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth) {
- *out = *in
- if in.CatalogSourceRef != nil {
- in, out := &in.CatalogSourceRef, &out.CatalogSourceRef
- *out = new(v1.ObjectReference)
- **out = **in
- }
- if in.LastUpdated != nil {
- in, out := &in.LastUpdated, &out.LastUpdated
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth.
-func (in *SubscriptionCatalogHealth) DeepCopy() *SubscriptionCatalogHealth {
- if in == nil {
- return nil
- }
- out := new(SubscriptionCatalogHealth)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) {
- *out = *in
- if in.LastHeartbeatTime != nil {
- in, out := &in.LastHeartbeatTime, &out.LastHeartbeatTime
- *out = (*in).DeepCopy()
- }
- if in.LastTransitionTime != nil {
- in, out := &in.LastTransitionTime, &out.LastTransitionTime
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition.
-func (in *SubscriptionCondition) DeepCopy() *SubscriptionCondition {
- if in == nil {
- return nil
- }
- out := new(SubscriptionCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) {
- *out = *in
- if in.Selector != nil {
- in, out := &in.Selector, &out.Selector
- *out = new(metav1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Resources != nil {
- in, out := &in.Resources, &out.Resources
- *out = new(v1.ResourceRequirements)
- (*in).DeepCopyInto(*out)
- }
- if in.EnvFrom != nil {
- in, out := &in.EnvFrom, &out.EnvFrom
- *out = make([]v1.EnvFromSource, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]v1.EnvVar, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Volumes != nil {
- in, out := &in.Volumes, &out.Volumes
- *out = make([]v1.Volume, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.VolumeMounts != nil {
- in, out := &in.VolumeMounts, &out.VolumeMounts
- *out = make([]v1.VolumeMount, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Affinity != nil {
- in, out := &in.Affinity, &out.Affinity
- *out = new(v1.Affinity)
- (*in).DeepCopyInto(*out)
- }
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig.
-func (in *SubscriptionConfig) DeepCopy() *SubscriptionConfig {
- if in == nil {
- return nil
- }
- out := new(SubscriptionConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Subscription, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList.
-func (in *SubscriptionList) DeepCopy() *SubscriptionList {
- if in == nil {
- return nil
- }
- out := new(SubscriptionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SubscriptionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) {
- *out = *in
- if in.Config != nil {
- in, out := &in.Config, &out.Config
- *out = new(SubscriptionConfig)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec.
-func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec {
- if in == nil {
- return nil
- }
- out := new(SubscriptionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) {
- *out = *in
- if in.Install != nil {
- in, out := &in.Install, &out.Install
- *out = new(InstallPlanReference)
- **out = **in
- }
- if in.InstallPlanRef != nil {
- in, out := &in.InstallPlanRef, &out.InstallPlanRef
- *out = new(v1.ObjectReference)
- **out = **in
- }
- if in.CatalogHealth != nil {
- in, out := &in.CatalogHealth, &out.CatalogHealth
- *out = make([]SubscriptionCatalogHealth, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]SubscriptionCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- in.LastUpdated.DeepCopyInto(&out.LastUpdated)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus.
-func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus {
- if in == nil {
- return nil
- }
- out := new(SubscriptionStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) {
- *out = *in
- if in.RegistryPoll != nil {
- in, out := &in.RegistryPoll, &out.RegistryPoll
- *out = new(RegistryPoll)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy.
-func (in *UpdateStrategy) DeepCopy() *UpdateStrategy {
- if in == nil {
- return nil
- }
- out := new(UpdateStrategy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) {
- *out = *in
- if in.TargetPort != nil {
- in, out := &in.TargetPort, &out.TargetPort
- *out = new(intstr.IntOrString)
- **out = **in
- }
- if in.Rules != nil {
- in, out := &in.Rules, &out.Rules
- *out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.FailurePolicy != nil {
- in, out := &in.FailurePolicy, &out.FailurePolicy
- *out = new(admissionregistrationv1.FailurePolicyType)
- **out = **in
- }
- if in.MatchPolicy != nil {
- in, out := &in.MatchPolicy, &out.MatchPolicy
- *out = new(admissionregistrationv1.MatchPolicyType)
- **out = **in
- }
- if in.ObjectSelector != nil {
- in, out := &in.ObjectSelector, &out.ObjectSelector
- *out = new(metav1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
- if in.SideEffects != nil {
- in, out := &in.SideEffects, &out.SideEffects
- *out = new(admissionregistrationv1.SideEffectClass)
- **out = **in
- }
- if in.TimeoutSeconds != nil {
- in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
- *out = new(int32)
- **out = **in
- }
- if in.AdmissionReviewVersions != nil {
- in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ReinvocationPolicy != nil {
- in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
- *out = new(admissionregistrationv1.ReinvocationPolicyType)
- **out = **in
- }
- if in.WebhookPath != nil {
- in, out := &in.WebhookPath, &out.WebhookPath
- *out = new(string)
- **out = **in
- }
- if in.ConversionCRDs != nil {
- in, out := &in.ConversionCRDs, &out.ConversionCRDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookDescription.
-func (in *WebhookDescription) DeepCopy() *WebhookDescription {
- if in == nil {
- return nil
- }
- out := new(WebhookDescription)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go
deleted file mode 100644
index b881240adfe9..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +groupName=operators.coreos.com
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators
-
-// Package v1alpha2 contains resources types for version v1alpha2 of the operators.coreos.com API group.
-package v1alpha2
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go
deleted file mode 100644
index 637dc4dfc802..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +kubebuilder:object:generate=true
-
-// Package v1alpha2 contains API Schema definitions for the discovery v1alpha2 API group.
-package v1alpha2
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
- // GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v1alpha2"}
-
- // SchemeGroupVersion is required for compatibility with client generation.
- SchemeGroupVersion = GroupVersion
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
- // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
-)
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return GroupVersion.WithResource(resource).GroupResource()
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go
deleted file mode 100644
index 2e67773f5c52..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package v1alpha2
-
-import (
- "sort"
- "strings"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- OperatorGroupAnnotationKey = "olm.operatorGroup"
- OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace"
- OperatorGroupTargetsAnnotationKey = "olm.targetNamespaces"
- OperatorGroupProvidedAPIsAnnotationKey = "olm.providedAPIs"
-
- OperatorGroupKind = "OperatorGroup"
-)
-
-// OperatorGroupSpec is the spec for an OperatorGroup resource.
-type OperatorGroupSpec struct {
- // Selector selects the OperatorGroup's target namespaces.
- // +optional
- Selector *metav1.LabelSelector `json:"selector,omitempty"`
-
- // TargetNamespaces is an explicit set of namespaces to target.
- // If it is set, Selector is ignored.
- // +optional
- TargetNamespaces []string `json:"targetNamespaces,omitempty"`
-
- // ServiceAccountName is the admin specified service account which will be
- // used to deploy operator(s) in this operator group.
- ServiceAccountName string `json:"serviceAccountName,omitempty"`
-
- // Static tells OLM not to update the OperatorGroup's providedAPIs annotation
- // +optional
- StaticProvidedAPIs bool `json:"staticProvidedAPIs,omitempty"`
-}
-
-// OperatorGroupStatus is the status for an OperatorGroupResource.
-type OperatorGroupStatus struct {
- // Namespaces is the set of target namespaces for the OperatorGroup.
- Namespaces []string `json:"namespaces,omitempty"`
-
- // ServiceAccountRef references the service account object specified.
- ServiceAccountRef *corev1.ObjectReference `json:"serviceAccountRef,omitempty"`
-
- // LastUpdated is a timestamp of the last time the OperatorGroup's status was Updated.
- LastUpdated *metav1.Time `json:"lastUpdated"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:resource:shortName=og,categories=olm
-// +kubebuilder:subresource:status
-
-// OperatorGroup is the unit of multitenancy for OLM managed operators.
-// It constrains the installation of operators in its namespace to a specified set of target namespaces.
-type OperatorGroup struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // +optional
- Spec OperatorGroupSpec `json:"spec"`
- Status OperatorGroupStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OperatorGroupList is a list of OperatorGroup resources.
-type OperatorGroupList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []OperatorGroup `json:"items"`
-}
-
-func (o *OperatorGroup) BuildTargetNamespaces() string {
- sort.Strings(o.Status.Namespaces)
- return strings.Join(o.Status.Namespaces, ",")
-}
-
-// IsServiceAccountSpecified returns true if the spec has a service account name specified.
-func (o *OperatorGroup) IsServiceAccountSpecified() bool {
- if o.Spec.ServiceAccountName == "" {
- return false
- }
-
- return true
-}
-
-// HasServiceAccountSynced returns true if the service account specified has been synced.
-func (o *OperatorGroup) HasServiceAccountSynced() bool {
- if o.IsServiceAccountSpecified() && o.Status.ServiceAccountRef != nil {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go
deleted file mode 100644
index 885643cb75d8..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,139 +0,0 @@
-//go:build !ignore_autogenerated
-
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha2
-
-import (
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup.
-func (in *OperatorGroup) DeepCopy() *OperatorGroup {
- if in == nil {
- return nil
- }
- out := new(OperatorGroup)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorGroup) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OperatorGroup, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList.
-func (in *OperatorGroupList) DeepCopy() *OperatorGroupList {
- if in == nil {
- return nil
- }
- out := new(OperatorGroupList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorGroupList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) {
- *out = *in
- if in.Selector != nil {
- in, out := &in.Selector, &out.Selector
- *out = new(v1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
- if in.TargetNamespaces != nil {
- in, out := &in.TargetNamespaces, &out.TargetNamespaces
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec.
-func (in *OperatorGroupSpec) DeepCopy() *OperatorGroupSpec {
- if in == nil {
- return nil
- }
- out := new(OperatorGroupSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) {
- *out = *in
- if in.Namespaces != nil {
- in, out := &in.Namespaces, &out.Namespaces
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ServiceAccountRef != nil {
- in, out := &in.ServiceAccountRef, &out.ServiceAccountRef
- *out = new(corev1.ObjectReference)
- **out = **in
- }
- if in.LastUpdated != nil {
- in, out := &in.LastUpdated, &out.LastUpdated
- *out = (*in).DeepCopy()
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus.
-func (in *OperatorGroupStatus) DeepCopy() *OperatorGroupStatus {
- if in == nil {
- return nil
- }
- out := new(OperatorGroupStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go
deleted file mode 100644
index f85f7924252e..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// +groupName=operators.coreos.com
-
-// Package v2 contains resources types for version v2 of the operators.coreos.com API group.
-package v2
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go
deleted file mode 100644
index 2d2d923d1be6..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +kubebuilder:object:generate=true
-
-// Package v2 contains API Schema definitions for the operator v2 API group.
-package v2
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
- // GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v2"}
-
- // SchemeGroupVersion is required for compatibility with client generation.
- SchemeGroupVersion = GroupVersion
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
- // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
-)
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return GroupVersion.WithResource(resource).GroupResource()
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go
deleted file mode 100644
index ef1c56de61be..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package v2
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- // Upgradeable indicates that the operator is upgradeable
- Upgradeable string = "Upgradeable"
-)
-
-// ConditionType codifies a condition's type.
-type ConditionType string
-
-// OperatorConditionSpec allows an operator to report state to OLM and provides
-// cluster admin with the ability to manually override state reported by the operator.
-type OperatorConditionSpec struct {
- ServiceAccounts []string `json:"serviceAccounts,omitempty"`
- Deployments []string `json:"deployments,omitempty"`
- Overrides []metav1.Condition `json:"overrides,omitempty"`
- Conditions []metav1.Condition `json:"conditions,omitempty"`
-}
-
-// OperatorConditionStatus allows OLM to convey which conditions have been observed.
-type OperatorConditionStatus struct {
- Conditions []metav1.Condition `json:"conditions,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
-// +kubebuilder:storageversion
-// +kubebuilder:resource:shortName=condition,categories=olm
-// +kubebuilder:subresource:status
-// OperatorCondition is a Custom Resource of type `OperatorCondition` which is used to convey information to OLM about the state of an operator.
-type OperatorCondition struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec OperatorConditionSpec `json:"spec,omitempty"`
- Status OperatorConditionStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// OperatorConditionList represents a list of Conditions.
-type OperatorConditionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []OperatorCondition `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&OperatorCondition{}, &OperatorConditionList{})
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go
deleted file mode 100644
index 92ecc812ac29..000000000000
--- a/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,145 +0,0 @@
-//go:build !ignore_autogenerated
-
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v2
-
-import (
- "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition.
-func (in *OperatorCondition) DeepCopy() *OperatorCondition {
- if in == nil {
- return nil
- }
- out := new(OperatorCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorCondition) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConditionList) DeepCopyInto(out *OperatorConditionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OperatorCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionList.
-func (in *OperatorConditionList) DeepCopy() *OperatorConditionList {
- if in == nil {
- return nil
- }
- out := new(OperatorConditionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorConditionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConditionSpec) DeepCopyInto(out *OperatorConditionSpec) {
- *out = *in
- if in.ServiceAccounts != nil {
- in, out := &in.ServiceAccounts, &out.ServiceAccounts
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Deployments != nil {
- in, out := &in.Deployments, &out.Deployments
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Overrides != nil {
- in, out := &in.Overrides, &out.Overrides
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionSpec.
-func (in *OperatorConditionSpec) DeepCopy() *OperatorConditionSpec {
- if in == nil {
- return nil
- }
- out := new(OperatorConditionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConditionStatus) DeepCopyInto(out *OperatorConditionStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionStatus.
-func (in *OperatorConditionStatus) DeepCopy() *OperatorConditionStatus {
- if in == nil {
- return nil
- }
- out := new(OperatorConditionStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE b/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE
deleted file mode 100644
index 261eeb9e9f8b..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go
deleted file mode 100644
index 251358bb4a5d..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package contains the scheme of the automatically generated clientset.
-package scheme
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go
deleted file mode 100644
index 7c1a1137dd86..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package scheme
-
-import (
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
- operatorsv1alpha2 "github.com/operator-framework/api/pkg/operators/v1alpha2"
- operatorsv2 "github.com/operator-framework/api/pkg/operators/v2"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- schema "k8s.io/apimachinery/pkg/runtime/schema"
- serializer "k8s.io/apimachinery/pkg/runtime/serializer"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-var Scheme = runtime.NewScheme()
-var Codecs = serializer.NewCodecFactory(Scheme)
-var ParameterCodec = runtime.NewParameterCodec(Scheme)
-var localSchemeBuilder = runtime.SchemeBuilder{
- operatorsv1.AddToScheme,
- operatorsv1alpha1.AddToScheme,
- operatorsv1alpha2.AddToScheme,
- operatorsv2.AddToScheme,
-}
-
-// AddToScheme adds all types of this clientset into the given scheme. This allows composition
-// of clientsets, like in:
-//
-// import (
-// "k8s.io/client-go/kubernetes"
-// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-// )
-//
-// kclientset, _ := kubernetes.NewForConfig(c)
-// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
-//
-// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
-// correctly.
-var AddToScheme = localSchemeBuilder.AddToScheme
-
-func init() {
- v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
- utilruntime.Must(AddToScheme(Scheme))
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go
deleted file mode 100644
index d84e927bcdb1..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go
deleted file mode 100644
index 357fc8aae01f..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-type OLMConfigExpansion interface{}
-
-type OperatorExpansion interface{}
-
-type OperatorConditionExpansion interface{}
-
-type OperatorGroupExpansion interface{}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go
deleted file mode 100644
index 804cfd5681dc..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- context "context"
-
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- gentype "k8s.io/client-go/gentype"
-)
-
-// OLMConfigsGetter has a method to return a OLMConfigInterface.
-// A group's client should implement this interface.
-type OLMConfigsGetter interface {
- OLMConfigs() OLMConfigInterface
-}
-
-// OLMConfigInterface has methods to work with OLMConfig resources.
-type OLMConfigInterface interface {
- Create(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.CreateOptions) (*operatorsv1.OLMConfig, error)
- Update(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.UpdateOptions) (*operatorsv1.OLMConfig, error)
- // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.UpdateOptions) (*operatorsv1.OLMConfig, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OLMConfig, error)
- List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OLMConfigList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OLMConfig, err error)
- OLMConfigExpansion
-}
-
-// oLMConfigs implements OLMConfigInterface
-type oLMConfigs struct {
- *gentype.ClientWithList[*operatorsv1.OLMConfig, *operatorsv1.OLMConfigList]
-}
-
-// newOLMConfigs returns a OLMConfigs
-func newOLMConfigs(c *OperatorsV1Client) *oLMConfigs {
- return &oLMConfigs{
- gentype.NewClientWithList[*operatorsv1.OLMConfig, *operatorsv1.OLMConfigList](
- "olmconfigs",
- c.RESTClient(),
- scheme.ParameterCodec,
- "",
- func() *operatorsv1.OLMConfig { return &operatorsv1.OLMConfig{} },
- func() *operatorsv1.OLMConfigList { return &operatorsv1.OLMConfigList{} },
- ),
- }
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go
deleted file mode 100644
index 9d7176670166..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- context "context"
-
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- gentype "k8s.io/client-go/gentype"
-)
-
-// OperatorsGetter has a method to return a OperatorInterface.
-// A group's client should implement this interface.
-type OperatorsGetter interface {
- Operators() OperatorInterface
-}
-
-// OperatorInterface has methods to work with Operator resources.
-type OperatorInterface interface {
- Create(ctx context.Context, operator *operatorsv1.Operator, opts metav1.CreateOptions) (*operatorsv1.Operator, error)
- Update(ctx context.Context, operator *operatorsv1.Operator, opts metav1.UpdateOptions) (*operatorsv1.Operator, error)
- // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, operator *operatorsv1.Operator, opts metav1.UpdateOptions) (*operatorsv1.Operator, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.Operator, error)
- List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.Operator, err error)
- OperatorExpansion
-}
-
-// operators implements OperatorInterface
-type operators struct {
- *gentype.ClientWithList[*operatorsv1.Operator, *operatorsv1.OperatorList]
-}
-
-// newOperators returns a Operators
-func newOperators(c *OperatorsV1Client) *operators {
- return &operators{
- gentype.NewClientWithList[*operatorsv1.Operator, *operatorsv1.OperatorList](
- "operators",
- c.RESTClient(),
- scheme.ParameterCodec,
- "",
- func() *operatorsv1.Operator { return &operatorsv1.Operator{} },
- func() *operatorsv1.OperatorList { return &operatorsv1.OperatorList{} },
- ),
- }
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go
deleted file mode 100644
index 9d11723fb51e..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- context "context"
-
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- gentype "k8s.io/client-go/gentype"
-)
-
-// OperatorConditionsGetter has a method to return a OperatorConditionInterface.
-// A group's client should implement this interface.
-type OperatorConditionsGetter interface {
- OperatorConditions(namespace string) OperatorConditionInterface
-}
-
-// OperatorConditionInterface has methods to work with OperatorCondition resources.
-type OperatorConditionInterface interface {
- Create(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.CreateOptions) (*operatorsv1.OperatorCondition, error)
- Update(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.UpdateOptions) (*operatorsv1.OperatorCondition, error)
- // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.UpdateOptions) (*operatorsv1.OperatorCondition, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OperatorCondition, error)
- List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorConditionList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OperatorCondition, err error)
- OperatorConditionExpansion
-}
-
-// operatorConditions implements OperatorConditionInterface
-type operatorConditions struct {
- *gentype.ClientWithList[*operatorsv1.OperatorCondition, *operatorsv1.OperatorConditionList]
-}
-
-// newOperatorConditions returns a OperatorConditions
-func newOperatorConditions(c *OperatorsV1Client, namespace string) *operatorConditions {
- return &operatorConditions{
- gentype.NewClientWithList[*operatorsv1.OperatorCondition, *operatorsv1.OperatorConditionList](
- "operatorconditions",
- c.RESTClient(),
- scheme.ParameterCodec,
- namespace,
- func() *operatorsv1.OperatorCondition { return &operatorsv1.OperatorCondition{} },
- func() *operatorsv1.OperatorConditionList { return &operatorsv1.OperatorConditionList{} },
- ),
- }
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go
deleted file mode 100644
index 7df6bc50ad9c..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- context "context"
-
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- gentype "k8s.io/client-go/gentype"
-)
-
-// OperatorGroupsGetter has a method to return a OperatorGroupInterface.
-// A group's client should implement this interface.
-type OperatorGroupsGetter interface {
- OperatorGroups(namespace string) OperatorGroupInterface
-}
-
-// OperatorGroupInterface has methods to work with OperatorGroup resources.
-type OperatorGroupInterface interface {
- Create(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.CreateOptions) (*operatorsv1.OperatorGroup, error)
- Update(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.UpdateOptions) (*operatorsv1.OperatorGroup, error)
- // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.UpdateOptions) (*operatorsv1.OperatorGroup, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OperatorGroup, error)
- List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorGroupList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OperatorGroup, err error)
- OperatorGroupExpansion
-}
-
-// operatorGroups implements OperatorGroupInterface
-type operatorGroups struct {
- *gentype.ClientWithList[*operatorsv1.OperatorGroup, *operatorsv1.OperatorGroupList]
-}
-
-// newOperatorGroups returns a OperatorGroups
-func newOperatorGroups(c *OperatorsV1Client, namespace string) *operatorGroups {
- return &operatorGroups{
- gentype.NewClientWithList[*operatorsv1.OperatorGroup, *operatorsv1.OperatorGroupList](
- "operatorgroups",
- c.RESTClient(),
- scheme.ParameterCodec,
- namespace,
- func() *operatorsv1.OperatorGroup { return &operatorsv1.OperatorGroup{} },
- func() *operatorsv1.OperatorGroupList { return &operatorsv1.OperatorGroupList{} },
- ),
- }
-}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go
deleted file mode 100644
index d355cd94127a..000000000000
--- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-Copyright Red Hat, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- http "net/http"
-
- operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
- scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
- rest "k8s.io/client-go/rest"
-)
-
-type OperatorsV1Interface interface {
- RESTClient() rest.Interface
- OLMConfigsGetter
- OperatorsGetter
- OperatorConditionsGetter
- OperatorGroupsGetter
-}
-
-// OperatorsV1Client is used to interact with features provided by the operators.coreos.com group.
-type OperatorsV1Client struct {
- restClient rest.Interface
-}
-
-func (c *OperatorsV1Client) OLMConfigs() OLMConfigInterface {
- return newOLMConfigs(c)
-}
-
-func (c *OperatorsV1Client) Operators() OperatorInterface {
- return newOperators(c)
-}
-
-func (c *OperatorsV1Client) OperatorConditions(namespace string) OperatorConditionInterface {
- return newOperatorConditions(c, namespace)
-}
-
-func (c *OperatorsV1Client) OperatorGroups(namespace string) OperatorGroupInterface {
- return newOperatorGroups(c, namespace)
-}
-
-// NewForConfig creates a new OperatorsV1Client for the given config.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*OperatorsV1Client, error) {
- config := *c
- if err := setConfigDefaults(&config); err != nil {
- return nil, err
- }
- httpClient, err := rest.HTTPClientFor(&config)
- if err != nil {
- return nil, err
- }
- return NewForConfigAndClient(&config, httpClient)
-}
-
-// NewForConfigAndClient creates a new OperatorsV1Client for the given config and http client.
-// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*OperatorsV1Client, error) {
- config := *c
- if err := setConfigDefaults(&config); err != nil {
- return nil, err
- }
- client, err := rest.RESTClientForConfigAndClient(&config, h)
- if err != nil {
- return nil, err
- }
- return &OperatorsV1Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new OperatorsV1Client for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *OperatorsV1Client {
- client, err := NewForConfig(c)
- if err != nil {
- panic(err)
- }
- return client
-}
-
-// New creates a new OperatorsV1Client for the given RESTClient.
-func New(c rest.Interface) *OperatorsV1Client {
- return &OperatorsV1Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
- gv := operatorsv1.SchemeGroupVersion
- config.GroupVersion = &gv
- config.APIPath = "/apis"
- config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
-
- if config.UserAgent == "" {
- config.UserAgent = rest.DefaultKubernetesUserAgent()
- }
-
- return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *OperatorsV1Client) RESTClient() rest.Interface {
- if c == nil {
- return nil
- }
- return c.restClient
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 340ba372429c..6354025a85d6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -49,6 +49,8 @@ cloud.google.com/go/storage/internal/apiv2/storagepb
# git.sr.ht/~sbinet/gg v0.5.0
## explicit; go 1.19
git.sr.ht/~sbinet/gg
+# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
+## explicit; go 1.20
# github.com/Azure/azure-pipeline-go v0.2.3
## explicit; go 1.14
github.com/Azure/azure-pipeline-go/pipeline
@@ -540,7 +542,7 @@ github.com/distribution/distribution/v3/reference
# github.com/distribution/reference v0.6.0
## explicit; go 1.20
github.com/distribution/reference
-# github.com/docker/docker v27.3.1+incompatible
+# github.com/docker/docker v27.1.2+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -649,16 +651,17 @@ github.com/exponent-io/jsonpath
# github.com/fatih/camelcase v1.0.0
## explicit
github.com/fatih/camelcase
+# github.com/fatih/color v1.18.0
+## explicit; go 1.17
# github.com/felixge/fgprof v0.9.4
## explicit; go 1.14
github.com/felixge/fgprof
# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.8.0
+# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
-github.com/fsnotify/fsnotify/internal
# github.com/fsouza/go-dockerclient v1.12.0
## explicit; go 1.22
github.com/fsouza/go-dockerclient
@@ -1431,7 +1434,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo
github.com/openshift-eng/openshift-tests-extension/pkg/junit
github.com/openshift-eng/openshift-tests-extension/pkg/util/sets
github.com/openshift-eng/openshift-tests-extension/pkg/version
-# github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20250710004639-926605d3338b
+# github.com/openshift/api v0.0.0-20250710004639-926605d3338b
## explicit; go 1.24.0
github.com/openshift/api
github.com/openshift/api/annotations
@@ -1749,16 +1752,7 @@ github.com/opentracing/opentracing-go/ext
github.com/opentracing/opentracing-go/log
# github.com/operator-framework/api v0.27.0
## explicit; go 1.22.0
-github.com/operator-framework/api/pkg/lib/version
-github.com/operator-framework/api/pkg/operators
github.com/operator-framework/api/pkg/operators/v1
-github.com/operator-framework/api/pkg/operators/v1alpha1
-github.com/operator-framework/api/pkg/operators/v1alpha2
-github.com/operator-framework/api/pkg/operators/v2
-# github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa
-## explicit; go 1.23.0
-github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme
-github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1
# github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417
## explicit; go 1.22.0
github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1
@@ -2234,8 +2228,8 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
-## explicit; go 1.22.0
+# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/image v0.11.0
@@ -4837,4 +4831,3 @@ sigs.k8s.io/yaml/goyaml.v3
# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f
# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f
# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f
-# github.com/openshift/api => github.com/openshift/api v0.0.0-20250710004639-926605d3338b