diff --git a/go.mod b/go.mod
index 7f290bc2d7b9..7910e01e49db 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,7 @@ require (
github.com/coreos/stream-metadata-go v0.4.9
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1
- github.com/docker/docker v27.1.2+incompatible
+ github.com/docker/docker v27.3.1+incompatible
github.com/fsouza/go-dockerclient v1.12.0
github.com/gebn/bmc v0.0.0-20250519231546-bf709e03fe3c
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
@@ -59,11 +59,12 @@ require (
github.com/onsi/gomega v1.37.0
github.com/opencontainers/go-digest v1.0.0
github.com/openshift-eng/openshift-tests-extension v0.0.0-20250711173707-dc2a20e5a5f8
- github.com/openshift/api v0.0.0-20250710004639-926605d3338b
+ github.com/openshift/api v3.9.0+incompatible
github.com/openshift/apiserver-library-go v0.0.0-20250710132015-f0d44ef6e53b
github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee
github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee
github.com/openshift/library-go v0.0.0-20250812160438-378de074fe7b
+ github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417
github.com/pborman/uuid v1.2.0
github.com/pkg/errors v0.9.1
@@ -88,7 +89,7 @@ require (
go.etcd.io/etcd/client/pkg/v3 v3.5.21
go.etcd.io/etcd/client/v3 v3.5.21
golang.org/x/crypto v0.40.0
- golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+ golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
golang.org/x/mod v0.25.0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0
@@ -132,7 +133,6 @@ require (
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
git.sr.ht/~sbinet/gg v0.5.0 // indirect
- github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect
@@ -198,7 +198,7 @@ require (
github.com/fatih/camelcase v1.0.0 // indirect
github.com/felixge/fgprof v0.9.4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
@@ -310,6 +310,7 @@ require (
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/operator-framework/api v0.27.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -398,7 +399,7 @@ require (
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 // indirect
sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 // indirect
- sigs.k8s.io/controller-runtime v0.19.0 // indirect
+ sigs.k8s.io/controller-runtime v0.19.4 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect
sigs.k8s.io/kustomize/api v0.19.0 // indirect
@@ -440,3 +441,6 @@ replace (
k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f
k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f
)
+
+// github.com/operator-framework/operator-lifecycle-manager requires this import.
+replace github.com/openshift/api => github.com/openshift/api v0.0.0-20250710004639-926605d3338b
diff --git a/go.sum b/go.sum
index 8870c880280a..cfe03afdd6b5 100644
--- a/go.sum
+++ b/go.sum
@@ -292,8 +292,8 @@ github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1 h1:go
github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1/go.mod h1:+AmQ9ZZMMxKQCOOUFHIN/5viLDj3tEQGPsLbNPSc0EI=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY=
-github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -329,8 +329,8 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSY
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
-github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88=
github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
@@ -341,8 +341,8 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU=
github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
@@ -868,6 +868,10 @@ github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20250416174521-4eb003743b54/go.mod
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI=
+github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM=
+github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa h1:VzZn+vxGFprQPnaLVDgU5Wfu+2UHsQJh/xLwJw8rXkA=
+github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa/go.mod h1:kzt/wadHjn76OoJeuu7BFzJFRh780BSMYuWaSxk9HGA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 h1:7k+dokKFfpICbkpX5TvvpFbKTFsl/6YQd46EpY2JNhc=
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417/go.mod h1:9LxDV3rAHlGHAYtVrT62y/fqfIxc5RrDiYi9RVeD0gg=
@@ -1139,8 +1143,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
@@ -1592,8 +1596,8 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 h1:qiifAaaBqV3d/EcN9dKJaJI
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29/go.mod h1:ZFAt0qF1kR+w8nBVJK56s6CFvLrlosN1i2c+Sxb7LBk=
sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 h1:Fm/Yjv4nXjUtJ90uXKSKwPwaTWYuDFMhDNNOd77PlOg=
sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16/go.mod h1:+kl90flu4+WCP6HBGVYbKVQR+5ztDzUNrWJz8rsnvRU=
-sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
-sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
+sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM=
sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
diff --git a/test/extended/cpu_partitioning/pods.go b/test/extended/cpu_partitioning/pods.go
index 08a1b24e399f..8b8046c6d36e 100644
--- a/test/extended/cpu_partitioning/pods.go
+++ b/test/extended/cpu_partitioning/pods.go
@@ -54,6 +54,8 @@ var (
"cloud-ingress-operator": {"openshift-cloud-ingress-operator"},
"managed-velero-operator": {"openshift-velero"},
"velero": {"openshift-velero"},
+
+ "gateway": {"openshift-ingress"},
}
excludedBestEffortDaemonSets = map[string][]string{
@@ -107,6 +109,13 @@ var _ = g.Describe("[sig-node][apigroup:config.openshift.io] CPU Partitioning cl
o.Expect(err).NotTo(o.HaveOccurred())
for _, deployment := range deployments.Items {
+ if deployment.Namespace == "openshift-ingress" && strings.HasPrefix(deployment.Name, "gateway-") {
+ // The gateway deployment's name contains a hash, which
+ // must be removed in order to be able to define an
+ // exception. Remove this if block when the
+ // corresponding exception is removed.
+ deployment.Name = "gateway"
+ }
// If we find a deployment that is to be excluded from resource checks, we skip looking for their pods.
if isExcluded(excludedBestEffortDeployments, deployment.Namespace, deployment.Name) {
framework.Logf("skipping resource check on deployment (%s/%s) due to presence in BestEffort exclude list", deployment.Namespace, deployment.Name)
diff --git a/test/extended/pods/priorityclasses.go b/test/extended/pods/priorityclasses.go
index 31bdddbc8399..221c21d97602 100644
--- a/test/extended/pods/priorityclasses.go
+++ b/test/extended/pods/priorityclasses.go
@@ -37,6 +37,13 @@ var excludedPriorityClassPods = map[string][]string{
"openshift-operators": {
"servicemesh-operator3-",
},
+
+ // Istio does not provide an option to set priority class on gateway
+ // pods. https://issues.redhat.com/browse/OCPBUGS-54652 tracks setting
+ // the annotation so that we can remove this exclusion.
+ "openshift-ingress": {
+ "gateway-",
+ },
}
var _ = Describe("[sig-arch] Managed cluster should", func() {
diff --git a/test/extended/router/gatewayapicontroller.go b/test/extended/router/gatewayapicontroller.go
index a5564a593c03..c5fbc8c32841 100644
--- a/test/extended/router/gatewayapicontroller.go
+++ b/test/extended/router/gatewayapicontroller.go
@@ -14,6 +14,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
operatoringressv1 "github.com/openshift/api/operatoringress/v1"
+ operatorsv1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1"
exutil "github.com/openshift/origin/test/extended/util"
corev1 "k8s.io/api/core/v1"
@@ -24,24 +25,51 @@ import (
"k8s.io/utils/pointer"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
gatewayapiv1 "sigs.k8s.io/gateway-api/apis/v1"
)
-var (
- requiredCapabilities = []configv1.ClusterVersionCapability{
- configv1.ClusterVersionCapabilityMarketplace,
- configv1.ClusterVersionCapabilityOperatorLifecycleManager,
- }
-)
-
const (
// Max time duration for the DNS resolution
dnsResolutionTimeout = 10 * time.Minute
// Max time duration for the Load balancer address
loadBalancerReadyTimeout = 10 * time.Minute
+ // ingressNamespace is the name of the "openshift-ingress" operand
+ // namespace.
+ ingressNamespace = "openshift-ingress"
+ // istioName is the name of the Istio CR.
+ istioName = "openshift-gateway"
+ // The name of the default gatewayclass, which is used to install OSSM.
+ gatewayClassName = "openshift-default"
+
+ ossmAndOLMResourcesCreated = "ensure-resources-are-created"
+ defaultGatewayclassAccepted = "ensure-default-gatewayclass-is-accepted"
+ customGatewayclassAccepted = "ensure-custom-gatewayclass-is-accepted"
+ lbAndServiceAndDnsrecordAreCreated = "ensure-lb-and-service-and-dnsrecord-are-created"
+ httprouteObjectCreated = "ensure-httproute-object-is-created"
+ gieEnabled = "ensure-gie-is-enabled"
+)
+
+var (
+ requiredCapabilities = []configv1.ClusterVersionCapability{
+ configv1.ClusterVersionCapabilityMarketplace,
+ configv1.ClusterVersionCapabilityOperatorLifecycleManager,
+ }
+ // testNames is a list of names that are used to track when tests are
+ // done in order to check whether it is safe to clean up resources that
+ // these tests share, such as the gatewayclass and Istio CR. These
+ // names are embedded within annotation keys of the form test-%s-done.
+ // Because annotation keys are limited to 63 characters, each of these
+ // names must be no longer than 53 characters.
+ testNames = []string{
+ ossmAndOLMResourcesCreated,
+ defaultGatewayclassAccepted,
+ customGatewayclassAccepted,
+ lbAndServiceAndDnsrecordAreCreated,
+ httprouteObjectCreated,
+ gieEnabled,
+ }
)
var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feature:Router][apigroup:gateway.networking.k8s.io]", g.Ordered, g.Serial, func() {
@@ -57,18 +85,18 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
const (
// The expected OSSM subscription name.
expectedSubscriptionName = "servicemeshoperator3"
+ // The expected OSSM operator name.
+ serviceMeshOperatorName = expectedSubscriptionName + ".openshift-operators"
// Expected Subscription Source
expectedSubscriptionSource = "redhat-operators"
// The expected OSSM operator namespace.
expectedSubscriptionNamespace = "openshift-operators"
- // The gatewayclass name used to create ossm and other gateway api resources.
- gatewayClassName = "openshift-default"
// gatewayClassControllerName is the name that must be used to create a supported gatewayClass.
gatewayClassControllerName = "openshift.io/gateway-controller/v1"
//OSSM Deployment Pod Name
deploymentOSSMName = "servicemesh-operator3"
)
- g.BeforeAll(func() {
+ g.BeforeEach(func() {
isokd, err := isOKD(oc)
if err != nil {
e2e.Failf("Failed to get clusterversion to determine if release is OKD: %v", err)
@@ -89,19 +117,66 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
gatewayClass := buildGatewayClass(gatewayClassName, gatewayClassControllerName)
_, err = oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Create(context.TODO(), gatewayClass, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
- e2e.Failf("Failed to create GatewayClass %q", gatewayClassName)
+ e2e.Failf("Failed to create GatewayClass %q: %v", gatewayClassName, err)
}
})
- g.AfterAll(func() {
- g.By("Cleaning up the GatewayAPI Objects")
- for _, name := range gateways {
- err = oc.AdminGatewayApiClient().GatewayV1().Gateways("openshift-ingress").Delete(context.Background(), name, metav1.DeleteOptions{})
- o.Expect(err).NotTo(o.HaveOccurred(), "Gateway %s could not be deleted", name)
+ g.AfterEach(func() {
+ if !checkAllTestsDone(oc) {
+ e2e.Logf("Skipping cleanup while not all GatewayAPIController tests are done")
+ } else {
+ g.By("Deleting the gateways")
+
+ for _, name := range gateways {
+ err = oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNamespace).Delete(context.Background(), name, metav1.DeleteOptions{})
+ o.Expect(err).NotTo(o.HaveOccurred(), "Gateway %s could not be deleted", name)
+ }
+
+ g.By("Deleting the GatewayClass")
+
+ if err := oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Delete(context.Background(), gatewayClassName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+ e2e.Failf("Failed to delete GatewayClass %q", gatewayClassName)
+ }
+
+ g.By("Deleting the Istio CR")
+
+ // Explicitly deleting the Istio CR should not strictly be
+ // necessary; the Istio CR has an owner reference on the
+ // gatewayclass, and so deleting the gatewayclass should cause
+ // the garbage collector to delete the Istio CR. However, it
+ // has been observed that the Istio CR sometimes does not get
+ // deleted, and so we have an explicit delete command here just
+ // in case. The --ignore-not-found option should prevent errors
+ // if garbage collection has already deleted the object.
+ o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("--ignore-not-found=true", "istio", istioName).Execute()).Should(o.Succeed())
+
+ g.By("Waiting for the istiod pod to be deleted")
+
+ o.Eventually(func(g o.Gomega) {
+ podsList, err := oc.AdminKubeClient().CoreV1().Pods(ingressNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=istiod"})
+ g.Expect(err).NotTo(o.HaveOccurred())
+ g.Expect(podsList.Items).Should(o.BeEmpty())
+ }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(o.Succeed())
+
+ g.By("Deleting the OSSM Operator resources")
+
+ operator, err := operatorsv1.NewForConfigOrDie(oc.AsAdmin().UserConfig()).Operators().Get(context.Background(), serviceMeshOperatorName, metav1.GetOptions{})
+ o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get Operator %q", serviceMeshOperatorName)
+
+ restmapper := oc.AsAdmin().RESTMapper()
+ for _, ref := range operator.Status.Components.Refs {
+ mapping, err := restmapper.RESTMapping(ref.GroupVersionKind().GroupKind())
+ o.Expect(err).NotTo(o.HaveOccurred())
+
+ err = oc.KubeFramework().DynamicClient.Resource(mapping.Resource).Namespace(ref.Namespace).Delete(context.Background(), ref.Name, metav1.DeleteOptions{})
+ o.Expect(err).Should(o.Or(o.Not(o.HaveOccurred()), o.MatchError(apierrors.IsNotFound, "IsNotFound")), "Failed to delete %s %q: %v", ref.GroupVersionKind().Kind, ref.Name, err)
+ }
}
})
g.It("Ensure OSSM and OLM related resources are created after creating GatewayClass", func() {
+ defer markTestDone(oc, ossmAndOLMResourcesCreated)
+
//check the catalogSource
g.By("Check OLM catalogSource, subscription, CSV and Pod")
waitCatalogErr := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 20*time.Minute, false, func(context context.Context) (bool, error) {
@@ -119,7 +194,10 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
// check Subscription
waitVersionErr := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 20*time.Minute, false, func(context context.Context) (bool, error) {
csvName, err = oc.AsAdmin().Run("get").Args("-n", expectedSubscriptionNamespace, "subscription", expectedSubscriptionName, "-o=jsonpath={.status.installedCSV}").Output()
- o.Expect(err).NotTo(o.HaveOccurred())
+ if err != nil {
+ e2e.Logf("Failed to get Subscription %q: %v; retrying...", expectedSubscriptionName, err)
+ return false, nil
+ }
if csvName == "" {
e2e.Logf("Subscription %q doesn't have installed CSV, retrying...", expectedSubscriptionName)
return false, nil
@@ -145,7 +223,7 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
waitErr := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 20*time.Minute, false, func(context context.Context) (bool, error) {
deployOSSM, err := oc.AdminKubeClient().AppsV1().Deployments(expectedSubscriptionNamespace).Get(context, "servicemesh-operator3", metav1.GetOptions{})
if err != nil {
- e2e.Logf("Failed to get OSSM operator deployment %q, retrying...", deploymentOSSMName)
+ e2e.Logf("Failed to get OSSM operator deployment %q: %v; retrying...", deploymentOSSMName, err)
return false, nil
}
if deployOSSM.Status.ReadyReplicas < 1 {
@@ -162,6 +240,7 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
})
g.It("Ensure default gatewayclass is accepted", func() {
+ defer markTestDone(oc, defaultGatewayclassAccepted)
g.By("Check if default GatewayClass is accepted after OLM resources are successful")
errCheck := checkGatewayClass(oc, gatewayClassName)
@@ -169,13 +248,15 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
})
g.It("Ensure custom gatewayclass can be accepted", func() {
+ defer markTestDone(oc, customGatewayclassAccepted)
+
customGatewayClassName := "custom-gatewayclass"
g.By("Create Custom GatewayClass")
gatewayClass := buildGatewayClass(customGatewayClassName, gatewayClassControllerName)
gwc, err := oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Create(context.TODO(), gatewayClass, metav1.CreateOptions{})
if err != nil {
- e2e.Logf("Gateway Class \"custom-gatewayclass\" already exists, or has failed to be created, checking its status")
+ e2e.Logf("Failed to create GatewayClass %q: %v; checking its status...", customGatewayClassName, err)
}
errCheck := checkGatewayClass(oc, customGatewayClassName)
o.Expect(errCheck).NotTo(o.HaveOccurred(), "GatewayClass %q was not installed and accepted", gwc.Name)
@@ -196,6 +277,8 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
})
g.It("Ensure LB, service, and dnsRecord are created for a Gateway object", func() {
+ defer markTestDone(oc, lbAndServiceAndDnsrecordAreCreated)
+
g.By("Ensure default GatewayClass is accepted")
errCheck := checkGatewayClass(oc, gatewayClassName)
o.Expect(errCheck).NotTo(o.HaveOccurred(), "GatewayClass %q was not installed and accepted", gatewayClassName)
@@ -219,6 +302,8 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
})
g.It("Ensure HTTPRoute object is created", func() {
+ defer markTestDone(oc, httprouteObjectCreated)
+
g.By("Ensure default GatewayClass is accepted")
errCheck := checkGatewayClass(oc, gatewayClassName)
o.Expect(errCheck).NotTo(o.HaveOccurred(), "GatewayClass %q was not installed and accepted", gatewayClassName)
@@ -249,6 +334,8 @@ var _ = g.Describe("[sig-network-edge][OCPFeatureGate:GatewayAPIController][Feat
})
g.It("Ensure GIE is enabled after creating an inferencePool CRD", func() {
+ defer markTestDone(oc, gieEnabled)
+
errCheck := checkGatewayClass(oc, gatewayClassName)
o.Expect(errCheck).NotTo(o.HaveOccurred(), "GatewayClass %q was not installed and accepted", gatewayClassName)
@@ -310,28 +397,29 @@ func skipGatewayIfNonCloudPlatform(oc *exutil.CLI) {
}
func waitForIstioHealthy(oc *exutil.CLI) {
- resource := types.NamespacedName{Namespace: "openshift-ingress", Name: "openshift-gateway"}
- err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Minute, false, func(context context.Context) (bool, error) {
- istioStatus, errIstio := oc.AsAdmin().Run("get").Args("-n", resource.Namespace, "istio", resource.Name, "-o=jsonpath={.status.state}").Output()
+ timeout := 20 * time.Minute
+ err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, timeout, false, func(context context.Context) (bool, error) {
+ istioStatus, errIstio := oc.AsAdmin().Run("get").Args("istio", istioName, "-o=jsonpath={.status.state}").Output()
if errIstio != nil {
- e2e.Logf("Failed getting openshift-gateway istio cr status: %v", errIstio)
+ e2e.Logf("Failed to get Istio CR %q: %v; retrying...", istioName, errIstio)
return false, nil
}
if istioStatus != "Healthy" {
- e2e.Logf("Istio CR %q is not healthy, retrying...", resource.Name)
+ e2e.Logf("Istio CR %q is not healthy, retrying...", istioName)
return false, nil
}
- e2e.Logf("Istio CR %q is healthy", resource.Name)
+ e2e.Logf("Istio CR %q is healthy", istioName)
return true, nil
})
- o.Expect(err).NotTo(o.HaveOccurred(), "Istio CR %q did not reach healthy state in time", resource.Name)
+ o.Expect(err).NotTo(o.HaveOccurred(), "Istio CR %q did not reach healthy state within %v", istioName, timeout)
}
func checkGatewayClass(oc *exutil.CLI, name string) error {
- waitErr := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 10*time.Minute, false, func(context context.Context) (bool, error) {
+ timeout := 20 * time.Minute
+ waitErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, timeout, false, func(context context.Context) (bool, error) {
gwc, err := oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Get(context, name, metav1.GetOptions{})
if err != nil {
- e2e.Logf("Failed to get gatewayclass %s, retrying...", name)
+ e2e.Logf("Failed to get gatewayclass %s: %v; retrying...", name, err)
return false, nil
}
for _, condition := range gwc.Status.Conditions {
@@ -345,7 +433,7 @@ func checkGatewayClass(oc *exutil.CLI, name string) error {
return false, nil
})
- o.Expect(waitErr).NotTo(o.HaveOccurred(), "Gatewayclass %s is not accepted", name)
+ o.Expect(waitErr).NotTo(o.HaveOccurred(), "GatewayClass %q was not accepted within %v", name, timeout)
return nil
}
@@ -361,24 +449,23 @@ func buildGatewayClass(name, controllerName string) *gatewayapiv1.GatewayClass {
// createAndCheckGateway build and creates the Gateway.
func createAndCheckGateway(oc *exutil.CLI, gwname, gwclassname, domain string) (*gatewayapiv1.Gateway, error) {
- ingressNameSpace := "openshift-ingress"
-
// Build the gateway object
- gatewaybuild := buildGateway(gwname, ingressNameSpace, gwclassname, "All", domain)
+ gatewaybuild := buildGateway(gwname, ingressNamespace, gwclassname, "All", domain)
// Create the gateway object
- _, errGwObj := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNameSpace).Create(context.TODO(), gatewaybuild, metav1.CreateOptions{})
+ _, errGwObj := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNamespace).Create(context.TODO(), gatewaybuild, metav1.CreateOptions{})
if errGwObj != nil {
return nil, errGwObj
}
// Confirm the gateway is up and running
- return checkGatewayStatus(oc, gwname, ingressNameSpace)
+ return checkGatewayStatus(oc, gwname, ingressNamespace)
}
func checkGatewayStatus(oc *exutil.CLI, gwname, ingressNameSpace string) (*gatewayapiv1.Gateway, error) {
programmedGateway := &gatewayapiv1.Gateway{}
- if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 10*time.Minute, false, func(context context.Context) (bool, error) {
+ timeout := 20 * time.Minute
+ if err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, timeout, false, func(context context.Context) (bool, error) {
gateway, err := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNameSpace).Get(context, gwname, metav1.GetOptions{})
if err != nil {
e2e.Logf("Failed to get gateway %q: %v, retrying...", gwname, err)
@@ -397,7 +484,7 @@ func checkGatewayStatus(oc *exutil.CLI, gwname, ingressNameSpace string) (*gatew
e2e.Logf("Found gateway %q but the controller is still not programmed, retrying...", gwname)
return false, nil
}); err != nil {
- return nil, fmt.Errorf("timed out waiting for gateway %q to become programmed: %w", gwname, err)
+ return nil, fmt.Errorf("timed out after %v waiting for gateway %q to become programmed: %w", timeout, gwname, err)
}
e2e.Logf("Gateway %q successfully programmed!", gwname)
return programmedGateway, nil
@@ -425,11 +512,15 @@ func assertGatewayLoadbalancerReady(oc *exutil.CLI, gwName, gwServiceName string
// check gateway LB service, note that External-IP might be hostname (AWS) or IP (Azure/GCP)
var lbAddress string
err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, loadBalancerReadyTimeout, false, func(context context.Context) (bool, error) {
- lbService, err := oc.AdminKubeClient().CoreV1().Services("openshift-ingress").Get(context, gwServiceName, metav1.GetOptions{})
+ lbService, err := oc.AdminKubeClient().CoreV1().Services(ingressNamespace).Get(context, gwServiceName, metav1.GetOptions{})
if err != nil {
e2e.Logf("Failed to get service %q: %v, retrying...", gwServiceName, err)
return false, nil
}
+ if len(lbService.Status.LoadBalancer.Ingress) == 0 {
+ e2e.Logf("Service %q has no load balancer; retrying...", gwServiceName)
+ return false, nil
+ }
if lbService.Status.LoadBalancer.Ingress[0].Hostname != "" {
lbAddress = lbService.Status.LoadBalancer.Ingress[0].Hostname
} else {
@@ -441,9 +532,9 @@ func assertGatewayLoadbalancerReady(oc *exutil.CLI, gwName, gwServiceName string
}
e2e.Logf("Got load balancer address for service %q: %v", gwServiceName, lbAddress)
- gw, err := oc.AdminGatewayApiClient().GatewayV1().Gateways("openshift-ingress").Get(context, gwName, metav1.GetOptions{})
+ gw, err := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNamespace).Get(context, gwName, metav1.GetOptions{})
if err != nil {
- e2e.Logf("Failed to get gateway %q, retrying...", gwName)
+ e2e.Logf("Failed to get gateway %q: %v; retrying...", gwName, err)
return false, nil
}
for _, gwAddr := range gw.Status.Addresses {
@@ -463,7 +554,7 @@ func assertDNSRecordStatus(oc *exutil.CLI, gatewayName string) {
// find the DNS Record and confirm its zone status is True
err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 10*time.Minute, false, func(context context.Context) (bool, error) {
gatewayDNSRecord := &operatoringressv1.DNSRecord{}
- gatewayDNSRecords, err := oc.AdminIngressClient().IngressV1().DNSRecords("openshift-ingress").List(context, metav1.ListOptions{})
+ gatewayDNSRecords, err := oc.AdminIngressClient().IngressV1().DNSRecords(ingressNamespace).List(context, metav1.ListOptions{})
if err != nil {
e2e.Logf("Failed to list DNS records for gateway %q: %v, retrying...", gatewayName, err)
return false, nil
@@ -495,8 +586,7 @@ func assertDNSRecordStatus(oc *exutil.CLI, gatewayName string) {
// If it can't an error is returned.
func createHttpRoute(oc *exutil.CLI, gwName, routeName, hostname, backendRefname string) (*gatewayapiv1.HTTPRoute, error) {
namespace := oc.Namespace()
- ingressNameSpace := "openshift-ingress"
- gateway, errGwStatus := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNameSpace).Get(context.TODO(), gwName, metav1.GetOptions{})
+ gateway, errGwStatus := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNamespace).Get(context.TODO(), gwName, metav1.GetOptions{})
if errGwStatus != nil || gateway == nil {
e2e.Failf("Unable to create httpRoute, no gateway available during route assertion %v", errGwStatus)
}
@@ -515,7 +605,7 @@ func createHttpRoute(oc *exutil.CLI, gwName, routeName, hostname, backendRefname
o.Expect(echoServiceErr).NotTo(o.HaveOccurred())
// Create the HTTPRoute
- buildHTTPRoute := buildHTTPRoute(routeName, namespace, gateway.Name, ingressNameSpace, hostname, backendRefname)
+ buildHTTPRoute := buildHTTPRoute(routeName, namespace, gateway.Name, ingressNamespace, hostname, backendRefname)
httpRoute, err := oc.GatewayApiClient().GatewayV1().HTTPRoutes(namespace).Create(context.Background(), buildHTTPRoute, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
@@ -523,10 +613,12 @@ func createHttpRoute(oc *exutil.CLI, gwName, routeName, hostname, backendRefname
waitErr := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 4*time.Minute, false, func(context context.Context) (bool, error) {
checkHttpRoute, err := oc.GatewayApiClient().GatewayV1().HTTPRoutes(namespace).Get(context, httpRoute.Name, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
- for _, condition := range checkHttpRoute.Status.Parents[0].Conditions {
- if condition.Type == string(gatewayapiv1.RouteConditionAccepted) {
- if condition.Status == metav1.ConditionTrue {
- return true, nil
+ if len(checkHttpRoute.Status.Parents) > 0 {
+ for _, condition := range checkHttpRoute.Status.Parents[0].Conditions {
+ if condition.Type == string(gatewayapiv1.RouteConditionAccepted) {
+ if condition.Status == metav1.ConditionTrue {
+ return true, nil
+ }
}
}
}
@@ -634,8 +726,7 @@ func buildHTTPRoute(routeName, namespace, parentgateway, parentNamespace, hostna
func assertHttpRouteSuccessful(oc *exutil.CLI, gwName, name string) (*gatewayapiv1.HTTPRoute, error) {
namespace := oc.Namespace()
checkHttpRoute := &gatewayapiv1.HTTPRoute{}
- ingressNameSpace := "openshift-ingress"
- gateway, errGwStatus := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNameSpace).Get(context.TODO(), gwName, metav1.GetOptions{})
+ gateway, errGwStatus := oc.AdminGatewayApiClient().GatewayV1().Gateways(ingressNamespace).Get(context.TODO(), gwName, metav1.GetOptions{})
if errGwStatus != nil || gateway == nil {
e2e.Failf("Unable to assert httproute, no gateway available, error %v", errGwStatus)
}
@@ -721,3 +812,41 @@ func isOKD(oc *exutil.CLI) (bool, error) {
}
return false, nil
}
+
+// annotationKeyForTest returns the key for an annotation on the default
+// gatewayclass that indicates whether the specified test is done.
+func annotationKeyForTest(testName string) string {
+ return fmt.Sprintf("test-%s-done", testName)
+}
+
+// markTestDone adds an annotation to the default gatewayclass that all the
+// GatewayAPIController tests use to indicate that a particular test has ended.
+// These annotations are used to determine whether it is safe to clean up the
+// gatewayclass and other shared resources.
+func markTestDone(oc *exutil.CLI, testName string) {
+ gwc, err := oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Get(context.Background(), gatewayClassName, metav1.GetOptions{})
+ o.Expect(err).NotTo(o.HaveOccurred())
+
+ if gwc.Annotations == nil {
+ gwc.Annotations = map[string]string{}
+ }
+ gwc.Annotations[annotationKeyForTest(testName)] = ""
+ _, err = oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Update(context.Background(), gwc, metav1.UpdateOptions{})
+ o.Expect(err).NotTo(o.HaveOccurred())
+}
+
+// checkAllTestsDone checks the annotations on the default gatewayclass that all
+// the GatewayAPIController tests use to determine whether all the tests are
+// done.
+func checkAllTestsDone(oc *exutil.CLI) bool {
+ gwc, err := oc.AdminGatewayApiClient().GatewayV1().GatewayClasses().Get(context.Background(), gatewayClassName, metav1.GetOptions{})
+ o.Expect(err).NotTo(o.HaveOccurred())
+
+ for _, testName := range testNames {
+ if _, ok := gwc.Annotations[annotationKeyForTest(testName)]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
index f831735f840e..93d64cd8d5ff 100644
--- a/vendor/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
- DefaultVersion = "1.46"
+ DefaultVersion = "1.47"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 4a1b7087d8c0..7164e1eba53d 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
-basePath: "/v1.46"
+basePath: "/v1.47"
info:
title: "Docker Engine API"
- version: "1.46"
+ version: "1.47"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
- If you omit the version-prefix, the current version of the API (v1.46) is used.
- For example, calling `/info` is the same as calling `/v1.46/info`. Using the
+ If you omit the version-prefix, the current version of the API (v1.47) is used.
+ For example, calling `/info` is the same as calling `/v1.47/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -393,7 +393,7 @@ definitions:
Make the mount non-recursively read-only, but still leave the mount recursive
(unless NonRecursive is set to `true` in conjunction).
- Addded in v1.44, before that version all read-only mounts were
+ Added in v1.44, before that version all read-only mounts were
non-recursive by default. To match the previous behaviour this
will default to `true` for clients on versions prior to v1.44.
type: "boolean"
@@ -1384,7 +1384,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.47.
+ > always empty. It must not be used, and will be removed in API v1.48.
type: "string"
example: ""
Domainname:
@@ -1394,7 +1394,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.47.
+ > always empty. It must not be used, and will be removed in API v1.48.
type: "string"
example: ""
User:
@@ -1408,7 +1408,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1419,7 +1419,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1430,7 +1430,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1457,7 +1457,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1468,7 +1468,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1479,7 +1479,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.47.
+ > always false. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1516,7 +1516,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.47.
+ > always empty. It must not be used, and will be removed in API v1.48.
type: "string"
default: ""
example: ""
@@ -1555,7 +1555,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.47.
+ > always omitted. It must not be used, and will be removed in API v1.48.
type: "boolean"
default: false
example: false
@@ -1567,7 +1567,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.47.
+ > always omitted. It must not be used, and will be removed in API v1.48.
type: "string"
default: ""
example: ""
@@ -1601,7 +1601,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.47.
+ > always omitted. It must not be used, and will be removed in API v1.48.
type: "integer"
default: 10
x-nullable: true
@@ -2216,7 +2216,7 @@ definitions:
Created:
description: |
Date and time at which the image was created as a Unix timestamp
- (number of seconds sinds EPOCH).
+ (number of seconds since EPOCH).
type: "integer"
x-nullable: false
example: "1644009612"
@@ -2265,6 +2265,19 @@ definitions:
x-nullable: false
type: "integer"
example: 2
+ Manifests:
+ description: |
+ Manifests is a list of manifests available in this image.
+ It provides a more detailed view of the platform-specific image manifests
+ or other image-attached data like build attestations.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: false
+ x-omitempty: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
AuthConfig:
type: "object"
@@ -2500,7 +2513,7 @@ definitions:
example: false
Attachable:
description: |
- Wheter a global / swarm scope network is manually attachable by regular
+ Whether a global / swarm scope network is manually attachable by regular
containers from workers in swarm mode.
type: "boolean"
default: false
@@ -3723,7 +3736,7 @@ definitions:
example: "json-file"
Options:
description: |
- Driver-specific options for the selectd log driver, specified
+ Driver-specific options for the selected log driver, specified
as key/value pairs.
type: "object"
additionalProperties:
@@ -5318,7 +5331,7 @@ definitions:
description: |
The default (and highest) API version that is supported by the daemon
type: "string"
- example: "1.46"
+ example: "1.47"
MinAPIVersion:
description: |
The minimum API version that is supported by the daemon
@@ -5334,7 +5347,7 @@ definitions:
The version Go used to compile the daemon, and the version of the Go
runtime in use.
type: "string"
- example: "go1.21.13"
+ example: "go1.22.7"
Os:
description: |
The operating system that the daemon is running on ("linux" or "windows")
@@ -6644,6 +6657,120 @@ definitions:
additionalProperties:
type: "string"
+ ImageManifestSummary:
+ x-go-name: "ManifestSummary"
+ description: |
+ ImageManifestSummary represents a summary of an image manifest.
+ type: "object"
+ required: ["ID", "Descriptor", "Available", "Size", "Kind"]
+ properties:
+ ID:
+ description: |
+ ID is the content-addressable ID of an image and is the same as the
+ digest of the image manifest.
+ type: "string"
+ example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+ Descriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ Available:
+ description: Indicates whether all the child content (image config, layers) is fully available locally.
+ type: "boolean"
+ example: true
+ Size:
+ type: "object"
+ x-nullable: false
+ required: ["Content", "Total"]
+ properties:
+ Total:
+ type: "integer"
+ format: "int64"
+ example: 8213251
+ description: |
+ Total is the total size (in bytes) of all the locally present
+ data (both distributable and non-distributable) that's related to
+ this manifest and its children.
+ This equal to the sum of [Content] size AND all the sizes in the
+ [Size] struct present in the Kind-specific data struct.
+ For example, for an image kind (Kind == "image")
+ this would include the size of the image content and unpacked
+ image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+ Content:
+ description: |
+ Content is the size (in bytes) of all the locally present
+ content in the content store (e.g. image config, layers)
+ referenced by this manifest and its children.
+ This only includes blobs in the content store.
+ type: "integer"
+ format: "int64"
+ example: 3987495
+ Kind:
+ type: "string"
+ example: "image"
+ enum:
+ - "image"
+ - "attestation"
+ - "unknown"
+ description: |
+ The kind of the manifest.
+
+ kind | description
+ -------------|-----------------------------------------------------------
+ image | Image manifest that can be used to start a container.
+ attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest.
+ ImageData:
+ description: |
+ The image data for the image manifest.
+ This field is only populated when Kind is "image".
+ type: "object"
+ x-nullable: true
+ x-omitempty: true
+ required: ["Platform", "Containers", "Size", "UnpackedSize"]
+ properties:
+ Platform:
+ $ref: "#/definitions/OCIPlatform"
+ description: |
+ OCI platform of the image. This will be the platform specified in the
+ manifest descriptor from the index/manifest list.
+ If it's not available, it will be obtained from the image config.
+ Containers:
+ description: |
+ The IDs of the containers that are using this image.
+ type: "array"
+ items:
+ type: "string"
+ example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"]
+ Size:
+ type: "object"
+ x-nullable: false
+ required: ["Unpacked"]
+ properties:
+ Unpacked:
+ type: "integer"
+ format: "int64"
+ example: 3987495
+ description: |
+ Unpacked is the size (in bytes) of the locally unpacked
+ (uncompressed) image content that's directly usable by the containers
+ running this image.
+ It's independent of the distributable content - e.g.
+ the image might still have an unpacked data that's still used by
+ some container even when the distributable/compressed content is
+ already gone.
+ AttestationData:
+ description: |
+ The image data for the attestation manifest.
+ This field is only populated when Kind is "attestation".
+ type: "object"
+ x-nullable: true
+ x-omitempty: true
+ required: ["For"]
+ properties:
+ For:
+ description: |
+ The digest of the image manifest that this attestation is for.
+ type: "string"
+ example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+
paths:
/containers/json:
get:
@@ -7585,7 +7712,7 @@ paths:
* Memory usage % = `(used_memory / available_memory) * 100.0`
* cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
* system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
- * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
+ * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
* CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
operationId: "ContainerStats"
produces: ["application/json"]
@@ -8622,6 +8749,11 @@ paths:
description: "Show digest information as a `RepoDigests` field on each image."
type: "boolean"
default: false
+ - name: "manifests"
+ in: "query"
+ description: "Include `Manifests` in the image summary."
+ type: "boolean"
+ default: false
tags: ["Image"]
/build:
post:
@@ -9094,12 +9226,23 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "Image name or ID."
+ description: |
+ Name of the image to push. For example, `registry.example.com/myimage`.
+ The image must be present in the local image store with the same name.
+
+ The name should be provided without tag; if a tag is provided, it
+ is ignored. For example, `registry.example.com/myimage:latest` is
+ considered equivalent to `registry.example.com/myimage`.
+
+ Use the `tag` parameter to specify the tag to push.
type: "string"
required: true
- name: "tag"
in: "query"
- description: "The tag to associate with the image on the registry."
+ description: |
+ Tag of the image to push. For example, `latest`. If no tag is provided,
+ all tags of the given image that are present in the local image store
+ are pushed.
type: "string"
- name: "X-Registry-Auth"
in: "header"
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
index 727da8839cc2..03648fb7b5dc 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -1,6 +1,7 @@
package container // import "github.com/docker/docker/api/types/container"
import (
+ "errors"
"fmt"
"strings"
@@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error {
if policy.MaximumRetryCount < 0 {
msg += " and cannot be negative"
}
- return &errInvalidParameter{fmt.Errorf(msg)}
+ return &errInvalidParameter{errors.New(msg)}
}
return nil
case RestartPolicyOnFailure:
if policy.MaximumRetryCount < 0 {
- return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
+ return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
}
return nil
case "":
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index 0c39ab5f18b5..0914b2a4410c 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool {
}
// GetBoolOrDefault returns a boolean value of the key if the key is present
-// and is intepretable as a boolean value. Otherwise the default value is returned.
+// and is interpretable as a boolean value. Otherwise the default value is returned.
// Error is not nil only if the filter values are not valid boolean or are conflicting.
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
fieldValues, ok := args.fields[key]
diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go
new file mode 100644
index 000000000000..db8a00830e70
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/manifest.go
@@ -0,0 +1,99 @@
+package image
+
+import (
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ManifestKind string
+
+const (
+ ManifestKindImage ManifestKind = "image"
+ ManifestKindAttestation ManifestKind = "attestation"
+ ManifestKindUnknown ManifestKind = "unknown"
+)
+
+type ManifestSummary struct {
+ // ID is the content-addressable ID of an image and is the same as the
+ // digest of the image manifest.
+ //
+ // Required: true
+ ID string `json:"ID"`
+
+ // Descriptor is the OCI descriptor of the image.
+ //
+ // Required: true
+ Descriptor ocispec.Descriptor `json:"Descriptor"`
+
+ // Indicates whether all the child content (image config, layers) is
+ // fully available locally
+ //
+ // Required: true
+ Available bool `json:"Available"`
+
+ // Size is the size information of the content related to this manifest.
+ // Note: These sizes only take the locally available content into account.
+ //
+ // Required: true
+ Size struct {
+ // Content is the size (in bytes) of all the locally present
+ // content in the content store (e.g. image config, layers)
+ // referenced by this manifest and its children.
+ // This only includes blobs in the content store.
+ Content int64 `json:"Content"`
+
+ // Total is the total size (in bytes) of all the locally present
+ // data (both distributable and non-distributable) that's related to
+ // this manifest and its children.
+ // This equal to the sum of [Content] size AND all the sizes in the
+ // [Size] struct present in the Kind-specific data struct.
+ // For example, for an image kind (Kind == ManifestKindImage),
+ // this would include the size of the image content and unpacked
+ // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+ Total int64 `json:"Total"`
+ } `json:"Size"`
+
+ // Kind is the kind of the image manifest.
+ //
+ // Required: true
+ Kind ManifestKind `json:"Kind"`
+
+ // Fields below are specific to the kind of the image manifest.
+
+ // Present only if Kind == ManifestKindImage.
+ ImageData *ImageProperties `json:"ImageData,omitempty"`
+
+ // Present only if Kind == ManifestKindAttestation.
+ AttestationData *AttestationProperties `json:"AttestationData,omitempty"`
+}
+
+type ImageProperties struct {
+ // Platform is the OCI platform object describing the platform of the image.
+ //
+ // Required: true
+ Platform ocispec.Platform `json:"Platform"`
+
+ Size struct {
+ // Unpacked is the size (in bytes) of the locally unpacked
+ // (uncompressed) image content that's directly usable by the containers
+ // running this image.
+ // It's independent of the distributable content - e.g.
+ // the image might still have an unpacked data that's still used by
+ // some container even when the distributable/compressed content is
+ // already gone.
+ //
+ // Required: true
+ Unpacked int64 `json:"Unpacked"`
+ }
+
+ // Containers is an array containing the IDs of the containers that are
+ // using this image.
+ //
+ // Required: true
+ Containers []string `json:"Containers"`
+}
+
+type AttestationProperties struct {
+ // For is the digest of the image manifest that this attestation is for.
+ For digest.Digest `json:"For"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
index 8e32c9af8689..923ebe5a06a0 100644
--- a/vendor/github.com/docker/docker/api/types/image/opts.go
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -76,6 +76,9 @@ type ListOptions struct {
// ContainerCount indicates whether container count should be computed.
ContainerCount bool
+
+ // Manifests indicates whether the image manifests should be returned.
+ Manifests bool
}
// RemoveOptions holds parameters to remove images.
diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go
index f1e3e2ef018f..e87e216a28b3 100644
--- a/vendor/github.com/docker/docker/api/types/image/summary.go
+++ b/vendor/github.com/docker/docker/api/types/image/summary.go
@@ -1,10 +1,5 @@
package image
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// Summary summary
-// swagger:model Summary
type Summary struct {
// Number of containers using this image. Includes both stopped and running
@@ -17,7 +12,7 @@ type Summary struct {
Containers int64 `json:"Containers"`
// Date and time at which the image was created as a Unix timestamp
- // (number of seconds sinds EPOCH).
+ // (number of seconds since EPOCH).
//
// Required: true
Created int64 `json:"Created"`
@@ -47,6 +42,14 @@ type Summary struct {
// Required: true
ParentID string `json:"ParentId"`
+ // Manifests is a list of image manifests available in this image. It
+ // provides a more detailed view of the platform-specific image manifests or
+ // other image-attached data like build attestations.
+ //
+ // WARNING: This is experimental and may change at any time without any backward
+ // compatibility.
+ Manifests []ManifestSummary `json:"Manifests,omitempty"`
+
// List of content-addressable digests of locally available image manifests
// that the image is referenced from. Multiple manifests can refer to the
// same image.
diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
index 97a924e37477..8e383f6e60cb 100644
--- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go
+++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
@@ -34,10 +34,9 @@ type AuthConfig struct {
}
// EncodeAuthConfig serializes the auth configuration as a base64url encoded
-// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
+// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header.
//
-// For details on base64url encoding, see:
-// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
@@ -46,15 +45,14 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
return base64.URLEncoding.EncodeToString(buf), nil
}
-// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
+// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON
// authentication information as sent through the X-Registry-Auth header.
//
-// This function always returns an AuthConfig, even if an error occurs. It is up
+// This function always returns an [AuthConfig], even if an error occurs. It is up
// to the caller to decide if authentication is required, and if the error can
// be ignored.
//
-// For details on base64url encoding, see:
-// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
if authEncoded == "" {
return &AuthConfig{}, nil
@@ -69,7 +67,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
// clients and API versions. Current clients and API versions expect authentication
// to be provided through the X-Registry-Auth header.
//
-// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an
+// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an
// error occurs. It is up to the caller to decide if authentication is required,
// and if the error can be ignored.
func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
index 3eae4b9b297d..1b4be6fffbab 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -122,7 +122,7 @@ type CAConfig struct {
SigningCAKey string `json:",omitempty"`
// If this value changes, and there is no specified signing cert and key,
- // then the swarm is forced to generate a new root certificate ane key.
+ // then the swarm is forced to generate a new root certificate and key.
ForceRotate uint64 `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
index bbd9ff0b8f97..618a4816209a 100644
--- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -414,7 +414,7 @@ type Info struct {
// the Volume has not been successfully created yet.
VolumeID string `json:",omitempty"`
- // AccessibleTopolgoy is the topology this volume is actually accessible
+ // AccessibleTopology is the topology this volume is actually accessible
// from.
AccessibleTopology []Topology `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
index a9cc1e21e5dd..bef679431dce 100644
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -11,6 +11,11 @@ import (
)
// ImageList returns a list of images in the docker host.
+//
+// Experimental: Setting the [options.Manifest] will populate
+// [image.Summary.Manifests] with information about image manifests.
+// This is experimental and might change in the future without any backward
+// compatibility.
func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
var images []image.Summary
@@ -47,6 +52,9 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]
if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
query.Set("shared-size", "1")
}
+ if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") {
+ query.Set("manifests", "1")
+ }
serverResp, err := cli.get(ctx, "/images/json", query, nil)
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index 035160c834e4..8d2c8857fb03 100644
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -290,7 +290,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
}
// Stream is an io.Writer for output with utilities to get the output's file
-// descriptor and to detect wether it's a terminal.
+// descriptor and to detect whether it's a terminal.
//
// it is subset of the streams.Out type in
// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
index 3792c67a9e45..3ea3012b188b 100644
--- a/vendor/github.com/docker/docker/pkg/pools/pools.go
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -124,7 +124,7 @@ func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
}
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
-// into the pool and closes the writer if it's an io.Writecloser.
+// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
return ioutils.NewWriteCloserWrapper(w, func() error {
buf.Flush()
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
index facfbb3126f1..b877ecc5a942 100644
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -6,7 +6,7 @@ import (
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
-// It will returns a nil slice and nil error if the xattr is not set.
+// It returns a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
sysErr := func(err error) ([]byte, error) {
return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err}
diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
index ffc7b992b3c7..f4e7dbf37b36 100644
--- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
@@ -1,7 +1,7 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
- image_family: freebsd-13-2
+ image_family: freebsd-14-1
install_script:
- pkg update -f
- pkg install -y go
@@ -9,5 +9,6 @@ freebsd_task:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
deleted file mode 100644
index fad895851e56..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/.editorconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-root = true
-
-[*.go]
-indent_style = tab
-indent_size = 4
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-insert_final_newline = true
-trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
deleted file mode 100644
index 32f1001be0a5..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
index 391cc076b126..daea9dd6d6d2 100644
--- a/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -5,3 +5,6 @@
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
+
+/test/kqueue
+/test/a.out
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index e0e57575496c..fa854785d0f5 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,8 +1,36 @@
# Changelog
-Unreleased
-----------
-Nothing yet.
+1.8.0 2023-10-31
+----------------
+
+### Additions
+
+- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
+
+### Changes and fixes
+
+- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
+
+- kqueue: ignore events with Ident=0 ([#590])
+
+- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
+
+- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
+
+- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
+
+- inotify: fix panic when calling Remove() in a goroutine ([#650])
+
+- fen: allow watching subdirectories of watched directories ([#621])
+
+[#590]: https://github.com/fsnotify/fsnotify/pull/590
+[#610]: https://github.com/fsnotify/fsnotify/pull/610
+[#617]: https://github.com/fsnotify/fsnotify/pull/617
+[#619]: https://github.com/fsnotify/fsnotify/pull/619
+[#620]: https://github.com/fsnotify/fsnotify/pull/620
+[#621]: https://github.com/fsnotify/fsnotify/pull/621
+[#625]: https://github.com/fsnotify/fsnotify/pull/625
+[#650]: https://github.com/fsnotify/fsnotify/pull/650
1.7.0 - 2023-10-22
------------------
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index ea379759d51a..e4ac2a2fffdc 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,7 +1,7 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:
-- To avoid "wasted" work, please discus changes on the issue tracker first. You
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
can just send PRs, but they may end up being rejected for one reason or the
other.
@@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like
Use the `-short` flag to make the "stress test" run faster.
+Writing new tests
+-----------------
+Scripts in the testdata directory allow creating test cases in a "shell-like"
+syntax. The basic format is:
+
+ script
+
+ Output:
+ desired output
+
+For example:
+
+ # Create a new empty file with some data.
+ watch /
+ echo data >/file
+
+ Output:
+ create /file
+ write /file
+
+Just create a new file to add a new test; select which tests to run with
+`-run TestScript/[path]`.
+
+script
+------
+The script is a "shell-like" script:
+
+ cmd arg arg
+
+Comments are supported with `#`:
+
+ # Comment
+ cmd arg arg # Comment
+
+All operations are done in a temp directory; a path like "/foo" is rewritten to
+"/tmp/TestFoo/foo".
+
+Arguments can be quoted with `"` or `'`; there are no escapes and they're
+functionally identical right now, but this may change in the future, so best to
+assume shell-like rules.
+
+ touch "/file with spaces"
+
+End-of-line escapes with `\` are not supported.
+
+### Supported commands
+
+ watch path [ops] # Watch the path, reporting events for it. Nothing is
+ # watched by default. Optionally a list of ops can be
+ # given, as with AddWith(path, WithOps(...)).
+ unwatch path # Stop watching the path.
+ watchlist n # Assert watchlist length.
+
+ stop # Stop running the script; for debugging.
+ debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
+ parallel by default, so -parallel=1 is probably a good
+ idea).
+
+ touch path
+ mkdir [-p] dir
+ ln -s target link # Only ln -s supported.
+ mkfifo path
+ mknod dev path
+ mv src dst
+ rm [-r] path
+ chmod mode path # Octal only
+ sleep time-in-ms
+
+ cat path # Read path (does nothing with the data; just reads it).
+ echo str >>path # Append "str" to "path".
+ echo str >path # Truncate "path" and write "str".
+
+ require reason # Skip the test if "reason" is true; "skip" and
+ skip reason # "require" behave identical; it supports both for
+ # readability. Possible reasons are:
+ #
+ # always Always skip this test.
+ # symlink Symlinks are supported (requires admin
+ # permissions on Windows).
+ # mkfifo Platform doesn't support FIFO named sockets.
+ # mknod Platform doesn't support device nodes.
+
+
+output
+------
+After `Output:` the desired output is given; this is indented by convention, but
+that's not required.
+
+The format of that is:
+
+ # Comment
+ event path # Comment
+
+ system:
+ event path
+ system2:
+ event path
+
+Every event is one line, and any whitespace between the event and path are
+ignored. The path can optionally be surrounded in ". Anything after a "#" is
+ignored.
+
+Platform-specific tests can be added after GOOS; for example:
+
+ watch /
+ touch /file
+
+ Output:
+ # Tested if nothing else matches
+ create /file
+
+ # Windows-specific test.
+ windows:
+ write /file
+
+You can specify multiple platforms with a comma (e.g. "windows, linux:").
+"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
+
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index 28497f1dd8e6..c349c326c718 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,8 +1,8 @@
//go:build solaris
-// +build solaris
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
+// FEN backend for illumos (supported) and Solaris (untested, but should work).
+//
+// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
package fsnotify
@@ -12,150 +12,33 @@ import (
"os"
"path/filepath"
"sync"
+ "time"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type fen struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
mu sync.Mutex
port *unix.EventPort
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- dirs map[string]struct{} // Explicitly watched directories
- watches map[string]struct{} // Explicitly watched non-directories
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ dirs map[string]Op // Explicitly watched directories
+ watches map[string]Op // Explicitly watched non-directories
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
+func newBackend(ev chan Event, errs chan error) (backend, error) {
+ return newBufferedBackend(0, ev, errs)
}
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
- w := &Watcher{
- Events: make(chan Event, sz),
- Errors: make(chan error),
- dirs: make(map[string]struct{}),
- watches: make(map[string]struct{}),
+func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
+ w := &fen{
+ Events: ev,
+ Errors: errs,
+ dirs: make(map[string]Op),
+ watches: make(map[string]Op),
done: make(chan struct{}),
}
@@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
+func (w *fen) sendEvent(name string, op Op) (sent bool) {
select {
- case w.Events <- Event{Name: name, Op: op}:
- return true
case <-w.done:
return false
+ case w.Events <- Event{Name: name, Op: op}:
+ return true
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
-func (w *Watcher) sendError(err error) (sent bool) {
- select {
- case w.Errors <- err:
+func (w *fen) sendError(err error) (sent bool) {
+ if err == nil {
return true
+ }
+ select {
case <-w.done:
return false
+ case w.Errors <- err:
+ return true
}
}
-func (w *Watcher) isClosed() bool {
+func (w *fen) isClosed() bool {
select {
case <-w.done:
return true
@@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool {
}
}
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
+func (w *fen) Close() error {
// Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
@@ -213,60 +98,21 @@ func (w *Watcher) Close() error {
return w.port.Close()
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *fen) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *fen) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
- if w.port.PathIsWatched(name) {
- return nil
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
}
- _ = getOptions(opts...)
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
@@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.dirs[name] = struct{}{}
+ w.dirs[name] = with.op
w.mu.Unlock()
return nil
}
@@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.watches[name] = struct{}{}
+ w.watches[name] = with.op
w.mu.Unlock()
return nil
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *fen) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
@@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error {
}
// readEvents contains the main loop that runs in a goroutine watching for events.
-func (w *Watcher) readEvents() {
+func (w *fen) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
@@ -382,17 +224,19 @@ func (w *Watcher) readEvents() {
continue
}
+ if debug {
+ internal.Debug(pevent.Path, pevent.Events)
+ }
+
err = w.handleEvent(&pevent)
- if err != nil {
- if !w.sendError(err) {
- return
- }
+ if !w.sendError(err) {
+ return
}
}
}
}
-func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
@@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
-func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+func (w *fen) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
@@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
}
if events&unix.FILE_MODIFIED != 0 {
- if fmode.IsDir() {
- if watchedDir {
- if err := w.updateDirectory(path); err != nil {
- return err
- }
- } else {
- if !w.sendEvent(path, Write) {
- return nil
- }
+ if fmode.IsDir() && watchedDir {
+ if err := w.updateDirectory(path); err != nil {
+ return err
}
} else {
if !w.sendEvent(path, Write) {
@@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
return nil
}
-func (w *Watcher) updateDirectory(path string) error {
+func (w *fen) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
@@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error {
return err
}
err = w.associateFile(path, finfo, false)
- if err != nil {
- if !w.sendError(err) {
- return nil
- }
+ if !w.sendError(err) {
+ return nil
}
if !w.sendEvent(path, Create) {
return nil
@@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error {
return nil
}
-func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
+func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
@@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
- if err != nil && err != unix.ENOENT {
+ if err != nil && !errors.Is(err, unix.ENOENT) {
return err
}
}
- // FILE_NOFOLLOW means we watch symlinks themselves rather than their
- // targets.
- events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
- if follow {
- // We *DO* follow symlinks for explicitly watched entries.
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
+
+ var events int
+ if !follow {
+ // Watch symlinks themselves rather than their targets unless this entry
+ // is explicitly watched.
+ events |= unix.FILE_NOFOLLOW
+ }
+ if true { // TODO: implement withOps()
+ events |= unix.FILE_MODIFIED
}
- return w.port.AssociatePath(path, stat,
- events,
- stat.Mode())
+ if true {
+ events |= unix.FILE_ATTRIB
+ }
+ return w.port.AssociatePath(path, stat, events, stat.Mode())
}
-func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *fen) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string {
return entries
}
+
+func (w *fen) xSupports(op Op) bool {
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
index 921c1c1e4012..36c311694cd5 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -1,8 +1,4 @@
//go:build linux && !appengine
-// +build linux,!appengine
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -10,127 +6,20 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"strings"
"sync"
+ "time"
"unsafe"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type inotify struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
@@ -139,8 +28,26 @@ type Watcher struct {
inotifyFile *os.File
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- closeMu sync.Mutex
+ doneMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
+
+ // Store rename cookies in an array, with the index wrapping to 0. Almost
+ // all of the time what we get is a MOVED_FROM to set the cookie and the
+ // next event inotify sends will be MOVED_TO to read it. However, this is
+ // not guaranteed – as described in inotify(7) – and we may get other events
+ // between the two MOVED_* events (including other MOVED_* ones).
+ //
+ // A second issue is that moving a file outside the watched directory will
+ // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
+ // read and delete it. So just storing it in a map would slowly leak memory.
+ //
+ // Doing it like this gives us a simple fast LRU-cache that won't allocate.
+ // Ten items should be more than enough for our purpose, and a loop over
+ // such a short array is faster than a map access anyway (not that it hugely
+ // matters since we're talking about hundreds of ns at the most, but still).
+ cookies [10]koekje
+ cookieIndex uint8
+ cookiesMu sync.Mutex
}
type (
@@ -150,9 +57,14 @@ type (
path map[string]uint32 // pathname → wd
}
watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
- path string // Watch path.
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+ path string // Watch path.
+ recurse bool // Recursion with ./...?
+ }
+ koekje struct {
+ cookie uint32
+ path string
}
)
@@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) {
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
- delete(w.path, w.wd[wd].path)
+ watch := w.wd[wd] // Could have had Remove() called. See #616.
+ if watch == nil {
+ return
+ }
+ delete(w.path, watch.path)
delete(w.wd, wd)
}
-func (w *watches) removePath(path string) (uint32, bool) {
+func (w *watches) removePath(path string) ([]uint32, error) {
w.mu.Lock()
defer w.mu.Unlock()
+ path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
- return 0, false
+ return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
+ }
+
+ watch := w.wd[wd]
+ if recurse && !watch.recurse {
+ return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
}
delete(w.path, path)
delete(w.wd, wd)
+ if !watch.recurse {
+ return []uint32{wd}, nil
+ }
- return wd, true
+ wds := make([]uint32, 0, 8)
+ wds = append(wds, wd)
+ for p, rwd := range w.path {
+ if filepath.HasPrefix(p, path) {
+ delete(w.path, p)
+ delete(w.wd, rwd)
+ wds = append(wds, rwd)
+ }
+ }
+ return wds, nil
}
func (w *watches) byPath(path string) *watch {
@@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
return nil
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
+func newBackend(ev chan Event, errs chan error) (backend, error) {
+ return newBufferedBackend(0, ev, errs)
}
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
@@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
return nil, errno
}
- w := &Watcher{
+ w := &inotify{
+ Events: ev,
+ Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
- Events: make(chan Event, sz),
- Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
@@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
}
// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
+func (w *inotify) sendEvent(e Event) bool {
select {
- case w.Events <- e:
- return true
case <-w.done:
return false
+ case w.Events <- e:
+ return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
+func (w *inotify) sendError(err error) bool {
+ if err == nil {
return true
+ }
+ select {
case <-w.done:
return false
+ case w.Errors <- err:
+ return true
}
}
-func (w *Watcher) isClosed() bool {
+func (w *inotify) isClosed() bool {
select {
case <-w.done:
return true
@@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool {
}
}
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
- w.closeMu.Lock()
+func (w *inotify) Close() error {
+ w.doneMu.Lock()
if w.isClosed() {
- w.closeMu.Unlock()
+ w.doneMu.Unlock()
return nil
}
close(w.done)
- w.closeMu.Unlock()
+ w.doneMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -323,78 +250,104 @@ func (w *Watcher) Close() error {
return nil
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
-
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *inotify) Add(name string) error { return w.AddWith(name) }
+
+func (w *inotify) AddWith(path string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), path)
+ }
+
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
- name = filepath.Clean(name)
- _ = getOptions(opts...)
+ path, recurse := recursivePath(path)
+ if recurse {
+ return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ if root == path {
+ return fmt.Errorf("fsnotify: not a directory: %q", path)
+ }
+ return nil
+ }
- var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
- unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
- unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+ // Send a Create event when adding new directory from a recursive
+ // watch; this is for "mkdir -p one/two/three". Usually all those
+ // directories will be created before we can set up watchers on the
+ // subdirectories, so only "one" would be sent as a Create event and
+ // not "one/two" and "one/two/three" (inotifywait -r has the same
+ // problem).
+ if with.sendCreate && root != path {
+ w.sendEvent(Event{Name: root, Op: Create})
+ }
+
+ return w.add(root, with, true)
+ })
+ }
- return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
+ return w.add(path, with, false)
+}
+
+func (w *inotify) add(path string, with withOpts, recurse bool) error {
+ var flags uint32
+ if with.noFollow {
+ flags |= unix.IN_DONT_FOLLOW
+ }
+ if with.op.Has(Create) {
+ flags |= unix.IN_CREATE
+ }
+ if with.op.Has(Write) {
+ flags |= unix.IN_MODIFY
+ }
+ if with.op.Has(Remove) {
+ flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
+ }
+ if with.op.Has(Rename) {
+ flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
+ }
+ if with.op.Has(Chmod) {
+ flags |= unix.IN_ATTRIB
+ }
+ if with.op.Has(xUnportableOpen) {
+ flags |= unix.IN_OPEN
+ }
+ if with.op.Has(xUnportableRead) {
+ flags |= unix.IN_ACCESS
+ }
+ if with.op.Has(xUnportableCloseWrite) {
+ flags |= unix.IN_CLOSE_WRITE
+ }
+ if with.op.Has(xUnportableCloseRead) {
+ flags |= unix.IN_CLOSE_NOWRITE
+ }
+ return w.register(path, flags, recurse)
+}
+
+func (w *inotify) register(path string, flags uint32, recurse bool) error {
+ return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
- wd, err := unix.InotifyAddWatch(w.fd, name, flags)
+ wd, err := unix.InotifyAddWatch(w.fd, path, flags)
if wd == -1 {
return nil, err
}
if existing == nil {
return &watch{
- wd: uint32(wd),
- path: name,
- flags: flags,
+ wd: uint32(wd),
+ path: path,
+ flags: flags,
+ recurse: recurse,
}, nil
}
@@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
})
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *inotify) Remove(name string) error {
if w.isClosed() {
return nil
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
return w.remove(filepath.Clean(name))
}
-func (w *Watcher) remove(name string) error {
- wd, ok := w.watches.removePath(name)
- if !ok {
- return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
- }
-
- success, errno := unix.InotifyRmWatch(w.fd, wd)
- if success == -1 {
- // TODO: Perhaps it's not helpful to return an error here in every case;
- // The only two possible errors are:
- //
- // - EBADF, which happens when w.fd is not a valid file descriptor
- // of any kind.
- // - EINVAL, which is when fd is not an inotify descriptor or wd
- // is not a valid watch descriptor. Watch descriptors are
- // invalidated when they are removed explicitly or implicitly;
- // explicitly by inotify_rm_watch, implicitly when the file they
- // are watching is deleted.
- return errno
+func (w *inotify) remove(name string) error {
+ wds, err := w.watches.removePath(name)
+ if err != nil {
+ return err
+ }
+
+ for _, wd := range wds {
+ _, err := unix.InotifyRmWatch(w.fd, wd)
+ if err != nil {
+ // TODO: Perhaps it's not helpful to return an error here in every
+ // case; the only two possible errors are:
+ //
+ // EBADF, which happens when w.fd is not a valid file descriptor of
+ // any kind.
+ //
+ // EINVAL, which is when fd is not an inotify descriptor or wd is
+ // not a valid watch descriptor. Watch descriptors are invalidated
+ // when they are removed explicitly or implicitly; explicitly by
+ // inotify_rm_watch, implicitly when the file they are watching is
+ // deleted.
+ return err
+ }
}
return nil
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *inotify) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string {
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
+func (w *inotify) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
@@ -506,15 +454,17 @@ func (w *Watcher) readEvents() {
continue
}
- var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
+ var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
var (
// Point "raw" to the event in the buffer
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask = uint32(raw.Mask)
nameLen = uint32(raw.Len)
+ // Move to the next event in the buffer
+ next = func() { offset += unix.SizeofInotifyEvent + nameLen }
)
if mask&unix.IN_Q_OVERFLOW != 0 {
@@ -523,21 +473,53 @@ func (w *Watcher) readEvents() {
}
}
- // If the event happened to the watched directory or the watched file, the kernel
- // doesn't append the filename to the event, but we would like to always fill the
- // the "Name" field with a valid filename. We retrieve the path of the watch from
- // the "paths" map.
+ /// If the event happened to the watched directory or the watched
+ /// file, the kernel doesn't append the filename to the event, but
+ /// we would like to always fill the the "Name" field with a valid
+ /// filename. We retrieve the path of the watch from the "paths"
+ /// map.
watch := w.watches.byWd(uint32(raw.Wd))
+ /// Can be nil if Remove() was called in another goroutine for this
+ /// path inbetween reading the events from the kernel and reading
+ /// the internal state. Not much we can do about it, so just skip.
+ /// See #616.
+ if watch == nil {
+ next()
+ continue
+ }
+
+ name := watch.path
+ if nameLen > 0 {
+ /// Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ /// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ if debug {
+ internal.Debug(name, raw.Mask, raw.Cookie)
+ }
+
+ if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
+ next()
+ continue
+ }
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
+
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+ if watch.recurse {
+ next() // Do nothing
+ continue
+ }
+
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
@@ -546,34 +528,69 @@ func (w *Watcher) readEvents() {
}
}
- var name string
- if watch != nil {
- name = watch.path
- }
- if nameLen > 0 {
- // Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
- // The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ /// Skip if we're watching both this path and the parent; the parent
+ /// will already send a delete so no need to do it twice.
+ if mask&unix.IN_DELETE_SELF != 0 {
+ if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
+ next()
+ continue
+ }
}
- event := w.newEvent(name, mask)
+ ev := w.newEvent(name, mask, raw.Cookie)
+ // Need to update watch path for recurse.
+ if watch.recurse {
+ isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
+ /// New directory created: set up watch on it.
+ if isDir && ev.Has(Create) {
+ err := w.register(ev.Name, watch.flags, true)
+ if !w.sendError(err) {
+ return
+ }
- // Send the events that are not ignored on the events channel
- if mask&unix.IN_IGNORED == 0 {
- if !w.sendEvent(event) {
- return
+ // This was a directory rename, so we need to update all
+ // the children.
+ //
+ // TODO: this is of course pretty slow; we should use a
+ // better data structure for storing all of this, e.g. store
+ // children in the watch. I have some code for this in my
+ // kqueue refactor we can use in the future. For now I'm
+ // okay with this as it's not publicly available.
+ // Correctness first, performance second.
+ if ev.renamedFrom != "" {
+ w.watches.mu.Lock()
+ for k, ww := range w.watches.wd {
+ if k == watch.wd || ww.path == ev.Name {
+ continue
+ }
+ if strings.HasPrefix(ww.path, ev.renamedFrom) {
+ ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
+ w.watches.wd[k] = ww
+ }
+ }
+ w.watches.mu.Unlock()
+ }
}
}
- // Move to the next event in the buffer
- offset += unix.SizeofInotifyEvent + nameLen
+ /// Send the events that are not ignored on the events channel
+ if !w.sendEvent(ev) {
+ return
+ }
+ next()
}
}
}
-// newEvent returns an platform-independent Event based on an inotify mask.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *inotify) isRecursive(path string) bool {
+ ww := w.watches.byPath(path)
+ if ww == nil { // path could be a file, so also check the Dir.
+ ww = w.watches.byPath(filepath.Dir(path))
+ }
+ return ww != nil && ww.recurse
+}
+
+func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
@@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
+ if mask&unix.IN_OPEN == unix.IN_OPEN {
+ e.Op |= xUnportableOpen
+ }
+ if mask&unix.IN_ACCESS == unix.IN_ACCESS {
+ e.Op |= xUnportableRead
+ }
+ if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
+ e.Op |= xUnportableCloseWrite
+ }
+ if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
+ e.Op |= xUnportableCloseRead
+ }
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
+
+ if cookie != 0 {
+ if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ w.cookiesMu.Lock()
+ w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
+ w.cookieIndex++
+ if w.cookieIndex > 9 {
+ w.cookieIndex = 0
+ }
+ w.cookiesMu.Unlock()
+ } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ w.cookiesMu.Lock()
+ var prev string
+ for _, c := range w.cookies {
+ if c.cookie == cookie {
+ prev = c.path
+ break
+ }
+ }
+ w.cookiesMu.Unlock()
+ e.renamedFrom = prev
+ }
+ }
return e
}
+
+func (w *inotify) xSupports(op Op) bool {
+ return true // Supports everything.
+}
+
+func (w *inotify) state() {
+ w.watches.mu.Lock()
+ defer w.watches.mu.Unlock()
+ for wd, ww := range w.watches.wd {
+ fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
index 063a0915a07a..d8de5ab76fdd 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -1,8 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
-// +build freebsd openbsd netbsd dragonfly darwin
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -11,174 +7,195 @@ import (
"fmt"
"os"
"path/filepath"
+ "runtime"
"sync"
+ "time"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type kqueue struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
- done chan struct{}
- kq int // File descriptor (as returned by the kqueue() syscall).
- closepipe [2]int // Pipe used for closing.
- mu sync.Mutex // Protects access to watcher data
- watches map[string]int // Watched file descriptors (key: path).
- watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
- userWatches map[string]struct{} // Watches added with Watcher.Add()
- dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
- paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
- fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
- isClosed bool // Set to true when Close() is first called
+ kq int // File descriptor (as returned by the kqueue() syscall).
+ closepipe [2]int // Pipe used for closing kq.
+ watches *watches
+ done chan struct{}
+ doneMu sync.Mutex
}
-type pathInfo struct {
- name string
- isDir bool
+type (
+ watches struct {
+ mu sync.RWMutex
+ wd map[int]watch // wd → watch
+ path map[string]int // pathname → wd
+ byDir map[string]map[int]struct{} // dirname(path) → wd
+ seen map[string]struct{} // Keep track of if we know this file exists.
+ byUser map[string]struct{} // Watches added with Watcher.Add()
+ }
+ watch struct {
+ wd int
+ name string
+ linkName string // In case of links; name is the target, and this is the link.
+ isDir bool
+ dirFlags uint32
+ }
+)
+
+func newWatches() *watches {
+ return &watches{
+ wd: make(map[int]watch),
+ path: make(map[string]int),
+ byDir: make(map[string]map[int]struct{}),
+ seen: make(map[string]struct{}),
+ byUser: make(map[string]struct{}),
+ }
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
+func (w *watches) listPaths(userOnly bool) []string {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ if userOnly {
+ l := make([]string, 0, len(w.byUser))
+ for p := range w.byUser {
+ l = append(l, p)
+ }
+ return l
+ }
+
+ l := make([]string, 0, len(w.path))
+ for p := range w.path {
+ l = append(l, p)
+ }
+ return l
}
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func (w *watches) watchesInDir(path string) []string {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ l := make([]string, 0, 4)
+ for fd := range w.byDir[path] {
+ info := w.wd[fd]
+ if _, ok := w.byUser[info.name]; !ok {
+ l = append(l, info.name)
+ }
+ }
+ return l
+}
+
+// Mark path as added by the user.
+func (w *watches) addUserWatch(path string) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.byUser[path] = struct{}{}
+}
+
+func (w *watches) addLink(path string, fd int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.path[path] = fd
+ w.seen[path] = struct{}{}
+}
+
+func (w *watches) add(path, linkPath string, fd int, isDir bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.path[path] = fd
+ w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
+
+ parent := filepath.Dir(path)
+ byDir, ok := w.byDir[parent]
+ if !ok {
+ byDir = make(map[int]struct{}, 1)
+ w.byDir[parent] = byDir
+ }
+ byDir[fd] = struct{}{}
+}
+
+func (w *watches) byWd(fd int) (watch, bool) {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ info, ok := w.wd[fd]
+ return info, ok
+}
+
+func (w *watches) byPath(path string) (watch, bool) {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ info, ok := w.wd[w.path[path]]
+ return info, ok
+}
+
+func (w *watches) updateDirFlags(path string, flags uint32) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ fd := w.path[path]
+ info := w.wd[fd]
+ info.dirFlags = flags
+ w.wd[fd] = info
+}
+
+func (w *watches) remove(fd int, path string) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ isDir := w.wd[fd].isDir
+ delete(w.path, path)
+ delete(w.byUser, path)
+
+ parent := filepath.Dir(path)
+ delete(w.byDir[parent], fd)
+
+ if len(w.byDir[parent]) == 0 {
+ delete(w.byDir, parent)
+ }
+
+ delete(w.wd, fd)
+ delete(w.seen, path)
+ return isDir
+}
+
+func (w *watches) markSeen(path string, exists bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if exists {
+ w.seen[path] = struct{}{}
+ } else {
+ delete(w.seen, path)
+ }
+}
+
+func (w *watches) seenBefore(path string) bool {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ _, ok := w.seen[path]
+ return ok
+}
+
+func newBackend(ev chan Event, errs chan error) (backend, error) {
+ return newBufferedBackend(0, ev, errs)
+}
+
+func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
- w := &Watcher{
- kq: kq,
- closepipe: closepipe,
- watches: make(map[string]int),
- watchesByDir: make(map[string]map[int]struct{}),
- dirFlags: make(map[string]uint32),
- paths: make(map[int]pathInfo),
- fileExists: make(map[string]struct{}),
- userWatches: make(map[string]struct{}),
- Events: make(chan Event, sz),
- Errors: make(chan error),
- done: make(chan struct{}),
+ w := &kqueue{
+ Events: ev,
+ Errors: errs,
+ kq: kq,
+ closepipe: closepipe,
+ done: make(chan struct{}),
+ watches: newWatches(),
}
go w.readEvents()
@@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
unix.Close(kq)
return kq, closepipe, err
}
+ unix.CloseOnExec(closepipe[0])
+ unix.CloseOnExec(closepipe[1])
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
@@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
}
// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
+func (w *kqueue) sendEvent(e Event) bool {
select {
- case w.Events <- e:
- return true
case <-w.done:
return false
+ case w.Events <- e:
+ return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
+func (w *kqueue) sendError(err error) bool {
+ if err == nil {
+ return true
+ }
select {
+ case <-w.done:
+ return false
case w.Errors <- err:
return true
+ }
+}
+
+func (w *kqueue) isClosed() bool {
+ select {
case <-w.done:
+ return true
+ default:
return false
}
}
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) Close() error {
+ w.doneMu.Lock()
+ if w.isClosed() {
+ w.doneMu.Unlock()
return nil
}
- w.isClosed = true
+ close(w.done)
+ w.doneMu.Unlock()
- // copy paths to remove while locked
- pathsToRemove := make([]string, 0, len(w.watches))
- for name := range w.watches {
- pathsToRemove = append(pathsToRemove, name)
- }
- w.mu.Unlock() // Unlock before calling Remove, which also locks
+ pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
- close(w.done)
-
return nil
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *kqueue) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
- _ = getOptions(opts...)
+func (w *kqueue) AddWith(name string, opts ...addOpt) error {
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
+
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
- w.mu.Lock()
- w.userWatches[name] = struct{}{}
- w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
- return err
+ if err != nil {
+ return err
+ }
+ w.watches.addUserWatch(name)
+ return nil
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *kqueue) Remove(name string) error {
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
return w.remove(name, true)
}
-func (w *Watcher) remove(name string, unwatchFiles bool) error {
- name = filepath.Clean(name)
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) remove(name string, unwatchFiles bool) error {
+ if w.isClosed() {
return nil
}
- watchfd, ok := w.watches[name]
- w.mu.Unlock()
+
+ name = filepath.Clean(name)
+ info, ok := w.watches.byPath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
+ err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
- unix.Close(watchfd)
-
- w.mu.Lock()
- isDir := w.paths[watchfd].isDir
- delete(w.watches, name)
- delete(w.userWatches, name)
-
- parentName := filepath.Dir(name)
- delete(w.watchesByDir[parentName], watchfd)
-
- if len(w.watchesByDir[parentName]) == 0 {
- delete(w.watchesByDir, parentName)
- }
+ unix.Close(info.wd)
- delete(w.paths, watchfd)
- delete(w.dirFlags, name)
- delete(w.fileExists, name)
- w.mu.Unlock()
+ isDir := w.watches.remove(info.wd, name)
// Find all watched paths that are in this directory that are not external.
if unwatchFiles && isDir {
- var pathsToRemove []string
- w.mu.Lock()
- for fd := range w.watchesByDir[name] {
- path := w.paths[fd]
- if _, ok := w.userWatches[path.name]; !ok {
- pathsToRemove = append(pathsToRemove, path.name)
- }
- }
- w.mu.Unlock()
+ pathsToRemove := w.watches.watchesInDir(name)
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
@@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error {
return nil
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
- if w.isClosed {
+func (w *kqueue) WatchList() []string {
+ if w.isClosed() {
return nil
}
-
- entries := make([]string, 0, len(w.userWatches))
- for pathname := range w.userWatches {
- entries = append(entries, pathname)
- }
-
- return entries
+ return w.watches.listPaths(true)
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
@@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
- var isDir bool
- name = filepath.Clean(name)
-
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
+ if w.isClosed() {
return "", ErrClosed
}
- watchfd, alreadyWatching := w.watches[name]
- // We already have a watch, but we can still override flags.
- if alreadyWatching {
- isDir = w.paths[watchfd].isDir
- }
- w.mu.Unlock()
+ name = filepath.Clean(name)
+
+ info, alreadyWatching := w.watches.byPath(name)
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
- // Don't watch sockets or named pipes
+ // Don't watch sockets or named pipes.
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
- // Follow Symlinks.
+ // Follow symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
@@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
- w.mu.Lock()
- _, alreadyWatching = w.watches[link]
- w.mu.Unlock()
-
+ _, alreadyWatching = w.watches.byPath(link)
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
- w.watches[name] = 0
- w.fileExists[name] = struct{}{}
+ w.watches.addLink(name, 0)
return link, nil
}
+ info.linkName = name
name = link
fi, err = os.Lstat(name)
if err != nil {
@@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
- watchfd, err = unix.Open(name, openMode, 0)
+ info.wd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
@@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", err
}
- isDir = fi.IsDir()
+ info.isDir = fi.IsDir()
}
- err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
+ err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
- unix.Close(watchfd)
+ unix.Close(info.wd)
return "", err
}
if !alreadyWatching {
- w.mu.Lock()
- parentName := filepath.Dir(name)
- w.watches[name] = watchfd
-
- watchesByDir, ok := w.watchesByDir[parentName]
- if !ok {
- watchesByDir = make(map[int]struct{}, 1)
- w.watchesByDir[parentName] = watchesByDir
- }
- watchesByDir[watchfd] = struct{}{}
- w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
- w.mu.Unlock()
+ w.watches.add(name, info.linkName, info.wd, info.isDir)
}
- if isDir {
- // Watch the directory if it has not been watched before, or if it was
- // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
- w.mu.Lock()
-
+ // Watch the directory if it has not been watched before, or if it was
+ // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
- (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
- // Store flags so this watch can be updated later
- w.dirFlags[name] = flags
- w.mu.Unlock()
+ (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ w.watches.updateDirFlags(name, flags)
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
@@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
+func (w *kqueue) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
@@ -543,50 +466,65 @@ func (w *Watcher) readEvents() {
}()
eventBuffer := make([]unix.Kevent_t, 10)
- for closed := false; !closed; {
+ for {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
- closed = true
+ return
}
- continue
}
- // Flush the events we received to the Events channel
for _, kevent := range kevents {
var (
- watchfd = int(kevent.Ident)
- mask = uint32(kevent.Fflags)
+ wd = int(kevent.Ident)
+ mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
- if watchfd == w.closepipe[0] {
- closed = true
- continue
+ if wd == w.closepipe[0] {
+ return
}
- w.mu.Lock()
- path := w.paths[watchfd]
- w.mu.Unlock()
+ path, ok := w.watches.byWd(wd)
+ if debug {
+ internal.Debug(path.name, &kevent)
+ }
- event := w.newEvent(path.name, mask)
+ // On macOS it seems that sometimes an event with Ident=0 is
+ // delivered, and no other flags/information beyond that, even
+ // though we never saw such a file descriptor. For example in
+ // TestWatchSymlink/277 (usually at the end, but sometimes sooner):
+ //
+ // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
+ // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
+ // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
+ //
+ // The first is a normal event, the second with Ident 0. No error
+ // flag, no data, no ... nothing.
+ //
+ // I read a bit through bsd/kern_event.c from the xnu source, but I
+ // don't really see an obvious location where this is triggered –
+ // this doesn't seem intentional, but idk...
+ //
+ // Technically fd 0 is a valid descriptor, so only skip it if
+ // there's no path, and if we're on macOS.
+ if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
+ continue
+ }
+
+ event := w.newEvent(path.name, path.linkName, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
- w.mu.Lock()
- delete(w.fileExists, event.Name)
- w.mu.Unlock()
+ w.watches.markSeen(event.Name, false)
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
- w.sendDirectoryChangeEvents(event.Name)
- } else {
- if !w.sendEvent(event) {
- closed = true
- continue
- }
+ w.dirChange(event.Name)
+ } else if !w.sendEvent(event) {
+ return
}
if event.Has(Remove) {
@@ -594,25 +532,34 @@ func (w *Watcher) readEvents() {
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
- w.mu.Lock()
- _, found := w.watches[fileDir]
- w.mu.Unlock()
+ _, found := w.watches.byPath(fileDir)
if found {
- err := w.sendDirectoryChangeEvents(fileDir)
- if err != nil {
- if !w.sendError(err) {
- closed = true
- }
+ // TODO: this branch is never triggered in any test.
+ // Added in d6220df (2012).
+ // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
+ //
+ // I don't really get how this can be triggered either.
+ // And it wasn't triggered in the patch that added it,
+ // either.
+ //
+ // Original also had a comment:
+ // make sure the directory exists before we watch for
+ // changes. When we do a recursive watch and perform
+ // rm -rf, the parent directory might have gone
+ // missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the
+ // parent directory.
+ err := w.dirChange(fileDir)
+ if !w.sendError(err) {
+ return
}
}
} else {
- filePath := filepath.Clean(event.Name)
- if fi, err := os.Lstat(filePath); err == nil {
- err := w.sendFileCreatedEventIfNew(filePath, fi)
- if err != nil {
- if !w.sendError(err) {
- closed = true
- }
+ path := filepath.Clean(event.Name)
+ if fi, err := os.Lstat(path); err == nil {
+ err := w.sendCreateIfNew(path, fi)
+ if !w.sendError(err) {
+ return
}
}
}
@@ -622,8 +569,14 @@ func (w *Watcher) readEvents() {
}
// newEvent returns an platform-independent Event based on kqueue Fflags.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
e := Event{Name: name}
+ if linkName != "" {
+ // If the user watched "/path/link" then emit events as "/path/link"
+ // rather than "/path/target".
+ e.Name = linkName
+ }
+
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
@@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
- // Get all files
+func (w *kqueue) watchDirectoryFiles(dirPath string) error {
files, err := os.ReadDir(dirPath)
if err != nil {
return err
@@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
}
}
- w.mu.Lock()
- w.fileExists[cleanPath] = struct{}{}
- w.mu.Unlock()
+ w.watches.markSeen(cleanPath, true)
}
return nil
@@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
+func (w *kqueue) dirChange(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
@@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ return fmt.Errorf("fsnotify.dirChange: %w", err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ return fmt.Errorf("fsnotify.dirChange: %w", err)
}
- err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
return nil
}
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ return fmt.Errorf("fsnotify.dirChange: %w", err)
}
}
return nil
}
-// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
- w.mu.Lock()
- _, doesExist := w.fileExists[filePath]
- w.mu.Unlock()
- if !doesExist {
- if !w.sendEvent(Event{Name: filePath, Op: Create}) {
- return
+// Send a create event if the file isn't already being tracked, and start
+// watching this file.
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
+ if !w.watches.seenBefore(path) {
+ if !w.sendEvent(Event{Name: path, Op: Create}) {
+ return nil
}
}
- // like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fi)
+ // Like watchDirectoryFiles, but without doing another ReadDir.
+ path, err := w.internalWatch(path, fi)
if err != nil {
return err
}
-
- w.mu.Lock()
- w.fileExists[filePath] = struct{}{}
- w.mu.Unlock()
-
+ w.watches.markSeen(path, true)
return nil
}
-func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
+func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
- w.mu.Lock()
- flags := w.dirFlags[name]
- w.mu.Unlock()
-
- flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
- return w.addWatch(name, flags)
+ info, _ := w.watches.byPath(name)
+ return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME)
}
// watch file to mimic Linux inotify
@@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
}
// Register events with the queue.
-func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
+func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
@@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
}
// read retrieves pending events, or waits until an event occurs.
-func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
+func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
+
+func (w *kqueue) xSupports(op Op) bool {
+ if runtime.GOOS == "freebsd" {
+ //return true // Supports everything.
+ }
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go
index d34a23c015f8..5eb5dbc66f26 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -1,205 +1,23 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
-// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
import "errors"
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type other struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
+func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
-
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error { return nil }
-
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string { return nil }
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return nil }
-
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error { return nil }
+func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
+ return newBackend(ev, errs)
+}
+func (w *other) Close() error { return nil }
+func (w *other) WatchList() []string { return nil }
+func (w *other) Add(name string) error { return nil }
+func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
+func (w *other) Remove(name string) error { return nil }
+func (w *other) xSupports(op Op) bool { return false }
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
index 9bc91e5d613f..c54a63083835 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -1,12 +1,8 @@
//go:build windows
-// +build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
-//
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -19,123 +15,15 @@ import (
"runtime"
"strings"
"sync"
+ "time"
"unsafe"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/windows"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type readDirChangesW struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
@@ -147,48 +35,40 @@ type Watcher struct {
closed bool // Set to true when Close() is first called
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(50)
+func newBackend(ev chan Event, errs chan error) (backend, error) {
+ return newBufferedBackend(50, ev, errs)
}
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
- w := &Watcher{
+ w := &readDirChangesW{
+ Events: ev,
+ Errors: errs,
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
- Events: make(chan Event, sz),
- Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
-func (w *Watcher) isClosed() bool {
+func (w *readDirChangesW) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
+func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
+ event.renamedFrom = renamedFrom
select {
case ch := <-w.quit:
w.quit <- ch
@@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool {
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
+func (w *readDirChangesW) sendError(err error) bool {
+ if err == nil {
+ return true
+ }
select {
case w.Errors <- err:
return true
case <-w.quit:
+ return false
}
- return false
}
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
+func (w *readDirChangesW) Close() error {
if w.isClosed() {
return nil
}
@@ -226,57 +108,21 @@ func (w *Watcher) Close() error {
return <-ch
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
+ }
with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
@@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
return <-in.reply
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *readDirChangesW) Remove(name string) error {
if w.isClosed() {
return nil
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
+ }
in := &input{
op: opRemoveWatch,
@@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *readDirChangesW) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string {
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
- entries = append(entries, watchEntry.path)
+ for name := range watchEntry.names {
+ entries = append(entries, filepath.Join(watchEntry.path, name))
+ }
+ // the directory itself is being watched
+ if watchEntry.mask != 0 {
+ entries = append(entries, watchEntry.path)
+ }
}
}
@@ -361,7 +205,7 @@ const (
sysFSIGNORED = 0x8000
)
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
@@ -417,7 +261,7 @@ type (
watchMap map[uint32]indexMap
)
-func (w *Watcher) wakeupReader() error {
+func (w *readDirChangesW) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
@@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error {
return nil
}
-func (w *Watcher) getDir(pathname string) (dir string, err error) {
+func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
@@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) {
return
}
-func (w *Watcher) getIno(path string) (ino *inode, err error) {
+func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
@@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
- //pathname, recurse := recursivePath(pathname)
- recurse := false
+func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
+ pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
@@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
}
// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
+func (w *readDirChangesW) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
@@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
- w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
@@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error {
}
// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
+func (w *readDirChangesW) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
- w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
+func (w *readDirChangesW) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
@@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
@@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error {
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
+func (w *readDirChangesW) readEvents() {
var (
n uint32
key uintptr
@@ -700,7 +543,7 @@ func (w *Watcher) readEvents() {
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
@@ -733,6 +576,10 @@ func (w *Watcher) readEvents() {
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
+ if debug {
+ internal.Debug(fullname, raw.Action)
+ }
+
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
@@ -761,21 +608,22 @@ func (w *Watcher) readEvents() {
}
}
- sendNameEvent := func() {
- w.sendEvent(fullname, watch.names[name]&mask)
- }
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
+ w.sendEvent(fullname, "", watch.names[name]&mask)
}
if raw.Action == windows.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
- w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
+ if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
+ w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
+ } else {
+ w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
+ }
+
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = filepath.Join(watch.path, watch.rename)
- sendNameEvent()
+ w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
}
// Move to the next event in the buffer
@@ -787,8 +635,7 @@ func (w *Watcher) readEvents() {
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
- w.sendError(errors.New(
- "Windows system assumed buffer larger than it is, events have likely been missed"))
+ w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -799,7 +646,7 @@ func (w *Watcher) readEvents() {
}
}
-func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
+func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
@@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
return m
}
-func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
+func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
@@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
}
return 0
}
+
+func (w *readDirChangesW) xSupports(op Op) bool {
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 24c99cc4999e..0760efe91600 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -3,19 +3,146 @@
//
// Currently supported systems:
//
-// Linux 2.6.32+ via inotify
-// BSD, macOS via kqueue
-// Windows via ReadDirectoryChangesW
-// illumos via FEN
+// - Linux via inotify
+// - BSD, macOS via kqueue
+// - Windows via ReadDirectoryChangesW
+// - illumos via FEN
+//
+// # FSNOTIFY_DEBUG
+//
+// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
+// stderr. This can be useful to track down some problems, especially in cases
+// where fsnotify is used as an indirect dependency.
+//
+// Every event will be printed as soon as there's something useful to print,
+// with as little processing from fsnotify.
+//
+// Example output:
+//
+// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
+// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
+// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
package fsnotify
import (
"errors"
"fmt"
+ "os"
"path/filepath"
"strings"
)
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\\path\\to\\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ b backend
+
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+}
+
// Event represents a file system notification.
type Event struct {
// Path to the file or directory.
@@ -30,6 +157,16 @@ type Event struct {
// This is a bitmask and some systems may send multiple operations at once.
// Use the Event.Has() method instead of comparing with ==.
Op Op
+
+ // Create events will have this set to the old path if it's a rename. This
+ // only works when both the source and destination are watched. It's not
+ // reliable when watching individual files, only directories.
+ //
+ // For example "mv /tmp/file /tmp/rename" will emit:
+ //
+ // Event{Op: Rename, Name: "/tmp/file"}
+ // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
+ renamedFrom string
}
// Op describes a set of file operations.
@@ -50,7 +187,7 @@ const (
// example "remove to trash" is often a rename).
Remove
- // The path was renamed to something else; any watched on it will be
+ // The path was renamed to something else; any watches on it will be
// removed.
Rename
@@ -60,15 +197,155 @@ const (
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
+
+ // File descriptor was opened.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableOpen
+
+ // File was read from.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableRead
+
+ // File opened for writing was closed.
+ //
+ // Only works on Linux and FreeBSD.
+ //
+ // The advantage of using this over Write is that it's more reliable than
+ // waiting for Write events to stop. It's also faster (if you're not
+ // listening to Write events): copying a file of a few GB can easily
+ // generate tens of thousands of Write events in a short span of time.
+ xUnportableCloseWrite
+
+ // File opened for reading was closed.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableCloseRead
)
-// Common errors that can be reported.
var (
+ // ErrNonExistentWatch is used when Remove() is called on a path that's not
+ // added.
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
- ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
- ErrClosed = errors.New("fsnotify: watcher already closed")
+
+ // ErrClosed is used when trying to operate on a closed Watcher.
+ ErrClosed = errors.New("fsnotify: watcher already closed")
+
+ // ErrEventOverflow is reported from the Errors channel when there are too
+ // many events:
+ //
+ // - inotify: inotify returns IN_Q_OVERFLOW – because there are too
+ // many queued events (the fs.inotify.max_queued_events
+ // sysctl can be used to increase this).
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
+ ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
+
+ // ErrUnsupported is returned by AddWith() when WithOps() specified an
+ // Unportable event that's not supported on this platform.
+ xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ ev, errs := make(chan Event), make(chan error)
+ b, err := newBackend(ev, errs)
+ if err != nil {
+ return nil, err
+ }
+ return &Watcher{b: b, Events: ev, Errors: errs}, nil
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ ev, errs := make(chan Event), make(chan error)
+ b, err := newBufferedBackend(sz, ev, errs)
+ if err != nil {
+ return nil, err
+ }
+ return &Watcher{b: b, Events: ev, Errors: errs}, nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(path string) error { return w.b.Add(path) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return w.b.Close() }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return w.b.WatchList() }
+
+// Supports reports if all the listed operations are supported by this platform.
+//
+// Create, Write, Remove, Rename, and Chmod are always supported. It can only
+// return false for an Op starting with Unportable.
+func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
+
func (o Op) String() string {
var b strings.Builder
if o.Has(Create) {
@@ -80,6 +357,18 @@ func (o Op) String() string {
if o.Has(Write) {
b.WriteString("|WRITE")
}
+ if o.Has(xUnportableOpen) {
+ b.WriteString("|OPEN")
+ }
+ if o.Has(xUnportableRead) {
+ b.WriteString("|READ")
+ }
+ if o.Has(xUnportableCloseWrite) {
+ b.WriteString("|CLOSE_WRITE")
+ }
+ if o.Has(xUnportableCloseRead) {
+ b.WriteString("|CLOSE_READ")
+ }
if o.Has(Rename) {
b.WriteString("|RENAME")
}
@@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// String returns a string representation of the event with their path.
func (e Event) String() string {
+ if e.renamedFrom != "" {
+ return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
+ }
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
type (
+ backend interface {
+ Add(string) error
+ AddWith(string, ...addOpt) error
+ Remove(string) error
+ WatchList() []string
+ Close() error
+ xSupports(Op) bool
+ }
addOpt func(opt *withOpts)
withOpts struct {
- bufsize int
+ bufsize int
+ op Op
+ noFollow bool
+ sendCreate bool
}
)
+var debug = func() bool {
+ // Check for exactly "1" (rather than mere existence) so we can add
+ // options/flags in the future. I don't know if we ever want that, but it's
+ // nice to leave the option open.
+ return os.Getenv("FSNOTIFY_DEBUG") == "1"
+}()
+
var defaultOpts = withOpts{
bufsize: 65536, // 64K
+ op: Create | Write | Remove | Rename | Chmod,
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
- o(&with)
+ if o != nil {
+ o(&with)
+ }
}
return with
}
@@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
+// WithOps sets which operations to listen for. The default is [Create],
+// [Write], [Remove], [Rename], and [Chmod].
+//
+// Excluding operations you're not interested in can save quite a bit of CPU
+// time; in some use cases there may be hundreds of thousands of useless Write
+// or Chmod operations per second.
+//
+// This can also be used to add unportable operations not supported by all
+// platforms; unportable operations all start with "Unportable":
+// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
+// [UnportableCloseRead].
+//
+// AddWith returns an error when using an unportable operation that's not
+// supported. Use [Watcher.Support] to check for support.
+func withOps(op Op) addOpt {
+ return func(opt *withOpts) { opt.op = op }
+}
+
+// WithNoFollow disables following symlinks, so the symlinks themselves are
+// watched.
+func withNoFollow() addOpt {
+ return func(opt *withOpts) { opt.noFollow = true }
+}
+
+// "Internal" option for recursive watches on inotify.
+func withCreate() addOpt {
+ return func(opt *withOpts) { opt.sendCreate = true }
+}
+
+var enableRecurse = false
+
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
+ path = filepath.Clean(path)
+ if !enableRecurse { // Only enabled in tests for now.
+ return path, false
+ }
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
new file mode 100644
index 000000000000..b0eab10090d3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
@@ -0,0 +1,39 @@
+//go:build darwin
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ SyscallEACCES = syscall.EACCES
+ UnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+func SetRlimit() {
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = l.Cur
+
+ if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+
+ if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
new file mode 100644
index 000000000000..928319fb09ab
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
@@ -0,0 +1,57 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_CRITICAL", unix.NOTE_CRITICAL},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
+ {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
+ {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
+ {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
+ {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
+ {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
+ {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
+ {"NOTE_LEEWAY", unix.NOTE_LEEWAY},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_MACHTIME", unix.NOTE_MACHTIME},
+ {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
+ {"NOTE_NONE", unix.NOTE_NONE},
+ {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
+ {"NOTE_OOB", unix.NOTE_OOB},
+ //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_REAP", unix.NOTE_REAP},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_SECONDS", unix.NOTE_SECONDS},
+ {"NOTE_SIGNAL", unix.NOTE_SIGNAL},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_USECONDS", unix.NOTE_USECONDS},
+ {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
+ {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
+ {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
+ {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
new file mode 100644
index 000000000000..3186b0c3491d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
@@ -0,0 +1,33 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_OOB", unix.NOTE_OOB},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
new file mode 100644
index 000000000000..f69fdb930f5f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
@@ -0,0 +1,42 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ABSTIME", unix.NOTE_ABSTIME},
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_CLOSE", unix.NOTE_CLOSE},
+ {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_MSECONDS", unix.NOTE_MSECONDS},
+ {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
+ {"NOTE_OPEN", unix.NOTE_OPEN},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_READ", unix.NOTE_READ},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_SECONDS", unix.NOTE_SECONDS},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_USECONDS", unix.NOTE_USECONDS},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
new file mode 100644
index 000000000000..607e683bd731
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
@@ -0,0 +1,32 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, kevent *unix.Kevent_t) {
+ mask := uint32(kevent.Fflags)
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
+ time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
new file mode 100644
index 000000000000..35c734be4311
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, mask, cookie uint32) {
+ names := []struct {
+ n string
+ m uint32
+ }{
+ {"IN_ACCESS", unix.IN_ACCESS},
+ {"IN_ATTRIB", unix.IN_ATTRIB},
+ {"IN_CLOSE", unix.IN_CLOSE},
+ {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
+ {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
+ {"IN_CREATE", unix.IN_CREATE},
+ {"IN_DELETE", unix.IN_DELETE},
+ {"IN_DELETE_SELF", unix.IN_DELETE_SELF},
+ {"IN_IGNORED", unix.IN_IGNORED},
+ {"IN_ISDIR", unix.IN_ISDIR},
+ {"IN_MODIFY", unix.IN_MODIFY},
+ {"IN_MOVE", unix.IN_MOVE},
+ {"IN_MOVED_FROM", unix.IN_MOVED_FROM},
+ {"IN_MOVED_TO", unix.IN_MOVED_TO},
+ {"IN_MOVE_SELF", unix.IN_MOVE_SELF},
+ {"IN_OPEN", unix.IN_OPEN},
+ {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
+ {"IN_UNMOUNT", unix.IN_UNMOUNT},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ var c string
+ if cookie > 0 {
+ c = fmt.Sprintf("(cookie: %d) ", cookie)
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
+ time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
new file mode 100644
index 000000000000..e5b3b6f69433
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
@@ -0,0 +1,25 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
new file mode 100644
index 000000000000..1dd455bc5a4e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
@@ -0,0 +1,28 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EOF", unix.NOTE_EOF},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
new file mode 100644
index 000000000000..f1b2e73bd5ba
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
@@ -0,0 +1,45 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, mask int32) {
+ names := []struct {
+ n string
+ m int32
+ }{
+ {"FILE_ACCESS", unix.FILE_ACCESS},
+ {"FILE_MODIFIED", unix.FILE_MODIFIED},
+ {"FILE_ATTRIB", unix.FILE_ATTRIB},
+ {"FILE_TRUNC", unix.FILE_TRUNC},
+ {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
+ {"FILE_DELETE", unix.FILE_DELETE},
+ {"FILE_RENAME_TO", unix.FILE_RENAME_TO},
+ {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
+ {"UNMOUNTED", unix.UNMOUNTED},
+ {"MOUNTEDOVER", unix.MOUNTEDOVER},
+ {"FILE_EXCEPTION", unix.FILE_EXCEPTION},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
+ time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
new file mode 100644
index 000000000000..52bf4ce53b56
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
@@ -0,0 +1,40 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+func Debug(name string, mask uint32) {
+ names := []struct {
+ n string
+ m uint32
+ }{
+ {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
+ {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
+ {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
+ {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
+ {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
+ time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
new file mode 100644
index 000000000000..547df1df84b5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
@@ -0,0 +1,31 @@
+//go:build freebsd
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ SyscallEACCES = syscall.EACCES
+ UnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+func SetRlimit() {
+ // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = uint64(l.Cur)
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go
new file mode 100644
index 000000000000..7daa45e19eec
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go
@@ -0,0 +1,2 @@
+// Package internal contains some helpers.
+package internal
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go
new file mode 100644
index 000000000000..30976ce97395
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go
@@ -0,0 +1,31 @@
+//go:build !windows && !darwin && !freebsd
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ SyscallEACCES = syscall.EACCES
+ UnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+func SetRlimit() {
+ // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = uint64(l.Cur)
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
new file mode 100644
index 000000000000..37dfeddc2896
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
@@ -0,0 +1,7 @@
+//go:build !windows
+
+package internal
+
+func HasPrivilegesForSymlink() bool {
+ return true
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go
new file mode 100644
index 000000000000..a72c64954905
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go
@@ -0,0 +1,41 @@
+//go:build windows
+
+package internal
+
+import (
+ "errors"
+
+ "golang.org/x/sys/windows"
+)
+
+// Just a dummy.
+var (
+ SyscallEACCES = errors.New("dummy")
+ UnixEACCES = errors.New("dummy")
+)
+
+func SetRlimit() {}
+func Maxfiles() uint64 { return 1<<64 - 1 }
+func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
+func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
+
+func HasPrivilegesForSymlink() bool {
+ var sid *windows.SID
+ err := windows.AllocateAndInitializeSid(
+ &windows.SECURITY_NT_AUTHORITY,
+ 2,
+ windows.SECURITY_BUILTIN_DOMAIN_RID,
+ windows.DOMAIN_ALIAS_RID_ADMINS,
+ 0, 0, 0, 0, 0, 0,
+ &sid)
+ if err != nil {
+ return false
+ }
+ defer windows.FreeSid(sid)
+ token := windows.Token(0)
+ member, err := token.IsMember(sid)
+ if err != nil {
+ return false
+ }
+ return member || token.IsElevated()
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
deleted file mode 100644
index 99012ae6539e..000000000000
--- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env zsh
-[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
-setopt err_exit no_unset pipefail extended_glob
-
-# Simple script to update the godoc comments on all watchers so you don't need
-# to update the same comment 5 times.
-
-watcher=$(</tmp/x
- print -r -- $cmt >>/tmp/x
- tail -n+$(( end + 1 )) $file >>/tmp/x
- mv /tmp/x $file
- done
-}
-
-set-cmt '^type Watcher struct ' $watcher
-set-cmt '^func NewWatcher(' $new
-set-cmt '^func NewBufferedWatcher(' $newbuffered
-set-cmt '^func (w \*Watcher) Add(' $add
-set-cmt '^func (w \*Watcher) AddWith(' $addwith
-set-cmt '^func (w \*Watcher) Remove(' $remove
-set-cmt '^func (w \*Watcher) Close(' $close
-set-cmt '^func (w \*Watcher) WatchList(' $watchlist
-set-cmt '^[[:space:]]*Events *chan Event$' $events
-set-cmt '^[[:space:]]*Errors *chan error$' $errors
diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
index 4322b0b88557..f65e8fe3edce 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
@@ -1,5 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly
-// +build freebsd openbsd netbsd dragonfly
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
index 5da5ffa78fe7..a29fc7aab620 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
@@ -1,5 +1,4 @@
//go:build darwin
-// +build darwin
package fsnotify
diff --git a/vendor/github.com/operator-framework/api/LICENSE b/vendor/github.com/operator-framework/api/LICENSE
new file mode 100644
index 000000000000..261eeb9e9f8b
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
new file mode 100644
index 000000000000..a0ffb9fcbe0a
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
@@ -0,0 +1,67 @@
+package version
+
+import (
+ "encoding/json"
+
+ semver "github.com/blang/semver/v4"
+)
+
+// +k8s:openapi-gen=true
+// OperatorVersion is a wrapper around semver.Version which supports correct
+// marshaling to YAML and JSON.
+// +kubebuilder:validation:Type=string
+type OperatorVersion struct {
+ semver.Version `json:"-"`
+}
+
+// DeepCopyInto creates a deep-copy of the Version value.
+func (v *OperatorVersion) DeepCopyInto(out *OperatorVersion) {
+ out.Major = v.Major
+ out.Minor = v.Minor
+ out.Patch = v.Patch
+
+ if v.Pre != nil {
+ pre := make([]semver.PRVersion, len(v.Pre))
+ copy(pre, v.Pre)
+ out.Pre = pre
+ }
+
+ if v.Build != nil {
+ build := make([]string, len(v.Build))
+ copy(build, v.Build)
+ out.Build = build
+ }
+}
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v OperatorVersion) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *OperatorVersion) UnmarshalJSON(data []byte) (err error) {
+ var versionString string
+
+ if err = json.Unmarshal(data, &versionString); err != nil {
+ return
+ }
+
+ version := semver.Version{}
+ version, err = semver.ParseTolerant(versionString)
+ if err != nil {
+ return err
+ }
+ v.Version = version
+ return
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ OperatorVersion) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+// "semver" is not a standard openapi format but tooling may use the value regardless
+func (_ OperatorVersion) OpenAPISchemaFormat() string { return "semver" }
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/doc.go
new file mode 100644
index 000000000000..7eba794488ba
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/doc.go
@@ -0,0 +1,4 @@
+// +kubebuilder:skip
+
+// Package operators contains all resource types of the operators.coreos.com API group.
+// It holds the internal (unversioned) representations; versioned types live in
+// subpackages such as v1.
+package operators
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/register.go b/vendor/github.com/operator-framework/api/pkg/operators/register.go
new file mode 100644
index 000000000000..e3c31d51ac22
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/register.go
@@ -0,0 +1,31 @@
+package operators
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	// GroupName is the group name used in this package.
+	GroupName = "operators.coreos.com"
+	// GroupVersion is the group version used in this package. It is
+	// runtime.APIVersionInternal, i.e. the internal (unversioned) API.
+	GroupVersion = runtime.APIVersionInternal
+
+	// LEGACY: Exported kind names, remove after major version bump
+
+	// ClusterServiceVersionKind is the kind name for ClusterServiceVersion resources.
+	ClusterServiceVersionKind = "ClusterServiceVersion"
+	// CatalogSourceKind is the kind name for CatalogSource resources.
+	CatalogSourceKind = "CatalogSource"
+	// InstallPlanKind is the kind name for InstallPlan resources.
+	InstallPlanKind = "InstallPlan"
+	// SubscriptionKind is the kind name for Subscription resources.
+	SubscriptionKind = "Subscription"
+	// OperatorKind is the kind name for Operator resources.
+	OperatorKind = "Operator"
+	// OperatorGroupKind is the kind name for OperatorGroup resources.
+	OperatorGroupKind = "OperatorGroup"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+// (the internal version of the operators.coreos.com group).
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/doc.go
new file mode 100644
index 000000000000..dec83277bba2
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/doc.go
@@ -0,0 +1,4 @@
+// +groupName=operators.coreos.com
+
+// Package v1 contains resource types for version v1 of the operators.coreos.com API group.
+package v1
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/groupversion_info.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/groupversion_info.go
new file mode 100644
index 000000000000..089ec878399e
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/groupversion_info.go
@@ -0,0 +1,28 @@
+// +kubebuilder:object:generate=true
+
+// Package v1 contains API Schema definitions for the operator v1 API group.
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects.
+	GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v1"}
+
+	// SchemeGroupVersion is required for compatibility with client generation.
+	// It is an alias of GroupVersion.
+	SchemeGroupVersion = GroupVersion
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource takes an unqualified resource name and returns a GroupResource
+// qualified with the operators.coreos.com group.
+func Resource(resource string) schema.GroupResource {
+	return GroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/olmconfig_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/olmconfig_types.go
new file mode 100644
index 000000000000..c15b5114fc07
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/olmconfig_types.go
@@ -0,0 +1,90 @@
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// DisabledCopiedCSVsConditionType is the status condition type reported
+	// on an OLMConfig when the "Copied CSVs" feature has been disabled.
+	DisabledCopiedCSVsConditionType = "DisabledCopiedCSVs"
+)
+
+// OLMConfigSpec is the spec for an OLMConfig resource.
+type OLMConfigSpec struct {
+	// Features holds the configurable OLM feature toggles; nil means all
+	// features keep their defaults.
+	Features *Features `json:"features,omitempty"`
+}
+
+// Features contains the list of configurable OLM features.
+type Features struct {
+
+	// DisableCopiedCSVs is used to disable OLM's "Copied CSV" feature
+	// for operators installed at the cluster scope, where a cluster
+	// scoped operator is one that has been installed in an
+	// OperatorGroup that targets all namespaces.
+	// When reenabled, OLM will recreate the "Copied CSVs" for each
+	// cluster scoped operator.
+	DisableCopiedCSVs *bool `json:"disableCopiedCSVs,omitempty"`
+	// PackageServerSyncInterval is used to define the sync interval for
+	// packageserver pods. Packageserver pods periodically check the
+	// status of CatalogSources; this specifies the period using duration
+	// format (e.g. "60m"). For this parameter, only hours ("h"), minutes
+	// ("m"), and seconds ("s") may be specified. When not specified, the
+	// period defaults to the value specified within the packageserver.
+	// +optional
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(s|m|h))+$"
+	PackageServerSyncInterval *metav1.Duration `json:"packageServerSyncInterval,omitempty"`
+}
+
+// OLMConfigStatus is the status for an OLMConfig resource.
+type OLMConfigStatus struct {
+	// Conditions are the latest observed conditions of the OLMConfig
+	// (e.g. DisabledCopiedCSVsConditionType).
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +genclient:nonNamespaced
+// +kubebuilder:storageversion
+// +kubebuilder:resource:categories=olm,scope=Cluster
+// +kubebuilder:subresource:status
+
+// OLMConfig is a resource responsible for configuring OLM.
+type OLMConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec   OLMConfigSpec   `json:"spec,omitempty"`
+	Status OLMConfigStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OLMConfigList is a list of OLMConfig resources.
+type OLMConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	// NOTE(review): listType=set on a slice of structs is atypical (sets
+	// normally hold scalar items) — confirm against upstream operator-framework/api.
+	// +listType=set
+	Items []OLMConfig `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&OLMConfig{}, &OLMConfigList{})
+}
+
+// CopiedCSVsAreEnabled reports whether the Copied CSV feature is enabled.
+// It returns true when DisableCopiedCSVs is unset (nil receiver, nil
+// Features, or nil flag) or explicitly false; it returns false only when
+// DisableCopiedCSVs is explicitly set to true.
+func (config *OLMConfig) CopiedCSVsAreEnabled() bool {
+	if config == nil || config.Spec.Features == nil || config.Spec.Features.DisableCopiedCSVs == nil {
+		return true
+	}
+
+	return !*config.Spec.Features.DisableCopiedCSVs
+}
+
+// PackageServerSyncInterval returns the configured packageserver sync
+// interval, or nil when no interval is set (callers fall back to their own
+// default). Safe to call on a nil receiver.
+func (config *OLMConfig) PackageServerSyncInterval() *time.Duration {
+	if config == nil || config.Spec.Features == nil || config.Spec.Features.PackageServerSyncInterval == nil {
+		return nil
+	}
+	return &config.Spec.Features.PackageServerSyncInterval.Duration
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/operator_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/operator_types.go
new file mode 100644
index 000000000000..af735950f571
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/operator_types.go
@@ -0,0 +1,88 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorSpec defines the desired state of Operator.
+// It currently declares no fields.
+type OperatorSpec struct{}
+
+// OperatorStatus defines the observed state of an Operator and its components
+type OperatorStatus struct {
+	// Components describes resources that compose the operator.
+	// +optional
+	Components *Components `json:"components,omitempty"`
+}
+
+// ConditionType codifies a condition's type.
+type ConditionType string
+
+// Condition represent the latest available observations of a component's state.
+type Condition struct {
+	// Type of condition.
+	Type ConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// Last time the condition was probed
+	// +optional
+	LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// Components tracks the resources that compose an operator.
+type Components struct {
+	// LabelSelector is a label query over a set of resources used to select the operator's components
+	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+	// Refs are a set of references to the operator's component resources, selected with LabelSelector.
+	// +optional
+	Refs []RichReference `json:"refs,omitempty"`
+}
+
+// RichReference is a reference to a resource, enriched with its status conditions.
+type RichReference struct {
+	*corev1.ObjectReference `json:",inline"`
+	// Conditions represents the latest state of the component.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:categories=olm,scope=Cluster
+// +kubebuilder:subresource:status
+
+// Operator represents a cluster operator.
+type Operator struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   OperatorSpec   `json:"spec,omitempty"`
+	Status OperatorStatus `json:"status,omitempty"`
+}
+
+// +genclient:nonNamespaced
+// +kubebuilder:object:root=true
+
+// OperatorList contains a list of Operators.
+type OperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// Items is the list of Operator resources.
+	Items []Operator `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Operator{}, &OperatorList{})
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorcondition_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorcondition_types.go
new file mode 100644
index 000000000000..8647b227e5c1
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorcondition_types.go
@@ -0,0 +1,49 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// Upgradeable indicates that the operator is upgradeable
+	Upgradeable string = "Upgradeable"
+)
+
+// OperatorConditionSpec allows a cluster admin to convey information about the state of an operator to OLM, potentially overriding state reported by the operator.
+type OperatorConditionSpec struct {
+	// ServiceAccounts lists service account names — presumably those
+	// belonging to the operator; confirm against upstream docs.
+	ServiceAccounts []string `json:"serviceAccounts,omitempty"`
+	// Deployments lists deployment names — presumably the operator's
+	// deployments; confirm against upstream docs.
+	Deployments []string `json:"deployments,omitempty"`
+	// Overrides are admin-supplied conditions that take precedence over
+	// the conditions reported by the operator in the status.
+	Overrides []metav1.Condition `json:"overrides,omitempty"`
+}
+
+// OperatorConditionStatus allows an operator to convey information its state to OLM. The status may trail the actual
+// state of a system.
+type OperatorConditionStatus struct {
+	// Conditions are the operator-reported conditions (e.g. Upgradeable).
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName=condition,categories=olm
+// +kubebuilder:subresource:status
+// OperatorCondition is a Custom Resource of type `OperatorCondition` which is used to convey information to OLM about the state of an operator.
+type OperatorCondition struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec   OperatorConditionSpec   `json:"spec,omitempty"`
+	Status OperatorConditionStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// OperatorConditionList represents a list of Conditions.
+type OperatorConditionList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []OperatorCondition `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&OperatorCondition{}, &OperatorConditionList{})
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorgroup_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorgroup_types.go
new file mode 100644
index 000000000000..81ad352d4e2d
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/operatorgroup_types.go
@@ -0,0 +1,214 @@
+package v1
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// Annotation keys that OLM writes onto resources managed by an OperatorGroup.
+	OperatorGroupAnnotationKey             = "olm.operatorGroup"
+	OperatorGroupNamespaceAnnotationKey    = "olm.operatorNamespace"
+	OperatorGroupTargetsAnnotationKey      = "olm.targetNamespaces"
+	OperatorGroupProvidedAPIsAnnotationKey = "olm.providedAPIs"
+
+	// OperatorGroupKind is the kind name for OperatorGroup resources.
+	OperatorGroupKind = "OperatorGroup"
+
+	// OperatorGroupLabelPrefix/Template form the namespace label derived from
+	// an OperatorGroup's UID; see OGLabelKeyAndValue.
+	OperatorGroupLabelPrefix   = "olm.operatorgroup.uid/"
+	OperatorGroupLabelTemplate = OperatorGroupLabelPrefix + "%s"
+
+	// Condition types and reasons reported on OperatorGroups.
+	// NOTE(review): "Mutliple" preserves an upstream typo in this exported
+	// name; renaming it would break the public API.
+	OperatorGroupServiceAccountCondition = "OperatorGroupServiceAccount"
+	MutlipleOperatorGroupCondition       = "MultipleOperatorGroup"
+	MultipleOperatorGroupsReason         = "MultipleOperatorGroupsFound"
+	OperatorGroupServiceAccountReason    = "ServiceAccountNotFound"
+
+	// UpgradeStrategyDefault configures OLM such that it will only allow
+	// clusterServiceVersions to move to the replacing phase to the succeeded
+	// phase. This effectively means that OLM will not allow operators to move
+	// to the next version if an installation or upgrade has failed.
+	UpgradeStrategyDefault UpgradeStrategy = "Default"
+
+	// UpgradeStrategyUnsafeFailForward configures OLM such that it will allow
+	// clusterServiceVersions to move to the replacing phase from the succeeded
+	// phase or from the failed phase. Additionally, OLM will generate new
+	// installPlans when a subscription references a failed installPlan and the
+	// catalog has been updated with a new upgrade for the existing set of
+	// operators.
+	//
+	// WARNING: The UpgradeStrategyUnsafeFailForward upgrade strategy is unsafe
+	// and may result in unexpected behavior or unrecoverable data loss unless
+	// you have deep understanding of the set of operators being managed in the
+	// namespace.
+	UpgradeStrategyUnsafeFailForward UpgradeStrategy = "TechPreviewUnsafeFailForward"
+)
+
+// UpgradeStrategy identifies how OLM handles operator upgrades in an
+// OperatorGroup's namespace; see the UpgradeStrategy* constants above.
+type UpgradeStrategy string
+
+// OperatorGroupSpec is the spec for an OperatorGroup resource.
+type OperatorGroupSpec struct {
+	// Selector selects the OperatorGroup's target namespaces.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+	// TargetNamespaces is an explicit set of namespaces to target.
+	// If it is set, Selector is ignored.
+	// +optional
+	// +listType=set
+	TargetNamespaces []string `json:"targetNamespaces,omitempty"`
+
+	// ServiceAccountName is the admin specified service account which will be
+	// used to deploy operator(s) in this operator group.
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// Static tells OLM not to update the OperatorGroup's providedAPIs annotation
+	// +optional
+	StaticProvidedAPIs bool `json:"staticProvidedAPIs,omitempty"`
+
+	// UpgradeStrategy defines the upgrade strategy for operators in the namespace.
+	// There are currently two supported upgrade strategies:
+	//
+	// Default: OLM will only allow clusterServiceVersions to move to the replacing
+	// phase from the succeeded phase. This effectively means that OLM will not
+	// allow operators to move to the next version if an installation or upgrade
+	// has failed.
+	//
+	// TechPreviewUnsafeFailForward: OLM will allow clusterServiceVersions to move to the
+	// replacing phase from the succeeded phase or from the failed phase.
+	// Additionally, OLM will generate new installPlans when a subscription references
+	// a failed installPlan and the catalog has been updated with a new upgrade for
+	// the existing set of operators.
+	//
+	// WARNING: The TechPreviewUnsafeFailForward upgrade strategy is unsafe and may result
+	// in unexpected behavior or unrecoverable data loss unless you have deep
+	// understanding of the set of operators being managed in the namespace.
+	//
+	// +kubebuilder:validation:Enum=Default;TechPreviewUnsafeFailForward
+	// +kubebuilder:default=Default
+	// +optional
+	UpgradeStrategy UpgradeStrategy `json:"upgradeStrategy,omitempty"`
+}
+
+// OperatorGroupStatus is the status for an OperatorGroupResource.
+type OperatorGroupStatus struct {
+	// Namespaces is the set of target namespaces for the OperatorGroup.
+	// +listType=set
+	Namespaces []string `json:"namespaces,omitempty"`
+
+	// ServiceAccountRef references the service account object specified.
+	ServiceAccountRef *corev1.ObjectReference `json:"serviceAccountRef,omitempty"`
+
+	// LastUpdated is a timestamp of the last time the OperatorGroup's status was Updated.
+	LastUpdated *metav1.Time `json:"lastUpdated"`
+
+	// Conditions is an array of the OperatorGroup's conditions.
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:resource:shortName=og,categories=olm
+// +kubebuilder:subresource:status
+
+// OperatorGroup is the unit of multitenancy for OLM managed operators.
+// It constrains the installation of operators in its namespace to a specified set of target namespaces.
+type OperatorGroup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// +optional
+	// +kubebuilder:default={upgradeStrategy:Default}
+	Spec   OperatorGroupSpec   `json:"spec"`
+	Status OperatorGroupStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorGroupList is a list of OperatorGroup resources.
+type OperatorGroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	// NOTE(review): listType=set on a slice of structs is atypical (sets
+	// normally hold scalar items) — confirm against upstream operator-framework/api.
+	// +listType=set
+	Items []OperatorGroup `json:"items"`
+}
+
+// BuildTargetNamespaces returns the status namespaces as a sorted,
+// comma-delimited string; the receiver's Status.Namespaces is not mutated.
+func (o *OperatorGroup) BuildTargetNamespaces() string {
+	ns := make([]string, len(o.Status.Namespaces))
+	copy(ns, o.Status.Namespaces)
+	sort.Strings(ns)
+	return strings.Join(ns, ",")
+}
+
+// UpgradeStrategy returns the UpgradeStrategy specified, or
+// UpgradeStrategyDefault for any unrecognized (or empty) value.
+func (o *OperatorGroup) UpgradeStrategy() UpgradeStrategy {
+	strategyName := o.Spec.UpgradeStrategy
+	switch {
+	case strategyName == UpgradeStrategyUnsafeFailForward:
+		return strategyName
+	default:
+		return UpgradeStrategyDefault
+	}
+}
+
+// IsServiceAccountSpecified returns true if the spec has a service account name specified.
+func (o *OperatorGroup) IsServiceAccountSpecified() bool {
+	if o.Spec.ServiceAccountName == "" {
+		return false
+	}
+
+	return true
+}
+
+// HasServiceAccountSynced returns true if the service account specified has been synced,
+// i.e. a name is set in the spec and the status carries a resolved reference.
+func (o *OperatorGroup) HasServiceAccountSynced() bool {
+	if o.IsServiceAccountSpecified() && o.Status.ServiceAccountRef != nil {
+		return true
+	}
+
+	return false
+}
+
+// OGLabelKeyAndValue returns a key and value that should be applied to namespaces listed in the OperatorGroup.
+// The value is always the empty string — only the key (which embeds the UID) matters.
+// If the UID is not set an error is returned.
+func (o *OperatorGroup) OGLabelKeyAndValue() (string, string, error) {
+	if string(o.GetUID()) == "" {
+		return "", "", fmt.Errorf("Missing UID")
+	}
+	return fmt.Sprintf(OperatorGroupLabelTemplate, o.GetUID()), "", nil
+}
+
+// NamespaceLabelSelector provides a selector that can be used to filter namespaces that belong to the OperatorGroup.
+// A nil selector with a nil error means "select everything".
+func (o *OperatorGroup) NamespaceLabelSelector() (*metav1.LabelSelector, error) {
+	if len(o.Spec.TargetNamespaces) == 0 {
+		// If no target namespaces are set, check if a selector exists.
+		if o.Spec.Selector != nil {
+			return o.Spec.Selector, nil
+		}
+		// No selector exists, return nil which should be used to select EVERYTHING.
+		return nil, nil
+	}
+	// Return a label that should be present on all namespaces defined in the OperatorGroup.Spec.TargetNamespaces field.
+	ogKey, ogValue, err := o.OGLabelKeyAndValue()
+	if err != nil {
+		return nil, err
+	}
+
+	return &metav1.LabelSelector{
+		MatchLabels: map[string]string{
+			ogKey: ogValue,
+		},
+	}, nil
+}
+
+// IsOperatorGroupLabel returns true if the label is an OperatorGroup label.
+func IsOperatorGroupLabel(label string) bool {
+	return strings.HasPrefix(label, OperatorGroupLabelPrefix)
+}
+
+func init() {
+	SchemeBuilder.Register(&OperatorGroup{}, &OperatorGroupList{})
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..d6f89ba40183
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1/zz_generated.deepcopy.go
@@ -0,0 +1,556 @@
+//go:build !ignore_autogenerated
+
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Components) DeepCopyInto(out *Components) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Refs != nil {
+ in, out := &in.Refs, &out.Refs
+ *out = make([]RichReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components.
+func (in *Components) DeepCopy() *Components {
+ if in == nil {
+ return nil
+ }
+ out := new(Components)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Features) DeepCopyInto(out *Features) {
+ *out = *in
+ if in.DisableCopiedCSVs != nil {
+ in, out := &in.DisableCopiedCSVs, &out.DisableCopiedCSVs
+ *out = new(bool)
+ **out = **in
+ }
+ if in.PackageServerSyncInterval != nil {
+ in, out := &in.PackageServerSyncInterval, &out.PackageServerSyncInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Features.
+func (in *Features) DeepCopy() *Features {
+ if in == nil {
+ return nil
+ }
+ out := new(Features)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMConfig) DeepCopyInto(out *OLMConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMConfig.
+func (in *OLMConfig) DeepCopy() *OLMConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OLMConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMConfigList) DeepCopyInto(out *OLMConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OLMConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMConfigList.
+func (in *OLMConfigList) DeepCopy() *OLMConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OLMConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMConfigSpec) DeepCopyInto(out *OLMConfigSpec) {
+ *out = *in
+ if in.Features != nil {
+ in, out := &in.Features, &out.Features
+ *out = new(Features)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMConfigSpec.
+func (in *OLMConfigSpec) DeepCopy() *OLMConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMConfigStatus) DeepCopyInto(out *OLMConfigStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMConfigStatus.
+func (in *OLMConfigStatus) DeepCopy() *OLMConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Operator) DeepCopyInto(out *Operator) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Operator.
+func (in *Operator) DeepCopy() *Operator {
+ if in == nil {
+ return nil
+ }
+ out := new(Operator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Operator) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition.
+func (in *OperatorCondition) DeepCopy() *OperatorCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorCondition) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionList) DeepCopyInto(out *OperatorConditionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionList.
+func (in *OperatorConditionList) DeepCopy() *OperatorConditionList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConditionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorConditionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionSpec) DeepCopyInto(out *OperatorConditionSpec) {
+	*out = *in
+	if in.ServiceAccounts != nil {
+		in, out := &in.ServiceAccounts, &out.ServiceAccounts
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Deployments != nil {
+		in, out := &in.Deployments, &out.Deployments
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Overrides != nil {
+		in, out := &in.Overrides, &out.Overrides
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionSpec.
+func (in *OperatorConditionSpec) DeepCopy() *OperatorConditionSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorConditionSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionStatus) DeepCopyInto(out *OperatorConditionStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionStatus.
+func (in *OperatorConditionStatus) DeepCopy() *OperatorConditionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorConditionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup.
+func (in *OperatorGroup) DeepCopy() *OperatorGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]OperatorGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList.
+func (in *OperatorGroupList) DeepCopy() *OperatorGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetNamespaces != nil {
+		in, out := &in.TargetNamespaces, &out.TargetNamespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec.
+func (in *OperatorGroupSpec) DeepCopy() *OperatorGroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorGroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) {
+	*out = *in
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ServiceAccountRef != nil {
+		in, out := &in.ServiceAccountRef, &out.ServiceAccountRef
+		*out = new(corev1.ObjectReference)
+		**out = **in
+	}
+	if in.LastUpdated != nil {
+		in, out := &in.LastUpdated, &out.LastUpdated
+		*out = (*in).DeepCopy()
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus.
+func (in *OperatorGroupStatus) DeepCopy() *OperatorGroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorGroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorList) DeepCopyInto(out *OperatorList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Operator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorList.
+func (in *OperatorList) DeepCopy() *OperatorList {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec.
+func (in *OperatorSpec) DeepCopy() *OperatorSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) {
+	*out = *in
+	if in.Components != nil {
+		in, out := &in.Components, &out.Components
+		*out = new(Components)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus.
+func (in *OperatorStatus) DeepCopy() *OperatorStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RichReference) DeepCopyInto(out *RichReference) {
+	*out = *in
+	if in.ObjectReference != nil {
+		in, out := &in.ObjectReference, &out.ObjectReference
+		*out = new(corev1.ObjectReference)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RichReference.
+func (in *RichReference) DeepCopy() *RichReference {
+	if in == nil {
+		return nil
+	}
+	out := new(RichReference)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
new file mode 100644
index 000000000000..b5f5e3b7e5a5
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
@@ -0,0 +1,364 @@
+package v1alpha1
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	CatalogSourceCRDAPIVersion  = GroupName + "/" + GroupVersion
+	CatalogSourceKind           = "CatalogSource"
+	DefaultRegistryPollDuration = 15 * time.Minute
+)
+
+// SourceType indicates the type of backing store for a CatalogSource
+type SourceType string
+
+const (
+	// SourceTypeInternal (deprecated) specifies a CatalogSource of type SourceTypeConfigmap
+	SourceTypeInternal SourceType = "internal"
+
+	// SourceTypeConfigmap specifies a CatalogSource that generates a configmap-server registry
+	SourceTypeConfigmap SourceType = "configmap"
+
+	// SourceTypeGrpc specifies a CatalogSource that can use an operator registry image to generate a
+	// registry-server or connect to a pre-existing registry at an address.
+	SourceTypeGrpc SourceType = "grpc"
+)
+
+const (
+	// CatalogSourceSpecInvalidError denotes when fields on the spec of the CatalogSource are not valid.
+	CatalogSourceSpecInvalidError ConditionReason = "SpecInvalidError"
+	// CatalogSourceConfigMapError denotes when there is an issue extracting manifests from the specified ConfigMap.
+	CatalogSourceConfigMapError ConditionReason = "ConfigMapError"
+	// CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server.
+	CatalogSourceRegistryServerError ConditionReason = "RegistryServerError"
+	// CatalogSourceIntervalInvalidError denotes if the registry polling interval is invalid.
+	CatalogSourceIntervalInvalidError ConditionReason = "InvalidIntervalError"
+)
+
+type CatalogSourceSpec struct {
+	// SourceType is the type of source
+	SourceType SourceType `json:"sourceType"`
+
+	// Priority field assigns a weight to the catalog source to prioritize them so that it can be consumed by the dependency resolver.
+	// Usage:
+	// Higher weight indicates that this catalog source is preferred over lower weighted catalog sources during dependency resolution.
+	// The range of the priority value can go from positive to negative in the range of int32.
+	// The default value to a catalog source with unassigned priority would be 0.
+	// The catalog source with the same priority values will be ranked lexicographically based on its name.
+	// +optional
+	Priority int `json:"priority,omitempty"`
+
+	// ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry.
+	// Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal.
+	// +optional
+	ConfigMap string `json:"configMap,omitempty"`
+
+	// Address is a host that OLM can use to connect to a pre-existing registry.
+	// Format: <registry>:<port>
+	// Only used when SourceType = SourceTypeGrpc.
+	// Ignored when the Image field is set.
+	// +optional
+	Address string `json:"address,omitempty"`
+
+	// Image is an operator-registry container image to instantiate a registry-server with.
+	// Only used when SourceType = SourceTypeGrpc.
+	// If present, the address field is ignored.
+	// +optional
+	Image string `json:"image,omitempty"`
+
+	// GrpcPodConfig exposes different overrides for the pod spec of the CatalogSource Pod.
+	// Only used when SourceType = SourceTypeGrpc and Image is set.
+	// +optional
+	GrpcPodConfig *GrpcPodConfig `json:"grpcPodConfig,omitempty"`
+
+	// UpdateStrategy defines how updated catalog source images can be discovered
+	// Consists of an interval that defines polling duration and an embedded strategy type
+	// +optional
+	UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
+
+	// Secrets represent set of secrets that can be used to access the contents of the catalog.
+	// It is best to keep this list small, since each will need to be tried for every catalog entry.
+	// +optional
+	Secrets []string `json:"secrets,omitempty"`
+
+	// Metadata: human-readable display information for the catalog.
+	DisplayName string `json:"displayName,omitempty"`
+	Description string `json:"description,omitempty"`
+	Publisher   string `json:"publisher,omitempty"`
+	Icon        Icon   `json:"icon,omitempty"`
+}
+
+type SecurityConfig string
+
+const (
+	Legacy     SecurityConfig = "legacy"
+	Restricted SecurityConfig = "restricted"
+)
+
+// GrpcPodConfig contains configuration specified for a catalog source
+type GrpcPodConfig struct {
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations are the catalog source's pod's tolerations.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// Affinity is the catalog source's pod's affinity.
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// If specified, indicates the pod's priority.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName *string `json:"priorityClassName,omitempty"`
+
+	// SecurityContextConfig can be one of `legacy` or `restricted`. The CatalogSource's pod is either injected with the
+	// right pod.spec.securityContext and pod.spec.container[*].securityContext values to allow the pod to run in Pod
+	// Security Admission (PSA) `restricted` mode, or doesn't set these values at all, in which case the pod can only be
+	// run in PSA `baseline` or `privileged` namespaces. If the SecurityContextConfig is unspecified, the mode will be
+	// determined by the namespace's PSA configuration. If the namespace is enforcing `restricted` mode, then the pod
+	// will be configured as if `restricted` was specified. Otherwise, it will be configured as if `legacy` was
+	// specified. Specifying a value other than `legacy` or `restricted` results in a validation error. When using older
+	// catalog images, which can not run in `restricted` mode, the SecurityContextConfig should be set to `legacy`.
+	//
+	// More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/
+	// +optional
+	// +kubebuilder:validation:Enum=legacy;restricted
+	SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"`
+
+	// MemoryTarget configures the $GOMEMLIMIT value for the gRPC catalog Pod. This is a soft memory limit for the server,
+	// which the runtime will attempt to meet but makes no guarantees that it will do so. If this value is set, the Pod
+	// will have the following modifications made to the container running the server:
+	// - the $GOMEMLIMIT environment variable will be set to this value in bytes
+	// - the memory request will be set to this value
+	//
+	// This field should be set if it's desired to reduce the footprint of a catalog server as much as possible, or if
+	// a catalog being served is very large and needs more than the default allocation. If your index image has a file-
+	// system cache, determine a good approximation for this value by doubling the size of the package cache at
+	// /tmp/cache/cache/packages.json in the index image.
+	//
+	// This field is best-effort; if unset, no default will be used and no Pod memory limit or $GOMEMLIMIT value will be set.
+	// +optional
+	MemoryTarget *resource.Quantity `json:"memoryTarget,omitempty"`
+
+	// ExtractContent configures the gRPC catalog Pod to extract catalog metadata from the provided index image and
+	// use a well-known version of the `opm` server to expose it. The catalog index image that this CatalogSource is
+	// configured to use *must* be using the file-based catalogs in order to utilize this feature.
+	// +optional
+	ExtractContent *ExtractContentConfig `json:"extractContent,omitempty"`
+}
+
+// ExtractContentConfig configures context extraction from a file-based catalog index image.
+type ExtractContentConfig struct {
+	// CacheDir is the directory storing the pre-calculated API cache.
+	CacheDir string `json:"cacheDir"`
+	// CatalogDir is the directory storing the file-based catalog contents.
+	CatalogDir string `json:"catalogDir"`
+}
+
+// UpdateStrategy holds all the different types of catalog source update strategies
+// Currently only registry polling strategy is implemented
+type UpdateStrategy struct {
+	*RegistryPoll `json:"registryPoll,omitempty"`
+}
+
+type RegistryPoll struct {
+	// Interval is used to determine the time interval between checks of the latest catalog source version.
+	// The catalog operator polls to see if a new version of the catalog source is available.
+	// If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source.
+	RawInterval string           `json:"interval,omitempty"`
+	Interval    *metav1.Duration `json:"-"`
+	ParsingError string          `json:"-"`
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface. It parses
+// RawInterval at decode time; a missing/null registryPoll key is left nil.
+func (u *UpdateStrategy) UnmarshalJSON(data []byte) (err error) {
+	type alias struct{ *RegistryPoll `json:"registryPoll,omitempty"` }
+	us := alias{}
+	if err = json.Unmarshal(data, &us); err != nil {
+		return err
+	}
+	if us.RegistryPoll == nil {
+		return nil // e.g. "updateStrategy": {} — previously dereferenced a nil pointer below
+	}
+	registryPoll := &RegistryPoll{RawInterval: us.RegistryPoll.RawInterval}
+	duration, err := time.ParseDuration(registryPoll.RawInterval)
+	registryPoll.Interval = &metav1.Duration{Duration: duration}
+	if err != nil {
+		registryPoll.ParsingError = fmt.Sprintf("error parsing spec.updateStrategy.registryPoll.interval. Using the default value of %s instead. Error: %s", DefaultRegistryPollDuration, err)
+		registryPoll.Interval = &metav1.Duration{Duration: DefaultRegistryPollDuration}
+	}
+	u.RegistryPoll = registryPoll
+	return nil
+}
+
+type RegistryServiceStatus struct {
+	Protocol         string      `json:"protocol,omitempty"`
+	ServiceName      string      `json:"serviceName,omitempty"`
+	ServiceNamespace string      `json:"serviceNamespace,omitempty"`
+	Port             string      `json:"port,omitempty"`
+	CreatedAt        metav1.Time `json:"createdAt,omitempty"`
+}
+// Address returns the cluster-internal DNS address ("<service>.<namespace>.svc:<port>") of the registry service.
+func (s *RegistryServiceStatus) Address() string {
+	return fmt.Sprintf("%s.%s.svc:%s", s.ServiceName, s.ServiceNamespace, s.Port)
+}
+
+type GRPCConnectionState struct {
+	Address           string      `json:"address,omitempty"`
+	LastObservedState string      `json:"lastObservedState"`
+	LastConnectTime   metav1.Time `json:"lastConnect,omitempty"`
+}
+
+type CatalogSourceStatus struct {
+	// A human readable message indicating details about why the CatalogSource is in this condition.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// Reason is the reason the CatalogSource was transitioned to its current state.
+	// +optional
+	Reason ConditionReason `json:"reason,omitempty"`
+
+	// The last time the CatalogSource image registry has been polled to ensure the image is up-to-date
+	LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"`
+
+	// ConfigMapReference (deprecated) is the reference to the ConfigMap containing the catalog source's configuration, when the catalog source is a ConfigMap
+	ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"`
+	// RegistryService represents the current state of the GRPC service used to serve the catalog
+	RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"`
+	// ConnectionState represents the current state of the CatalogSource's connection to the registry
+	GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"`
+
+	// Represents the state of a CatalogSource. Note that Message and Reason represent the original
+	// status information, which may be migrated to be conditions based in the future. Any new features
+	// introduced will use conditions.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+type ConfigMapResourceReference struct {
+	Name            string      `json:"name"`
+	Namespace       string      `json:"namespace"`
+	UID             types.UID   `json:"uid,omitempty"`
+	ResourceVersion string      `json:"resourceVersion,omitempty"`
+	LastUpdateTime  metav1.Time `json:"lastUpdateTime,omitempty"`
+}
+// IsAMatch reports whether the referenced ConfigMap's UID and ResourceVersion both match the given object metadata.
+func (r *ConfigMapResourceReference) IsAMatch(object *metav1.ObjectMeta) bool {
+	return r.UID == object.GetUID() && r.ResourceVersion == object.GetResourceVersion()
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName=catsrc,categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The pretty name of the catalog"
+// +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.sourceType`,description="The type of the catalog"
+// +kubebuilder:printcolumn:name="Publisher",type=string,JSONPath=`.spec.publisher`,description="The publisher of the catalog"
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+
+// CatalogSource is a repository of CSVs, CRDs, and operator packages.
+type CatalogSource struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec CatalogSourceSpec `json:"spec"`
+	// +optional
+	Status CatalogSourceStatus `json:"status"`
+}
+// Address returns Spec.Address when set, otherwise the address derived from Status.RegistryServiceStatus. NOTE(review): panics if RegistryServiceStatus is nil — confirm callers guarantee it is populated.
+func (c *CatalogSource) Address() string {
+	if c.Spec.Address != "" {
+		return c.Spec.Address
+	}
+	return c.Status.RegistryServiceStatus.Address()
+}
+
+func (c *CatalogSource) SetError(reason ConditionReason, err error) {
+	c.Status.Reason = reason
+	c.Status.Message = ""
+	if err != nil {
+		c.Status.Message = err.Error()
+	}
+}
+// SetLastUpdateTime records the current time as the latest registry image poll time.
+func (c *CatalogSource) SetLastUpdateTime() {
+	now := metav1.Now()
+	c.Status.LatestImageRegistryPoll = &now
+}
+
+// Update reports whether the polling interval has elapsed since the last image registry poll (or since creation, if never polled).
+func (c *CatalogSource) Update() bool {
+	if !c.Poll() {
+		return false
+	}
+	interval := c.Spec.UpdateStrategy.Interval.Duration
+	latest := c.Status.LatestImageRegistryPoll
+	if latest == nil {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", latest)
+	} else {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", *c.Status.LatestImageRegistryPoll)
+	}
+
+	if c.Status.LatestImageRegistryPoll.IsZero() {
+		logrus.WithField("CatalogSource", c.Name).Debugf("creation timestamp plus interval before now %t", c.CreationTimestamp.Add(interval).Before(time.Now()))
+		if c.CreationTimestamp.Add(interval).Before(time.Now()) {
+			return true
+		}
+	} else {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll plus interval before now %t", c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()))
+		if c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Poll determines whether the polling feature is enabled on the particular catalog source
+func (c *CatalogSource) Poll() bool {
+	if c.Spec.UpdateStrategy == nil {
+		return false
+	}
+	// if no registryPoll strategy is configured polling will not be done
+	if c.Spec.UpdateStrategy.RegistryPoll == nil {
+		return false
+	}
+	// if catalog source is not backed by an image polling will not be done
+	if c.Spec.Image == "" {
+		return false
+	}
+	// if image is not type gRPC polling will not be done
+	if c.Spec.SourceType != SourceTypeGrpc {
+		return false
+	}
+	return true
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CatalogSourceList is a list of CatalogSource resources.
+type CatalogSourceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []CatalogSource `json:"items"`
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
new file mode 100644
index 000000000000..a4c8d1746960
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
@@ -0,0 +1,215 @@
+package v1alpha1
+
+import (
+ "fmt"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/record"
+)
+
+const (
+	CopiedLabelKey = "olm.copiedFrom"
+
+	// ConditionsLengthLimit is the maximum length of Status.Conditions of a
+	// given ClusterServiceVersion object. The oldest condition(s) are removed
+	// from the list as it grows over time to keep it at limit.
+	ConditionsLengthLimit = 20
+)
+
+// obsoleteReasons are the set of reasons that mean a CSV should no longer be processed as active
+var obsoleteReasons = map[ConditionReason]struct{}{
+	CSVReasonReplaced:      {},
+	CSVReasonBeingReplaced: {},
+}
+
+// uncopiableReasons are the set of reasons that should prevent a CSV from being copied to target namespaces
+var uncopiableReasons = map[ConditionReason]struct{}{
+	CSVReasonCopied:                                      {},
+	CSVReasonInvalidInstallModes:                         {},
+	CSVReasonNoTargetNamespaces:                          {},
+	CSVReasonUnsupportedOperatorGroup:                    {},
+	CSVReasonNoOperatorGroup:                             {},
+	CSVReasonTooManyOperatorGroups:                       {},
+	CSVReasonInterOperatorGroupOwnerConflict:             {},
+	CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {},
+}
+
+// safeToAnnotateOperatorGroupReasons are the set of reasons that it's safe to attempt to update the operatorgroup
+// annotations
+var safeToAnnotateOperatorGroupReasons = map[ConditionReason]struct{}{
+	CSVReasonOwnerConflict:                               {},
+	CSVReasonInstallSuccessful:                           {},
+	CSVReasonInvalidInstallModes:                         {},
+	CSVReasonNoTargetNamespaces:                          {},
+	CSVReasonUnsupportedOperatorGroup:                    {},
+	CSVReasonNoOperatorGroup:                             {},
+	CSVReasonTooManyOperatorGroups:                       {},
+	CSVReasonInterOperatorGroupOwnerConflict:             {},
+	CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {},
+}
+
+// SetPhaseWithEventIfChanged emits a Kubernetes event with details of a phase change and sets the current phase if phase, reason, or message would change
+func (c *ClusterServiceVersion) SetPhaseWithEventIfChanged(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) {
+	if c.Status.Phase == phase && c.Status.Reason == reason && c.Status.Message == message {
+		return
+	}
+
+	c.SetPhaseWithEvent(phase, reason, message, now, recorder)
+}
+
+// SetPhaseWithEvent generates a Kubernetes event with details about the phase change and sets the current phase
+func (c *ClusterServiceVersion) SetPhaseWithEvent(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) {
+	var eventtype string
+	if phase == CSVPhaseFailed {
+		eventtype = v1.EventTypeWarning
+	} else {
+		eventtype = v1.EventTypeNormal
+	}
+	go recorder.Event(c, eventtype, string(reason), message) // NOTE(review): fire-and-forget goroutine; event delivery/order is not guaranteed
+	c.SetPhase(phase, reason, message, now)
+}
+
+// SetPhase sets the current phase and adds a condition if necessary
+func (c *ClusterServiceVersion) SetPhase(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time) {
+	newCondition := func() ClusterServiceVersionCondition {
+		return ClusterServiceVersionCondition{
+			Phase:              c.Status.Phase,
+			LastTransitionTime: c.Status.LastTransitionTime,
+			LastUpdateTime:     c.Status.LastUpdateTime,
+			Message:            message,
+			Reason:             reason,
+		}
+	}
+
+	defer c.TrimConditionsIfLimitExceeded()
+
+	c.Status.LastUpdateTime = now
+	if c.Status.Phase != phase {
+		c.Status.Phase = phase
+		c.Status.LastTransitionTime = now
+	}
+	c.Status.Message = message
+	c.Status.Reason = reason
+	if len(c.Status.Conditions) == 0 {
+		c.Status.Conditions = append(c.Status.Conditions, newCondition())
+		return
+	}
+
+	previousCondition := c.Status.Conditions[len(c.Status.Conditions)-1]
+	if previousCondition.Phase != c.Status.Phase || previousCondition.Reason != c.Status.Reason {
+		c.Status.Conditions = append(c.Status.Conditions, newCondition())
+	}
+}
+
+// SetRequirementStatus adds the status of all requirements to the CSV status
+func (c *ClusterServiceVersion) SetRequirementStatus(statuses []RequirementStatus) {
+	c.Status.RequirementStatus = statuses
+}
+
+// IsObsolete reports whether this CSV is being replaced or is marked for deletion
+func (c *ClusterServiceVersion) IsObsolete() bool {
+	for _, condition := range c.Status.Conditions {
+		_, ok := obsoleteReasons[condition.Reason]
+		if ok {
+			return true
+		}
+	}
+	return false
+}
+
+// IsCopied returns true if the CSV has been copied and false otherwise.
+func (c *ClusterServiceVersion) IsCopied() bool {
+	return c.Status.Reason == CSVReasonCopied || IsCopied(c)
+}
+
+func IsCopied(o metav1.Object) bool {
+	annotations := o.GetAnnotations()
+	if annotations != nil {
+		operatorNamespace, ok := annotations[OperatorGroupNamespaceAnnotationKey]
+		if ok && o.GetNamespace() != operatorNamespace {
+			return true
+		}
+	}
+
+	if labels := o.GetLabels(); labels != nil {
+		if _, ok := labels[CopiedLabelKey]; ok {
+			return true
+		}
+	}
+	return false
+}
+// IsUncopiable reports whether the CSV's phase/reason prevents it from being copied to target namespaces.
+func (c *ClusterServiceVersion) IsUncopiable() bool {
+	if c.Status.Phase == CSVPhaseNone {
+		return true
+	}
+	_, ok := uncopiableReasons[c.Status.Reason]
+	return ok
+}
+// IsSafeToUpdateOperatorGroupAnnotations reports whether the CSV's current status reason permits updating operatorgroup annotations.
+func (c *ClusterServiceVersion) IsSafeToUpdateOperatorGroupAnnotations() bool {
+	_, ok := safeToAnnotateOperatorGroupReasons[c.Status.Reason]
+	return ok
+}
+
+// NewInstallModeSet returns an InstallModeSet instantiated from the given list of InstallModes.
+// If the given list is not a set, an error is returned.
+func NewInstallModeSet(modes []InstallMode) (InstallModeSet, error) {
+	set := InstallModeSet{}
+	for _, mode := range modes {
+		if _, exists := set[mode.Type]; exists {
+			return nil, fmt.Errorf("InstallMode list contains duplicates, cannot make set: %v", modes)
+		}
+		set[mode.Type] = mode.Supported
+	}
+
+	return set, nil
+}
+
+// Supports returns an error if the InstallModeSet does not support configuration for
+// the given operatorNamespace and list of target namespaces.
+func (set InstallModeSet) Supports(operatorNamespace string, namespaces []string) error {
+	numNamespaces := len(namespaces)
+	switch {
+	case numNamespaces == 0:
+		return fmt.Errorf("operatorgroup has invalid selected namespaces, cannot configure to watch zero namespaces")
+	case numNamespaces == 1:
+		switch namespaces[0] {
+		case operatorNamespace:
+			if !set[InstallModeTypeOwnNamespace] {
+				return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace)
+			}
+		case v1.NamespaceAll:
+			if !set[InstallModeTypeAllNamespaces] {
+				return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch all namespaces", InstallModeTypeAllNamespaces)
+			}
+		default:
+			if !set[InstallModeTypeSingleNamespace] {
+				return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch one namespace", InstallModeTypeSingleNamespace)
+			}
+		}
+	case numNamespaces > 1 && !set[InstallModeTypeMultiNamespace]:
+		return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch %d namespaces", InstallModeTypeMultiNamespace, numNamespaces)
+	case numNamespaces > 1:
+		for _, namespace := range namespaces {
+			if namespace == operatorNamespace && !set[InstallModeTypeOwnNamespace] {
+				return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace)
+			}
+			if namespace == v1.NamespaceAll {
+				return fmt.Errorf("operatorgroup has invalid selected namespaces, NamespaceAll found when |selected namespaces| > 1")
+			}
+		}
+	}
+
+	return nil
+}
+// TrimConditionsIfLimitExceeded drops the oldest conditions so that at most ConditionsLengthLimit remain.
+func (c *ClusterServiceVersion) TrimConditionsIfLimitExceeded() {
+	if len(c.Status.Conditions) <= ConditionsLengthLimit {
+		return
+	}
+
+	firstIndex := len(c.Status.Conditions) - ConditionsLengthLimit
+	c.Status.Conditions = c.Status.Conditions[firstIndex:len(c.Status.Conditions)]
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go
new file mode 100644
index 000000000000..3e6d3248037e
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go
@@ -0,0 +1,737 @@
+package v1alpha1
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ appsv1 "k8s.io/api/apps/v1"
+ rbac "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/intstr"
+
+ "github.com/operator-framework/api/pkg/lib/version"
+)
+
+const (
+	// ClusterServiceVersionAPIVersion is the fully-qualified <group>/<version> string for CSV objects.
+	ClusterServiceVersionAPIVersion = GroupName + "/" + GroupVersion
+	// ClusterServiceVersionKind is the Kind string for CSV objects.
+	ClusterServiceVersionKind = "ClusterServiceVersion"
+	// OperatorGroupNamespaceAnnotationKey is the "olm.operatorNamespace" annotation key
+	// (presumably set by OLM to record the operator's namespace — confirm against the OLM controller).
+	OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace"
+	// InstallStrategyNameDeployment is the name of the deployment-based install strategy.
+	InstallStrategyNameDeployment = "deployment"
+	// SkipRangeAnnotationKey is the "olm.skipRange" annotation key.
+	SkipRangeAnnotationKey = "olm.skipRange"
+)
+
+// InstallModeType is a supported type of install mode for CSV installation
+type InstallModeType string
+
+const (
+ // InstallModeTypeOwnNamespace indicates that the operator can be a member of an `OperatorGroup` that selects its own namespace.
+ InstallModeTypeOwnNamespace InstallModeType = "OwnNamespace"
+ // InstallModeTypeSingleNamespace indicates that the operator can be a member of an `OperatorGroup` that selects one namespace.
+ InstallModeTypeSingleNamespace InstallModeType = "SingleNamespace"
+ // InstallModeTypeMultiNamespace indicates that the operator can be a member of an `OperatorGroup` that selects more than one namespace.
+ InstallModeTypeMultiNamespace InstallModeType = "MultiNamespace"
+ // InstallModeTypeAllNamespaces indicates that the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string "").
+ InstallModeTypeAllNamespaces InstallModeType = "AllNamespaces"
+)
+
+// InstallMode associates an InstallModeType with a flag representing if the CSV supports it
+// +k8s:openapi-gen=true
+type InstallMode struct {
+ Type InstallModeType `json:"type"`
+ Supported bool `json:"supported"`
+}
+
+// InstallModeSet is a mapping of unique InstallModeTypes to whether they are supported.
+type InstallModeSet map[InstallModeType]bool
+
+// NamedInstallStrategy represents the block of an ClusterServiceVersion resource
+// where the install strategy is specified.
+// +k8s:openapi-gen=true
+type NamedInstallStrategy struct {
+ StrategyName string `json:"strategy"`
+ StrategySpec StrategyDetailsDeployment `json:"spec,omitempty"`
+}
+
+// StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy
+// +k8s:openapi-gen=true
+type StrategyDeploymentPermissions struct {
+ ServiceAccountName string `json:"serviceAccountName"`
+ Rules []rbac.PolicyRule `json:"rules"`
+}
+
+// StrategyDeploymentSpec contains the name, spec and labels for the deployment ALM should create
+// +k8s:openapi-gen=true
+type StrategyDeploymentSpec struct {
+ Name string `json:"name"`
+ Spec appsv1.DeploymentSpec `json:"spec"`
+ Label labels.Set `json:"label,omitempty"`
+}
+
+// StrategyDetailsDeployment represents the parsed details of a Deployment
+// InstallStrategy.
+// +k8s:openapi-gen=true
+type StrategyDetailsDeployment struct {
+ DeploymentSpecs []StrategyDeploymentSpec `json:"deployments"`
+ Permissions []StrategyDeploymentPermissions `json:"permissions,omitempty"`
+ ClusterPermissions []StrategyDeploymentPermissions `json:"clusterPermissions,omitempty"`
+}
+
+// GetStrategyName returns the fixed name identifying the deployment install strategy.
+func (d *StrategyDetailsDeployment) GetStrategyName() string {
+	return InstallStrategyNameDeployment
+}
+
+// StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it
+// +k8s:openapi-gen=true
+type StatusDescriptor struct {
+ Path string `json:"path"`
+ DisplayName string `json:"displayName,omitempty"`
+ Description string `json:"description,omitempty"`
+ XDescriptors []string `json:"x-descriptors,omitempty"`
+ Value json.RawMessage `json:"value,omitempty"`
+}
+
+// SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it
+// +k8s:openapi-gen=true
+type SpecDescriptor struct {
+ Path string `json:"path"`
+ DisplayName string `json:"displayName,omitempty"`
+ Description string `json:"description,omitempty"`
+ XDescriptors []string `json:"x-descriptors,omitempty"`
+ Value json.RawMessage `json:"value,omitempty"`
+}
+
+// ActionDescriptor describes a declarative action that can be performed on a custom resource instance
+// +k8s:openapi-gen=true
+type ActionDescriptor struct {
+ Path string `json:"path"`
+ DisplayName string `json:"displayName,omitempty"`
+ Description string `json:"description,omitempty"`
+ XDescriptors []string `json:"x-descriptors,omitempty"`
+ Value json.RawMessage `json:"value,omitempty"`
+}
+
+// CRDDescription provides details to OLM about the CRDs
+// +k8s:openapi-gen=true
+type CRDDescription struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Kind string `json:"kind"`
+ DisplayName string `json:"displayName,omitempty"`
+ Description string `json:"description,omitempty"`
+ Resources []APIResourceReference `json:"resources,omitempty"`
+ StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"`
+ SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"`
+ ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"`
+}
+
+// APIServiceDescription provides details to OLM about apis provided via aggregation
+// +k8s:openapi-gen=true
+type APIServiceDescription struct {
+ Name string `json:"name"`
+ Group string `json:"group"`
+ Version string `json:"version"`
+ Kind string `json:"kind"`
+ DeploymentName string `json:"deploymentName,omitempty"`
+ ContainerPort int32 `json:"containerPort,omitempty"`
+ DisplayName string `json:"displayName,omitempty"`
+ Description string `json:"description,omitempty"`
+ Resources []APIResourceReference `json:"resources,omitempty"`
+ StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"`
+ SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"`
+ ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"`
+}
+
+// APIResourceReference is a reference to a Kubernetes resource type that the referrer utilizes.
+// +k8s:openapi-gen=true
+type APIResourceReference struct {
+ // Plural name of the referenced resource type (CustomResourceDefinition.Spec.Names[].Plural). Empty string if the referenced resource type is not a custom resource.
+ Name string `json:"name"`
+ // Kind of the referenced resource type.
+ Kind string `json:"kind"`
+ // API Version of the referenced resource type.
+ Version string `json:"version"`
+}
+
+// GetName returns the name of an APIService as derived from its group and
+// version, in the form "<version>.<group>".
+func (d APIServiceDescription) GetName() string {
+	return d.Version + "." + d.Group
+}
+
+// WebhookAdmissionType is the type of admission webhooks supported by OLM
+type WebhookAdmissionType string
+
+const (
+ // ValidatingAdmissionWebhook is for validating admission webhooks
+ ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook"
+ // MutatingAdmissionWebhook is for mutating admission webhooks
+ MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook"
+ // ConversionWebhook is for conversion webhooks
+ ConversionWebhook WebhookAdmissionType = "ConversionWebhook"
+)
+
+// WebhookDescription provides details to OLM about required webhooks
+// +k8s:openapi-gen=true
+type WebhookDescription struct {
+ GenerateName string `json:"generateName"`
+ // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook
+ Type WebhookAdmissionType `json:"type"`
+ DeploymentName string `json:"deploymentName,omitempty"`
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:default=443
+ ContainerPort int32 `json:"containerPort,omitempty"`
+ TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
+ Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"`
+ FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty"`
+ SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects"`
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ AdmissionReviewVersions []string `json:"admissionReviewVersions"`
+ ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+ WebhookPath *string `json:"webhookPath,omitempty"`
+ ConversionCRDs []string `json:"conversionCRDs,omitempty"`
+}
+
+// GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription.
+// The client config targets a Service named "<DomainName()>-service" in the given
+// namespace on the description's ContainerPort, using the supplied namespace
+// selector and CA bundle.
+func (w *WebhookDescription) GetValidatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.ValidatingWebhook {
+	return admissionregistrationv1.ValidatingWebhook{
+		Name:                    w.GenerateName,
+		Rules:                   w.Rules,
+		FailurePolicy:           w.FailurePolicy,
+		MatchPolicy:             w.MatchPolicy,
+		NamespaceSelector:       namespaceSelector,
+		ObjectSelector:          w.ObjectSelector,
+		SideEffects:             w.SideEffects,
+		TimeoutSeconds:          w.TimeoutSeconds,
+		AdmissionReviewVersions: w.AdmissionReviewVersions,
+		ClientConfig: admissionregistrationv1.WebhookClientConfig{
+			Service: &admissionregistrationv1.ServiceReference{
+				Name:      w.DomainName() + "-service",
+				Namespace: namespace,
+				Path:      w.WebhookPath,
+				// Takes the address of the receiver's field; the returned config
+				// observes later mutations of w.ContainerPort.
+				Port: &w.ContainerPort,
+			},
+			CABundle: caBundle,
+		},
+	}
+}
+
+// GetMutatingWebhook returns a MutatingWebhook generated from the WebhookDescription.
+// The client config targets a Service named "<DomainName()>-service" in the given
+// namespace on the description's ContainerPort, using the supplied namespace
+// selector and CA bundle. Unlike the validating variant, it also carries the
+// description's ReinvocationPolicy.
+func (w *WebhookDescription) GetMutatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.MutatingWebhook {
+	return admissionregistrationv1.MutatingWebhook{
+		Name:                    w.GenerateName,
+		Rules:                   w.Rules,
+		FailurePolicy:           w.FailurePolicy,
+		MatchPolicy:             w.MatchPolicy,
+		NamespaceSelector:       namespaceSelector,
+		ObjectSelector:          w.ObjectSelector,
+		SideEffects:             w.SideEffects,
+		TimeoutSeconds:          w.TimeoutSeconds,
+		AdmissionReviewVersions: w.AdmissionReviewVersions,
+		ClientConfig: admissionregistrationv1.WebhookClientConfig{
+			Service: &admissionregistrationv1.ServiceReference{
+				Name:      w.DomainName() + "-service",
+				Namespace: namespace,
+				Path:      w.WebhookPath,
+				// Takes the address of the receiver's field; the returned config
+				// observes later mutations of w.ContainerPort.
+				Port: &w.ContainerPort,
+			},
+			CABundle: caBundle,
+		},
+		ReinvocationPolicy: w.ReinvocationPolicy,
+	}
+}
+
+// DomainName returns the webhook's DeploymentName with every period replaced
+// by a hyphen, converting it into a DNS-1035 compatible label.
+func (w *WebhookDescription) DomainName() string {
+	return strings.ReplaceAll(w.DeploymentName, ".", "-")
+}
+
+// CustomResourceDefinitions declares all of the CRDs managed or required by
+// an operator being ran by ClusterServiceVersion.
+//
+// If the CRD is present in the Owned list, it is implicitly required.
+// +k8s:openapi-gen=true
+type CustomResourceDefinitions struct {
+ Owned []CRDDescription `json:"owned,omitempty"`
+ Required []CRDDescription `json:"required,omitempty"`
+}
+
+// APIServiceDefinitions declares all of the extension apis managed or required by
+// an operator being ran by ClusterServiceVersion.
+// +k8s:openapi-gen=true
+type APIServiceDefinitions struct {
+ Owned []APIServiceDescription `json:"owned,omitempty"`
+ Required []APIServiceDescription `json:"required,omitempty"`
+}
+
+// ClusterServiceVersionSpec declarations tell OLM how to install an operator
+// that can manage apps for a given version.
+// +k8s:openapi-gen=true
+type ClusterServiceVersionSpec struct {
+ InstallStrategy NamedInstallStrategy `json:"install"`
+ Version version.OperatorVersion `json:"version,omitempty"`
+ Maturity string `json:"maturity,omitempty"`
+ CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"`
+ APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"`
+ WebhookDefinitions []WebhookDescription `json:"webhookdefinitions,omitempty"`
+ NativeAPIs []metav1.GroupVersionKind `json:"nativeAPIs,omitempty"`
+ MinKubeVersion string `json:"minKubeVersion,omitempty"`
+
+ // The name of the operator in display format.
+ DisplayName string `json:"displayName"`
+
+ // Description of the operator. Can include the features, limitations or use-cases of the
+ // operator.
+ // +optional
+ Description string `json:"description,omitempty"`
+
+ // A list of keywords describing the operator.
+ // +optional
+ Keywords []string `json:"keywords,omitempty"`
+
+ // A list of organizational entities maintaining the operator.
+ // +optional
+ Maintainers []Maintainer `json:"maintainers,omitempty"`
+
+ // The publishing entity behind the operator.
+ // +optional
+ Provider AppLink `json:"provider,omitempty"`
+
+ // A list of links related to the operator.
+ // +optional
+ Links []AppLink `json:"links,omitempty"`
+
+ // The icon for this operator.
+ // +optional
+ Icon []Icon `json:"icon,omitempty"`
+
+ // InstallModes specify supported installation types
+ // +optional
+ InstallModes []InstallMode `json:"installModes,omitempty"`
+
+ // The name of a CSV this one replaces. Should match the `metadata.Name` field of the old CSV.
+ // +optional
+ Replaces string `json:"replaces,omitempty"`
+
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+
+ // Label selector for related resources.
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+ // Cleanup specifies the cleanup behaviour when the CSV gets deleted
+ // +optional
+ Cleanup CleanupSpec `json:"cleanup,omitempty"`
+
+ // The name(s) of one or more CSV(s) that should be skipped in the upgrade graph.
+ // Should match the `metadata.Name` field of the CSV that should be skipped.
+ // This field is only used during catalog creation and plays no part in cluster runtime.
+ // +optional
+ Skips []string `json:"skips,omitempty"`
+
+ // List any related images, or other container images that your Operator might require to perform their functions.
+ // This list should also include operand images as well. All image references should be specified by
+ // digest (SHA) and not by tag. This field is only used during catalog creation and plays no part in cluster runtime.
+ // +optional
+ RelatedImages []RelatedImage `json:"relatedImages,omitempty"`
+}
+
+// CleanupSpec configures cleanup behavior when the CSV is deleted.
+// +k8s:openapi-gen=true
+type CleanupSpec struct {
+	Enabled bool `json:"enabled"`
+}
+
+// Maintainer identifies an organizational entity maintaining the operator.
+// +k8s:openapi-gen=true
+type Maintainer struct {
+	Name  string `json:"name,omitempty"`
+	Email string `json:"email,omitempty"`
+}
+
+// AppLink is a named URL related to the operator.
+// +k8s:openapi-gen=true
+type AppLink struct {
+	Name string `json:"name,omitempty"`
+	URL  string `json:"url,omitempty"`
+}
+
+// Icon holds base64-encoded image data and its media type.
+// +k8s:openapi-gen=true
+type Icon struct {
+	Data      string `json:"base64data"`
+	MediaType string `json:"mediatype"`
+}
+
+// RelatedImage names a container image the operator requires.
+// +k8s:openapi-gen=true
+type RelatedImage struct {
+	Name  string `json:"name"`
+	Image string `json:"image"`
+}
+
+// ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time.
+type ClusterServiceVersionPhase string
+
+// These are the valid phases of ClusterServiceVersion
+const (
+ CSVPhaseNone = ""
+ // CSVPhasePending means the csv has been accepted by the system, but the install strategy has not been attempted.
+ // This is likely because there are unmet requirements.
+ CSVPhasePending ClusterServiceVersionPhase = "Pending"
+ // CSVPhaseInstallReady means that the requirements are met but the install strategy has not been run.
+ CSVPhaseInstallReady ClusterServiceVersionPhase = "InstallReady"
+ // CSVPhaseInstalling means that the install strategy has been initiated but not completed.
+ CSVPhaseInstalling ClusterServiceVersionPhase = "Installing"
+ // CSVPhaseSucceeded means that the resources in the CSV were created successfully.
+ CSVPhaseSucceeded ClusterServiceVersionPhase = "Succeeded"
+ // CSVPhaseFailed means that the install strategy could not be successfully completed.
+ CSVPhaseFailed ClusterServiceVersionPhase = "Failed"
+ // CSVPhaseUnknown means that for some reason the state of the csv could not be obtained.
+ CSVPhaseUnknown ClusterServiceVersionPhase = "Unknown"
+ // CSVPhaseReplacing means that a newer CSV has been created and the csv's resources will be transitioned to a new owner.
+ CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing"
+ // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted
+ CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting"
+ // CSVPhaseAny matches all other phases in CSV queries
+ CSVPhaseAny ClusterServiceVersionPhase = ""
+)
+
+// ConditionReason is a camelcased reason for the state transition
+type ConditionReason string
+
+const (
+ CSVReasonRequirementsUnknown ConditionReason = "RequirementsUnknown"
+ CSVReasonRequirementsNotMet ConditionReason = "RequirementsNotMet"
+ CSVReasonRequirementsMet ConditionReason = "AllRequirementsMet"
+ CSVReasonOwnerConflict ConditionReason = "OwnerConflict"
+ CSVReasonComponentFailed ConditionReason = "InstallComponentFailed"
+ CSVReasonComponentFailedNoRetry ConditionReason = "InstallComponentFailedNoRetry"
+ CSVReasonInvalidStrategy ConditionReason = "InvalidInstallStrategy"
+ CSVReasonWaiting ConditionReason = "InstallWaiting"
+ CSVReasonInstallSuccessful ConditionReason = "InstallSucceeded"
+ CSVReasonInstallCheckFailed ConditionReason = "InstallCheckFailed"
+ CSVReasonComponentUnhealthy ConditionReason = "ComponentUnhealthy"
+ CSVReasonBeingReplaced ConditionReason = "BeingReplaced"
+ CSVReasonReplaced ConditionReason = "Replaced"
+ CSVReasonNeedsReinstall ConditionReason = "NeedsReinstall"
+ CSVReasonNeedsCertRotation ConditionReason = "NeedsCertRotation"
+ CSVReasonAPIServiceResourceIssue ConditionReason = "APIServiceResourceIssue"
+ CSVReasonAPIServiceResourcesNeedReinstall ConditionReason = "APIServiceResourcesNeedReinstall"
+ CSVReasonAPIServiceInstallFailed ConditionReason = "APIServiceInstallFailed"
+ CSVReasonCopied ConditionReason = "Copied"
+ CSVReasonInvalidInstallModes ConditionReason = "InvalidInstallModes"
+ CSVReasonNoTargetNamespaces ConditionReason = "NoTargetNamespaces"
+ CSVReasonUnsupportedOperatorGroup ConditionReason = "UnsupportedOperatorGroup"
+ CSVReasonNoOperatorGroup ConditionReason = "NoOperatorGroup"
+ CSVReasonTooManyOperatorGroups ConditionReason = "TooManyOperatorGroups"
+ CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict"
+ CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs"
+ CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange"
+ CSVReasonInvalidWebhookDescription ConditionReason = "InvalidWebhookDescription"
+ CSVReasonOperatorConditionNotUpgradeable ConditionReason = "OperatorConditionNotUpgradeable"
+ CSVReasonWaitingForCleanupToComplete ConditionReason = "WaitingOnCleanup"
+)
+
+// HasCAResources returns true if the CSV has owned APIServices or Webhooks.
+func (c *ClusterServiceVersion) HasCAResources() bool {
+	// Owned APIServices and webhook definitions both require CA resources.
+	return len(c.Spec.APIServiceDefinitions.Owned)+len(c.Spec.WebhookDefinitions) > 0
+}
+
+// Conditions appear in the status as a record of state transitions on the ClusterServiceVersion
+// +k8s:openapi-gen=true
+type ClusterServiceVersionCondition struct {
+ // Condition of the ClusterServiceVersion
+ Phase ClusterServiceVersionPhase `json:"phase,omitempty"`
+ // A human readable message indicating details about why the ClusterServiceVersion is in this condition.
+ // +optional
+ Message string `json:"message,omitempty"`
+ // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state.
+ // e.g. 'RequirementsNotMet'
+ // +optional
+ Reason ConditionReason `json:"reason,omitempty"`
+ // Last time we updated the status
+ // +optional
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the status transitioned from one status to another.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// OwnsCRD determines whether the current CSV owns a particular CRD,
+// matching by the CRD description's Name.
+func (csv ClusterServiceVersion) OwnsCRD(name string) bool {
+	for _, owned := range csv.Spec.CustomResourceDefinitions.Owned {
+		if owned.Name == name {
+			return true
+		}
+	}
+	return false
+}
+
+// OwnsAPIService determines whether the current CSV owns a particular
+// APIService, matching by the derived "<version>.<group>" name.
+func (csv ClusterServiceVersion) OwnsAPIService(name string) bool {
+	for _, owned := range csv.Spec.APIServiceDefinitions.Owned {
+		if owned.GetName() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// StatusReason is a camelcased reason for the status of a RequirementStatus or DependentStatus
+type StatusReason string
+
+const (
+ RequirementStatusReasonPresent StatusReason = "Present"
+ RequirementStatusReasonNotPresent StatusReason = "NotPresent"
+ RequirementStatusReasonPresentNotSatisfied StatusReason = "PresentNotSatisfied"
+ // The CRD is present but the Established condition is False (not available)
+ RequirementStatusReasonNotAvailable StatusReason = "PresentNotAvailable"
+ DependentStatusReasonSatisfied StatusReason = "Satisfied"
+ DependentStatusReasonNotSatisfied StatusReason = "NotSatisfied"
+)
+
+// DependentStatus is the status for a dependent requirement (to prevent infinite nesting)
+// +k8s:openapi-gen=true
+type DependentStatus struct {
+ Group string `json:"group"`
+ Version string `json:"version"`
+ Kind string `json:"kind"`
+ Status StatusReason `json:"status"`
+ UUID string `json:"uuid,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+type RequirementStatus struct {
+ Group string `json:"group"`
+ Version string `json:"version"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ Status StatusReason `json:"status"`
+ Message string `json:"message"`
+ UUID string `json:"uuid,omitempty"`
+ Dependents []DependentStatus `json:"dependents,omitempty"`
+}
+
+// ClusterServiceVersionStatus represents information about the status of a CSV. Status may trail the actual
+// state of a system.
+// +k8s:openapi-gen=true
+type ClusterServiceVersionStatus struct {
+ // Current condition of the ClusterServiceVersion
+ Phase ClusterServiceVersionPhase `json:"phase,omitempty"`
+ // A human readable message indicating details about why the ClusterServiceVersion is in this condition.
+ // +optional
+ Message string `json:"message,omitempty"`
+ // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state.
+ // e.g. 'RequirementsNotMet'
+ // +optional
+ Reason ConditionReason `json:"reason,omitempty"`
+ // Last time we updated the status
+ // +optional
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the status transitioned from one status to another.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // List of conditions, a history of state transitions
+ Conditions []ClusterServiceVersionCondition `json:"conditions,omitempty"`
+ // The status of each requirement for this CSV
+ RequirementStatus []RequirementStatus `json:"requirementStatus,omitempty"`
+ // Last time the owned APIService certs were updated
+ // +optional
+ CertsLastUpdated *metav1.Time `json:"certsLastUpdated,omitempty"`
+ // Time the owned APIService certs will rotate next
+ // +optional
+ CertsRotateAt *metav1.Time `json:"certsRotateAt,omitempty"`
+ // CleanupStatus represents information about the status of cleanup while a CSV is pending deletion
+ // +optional
+ Cleanup CleanupStatus `json:"cleanup,omitempty"`
+}
+
+// CleanupStatus represents information about the status of cleanup while a CSV is pending deletion
+// +k8s:openapi-gen=true
+type CleanupStatus struct {
+ // PendingDeletion is the list of custom resource objects that are pending deletion and blocked on finalizers.
+ // This indicates the progress of cleanup that is blocking CSV deletion or operator uninstall.
+ // +optional
+ PendingDeletion []ResourceList `json:"pendingDeletion,omitempty"`
+}
+
+// ResourceList represents a list of resources which are of the same Group/Kind
+// +k8s:openapi-gen=true
+type ResourceList struct {
+ Group string `json:"group"`
+ Kind string `json:"kind"`
+ Instances []ResourceInstance `json:"instances"`
+}
+
+// +k8s:openapi-gen=true
+type ResourceInstance struct {
+ Name string `json:"name"`
+ // Namespace can be empty for cluster-scoped resources
+ Namespace string `json:"namespace,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:resource:shortName={csv, csvs},categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV"
+// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV"
+// +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces"
+// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
+
+// ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`.
+type ClusterServiceVersion struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec ClusterServiceVersionSpec `json:"spec"`
+ // +optional
+ Status ClusterServiceVersionStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterServiceVersionList represents a list of ClusterServiceVersions.
+type ClusterServiceVersionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterServiceVersion `json:"items"`
+}
+
+// GetAllCRDDescriptions returns a deduplicated union of the owned and
+// required CRDDescriptions.
+//
+// Descriptions with the same name prefer the value in Owned.
+// Descriptions are returned in alphabetical order.
+func (csv ClusterServiceVersion) GetAllCRDDescriptions() []CRDDescription {
+	byName := make(map[string]CRDDescription)
+	for _, desc := range csv.Spec.CustomResourceDefinitions.Required {
+		byName[desc.Name] = desc
+	}
+	// Owned entries overwrite required entries with the same name.
+	for _, desc := range csv.Spec.CustomResourceDefinitions.Owned {
+		byName[desc.Name] = desc
+	}
+
+	names := make([]string, 0, len(byName))
+	for name := range byName {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	descs := make([]CRDDescription, 0, len(names))
+	for _, name := range names {
+		descs = append(descs, byName[name])
+	}
+
+	return descs
+}
+
+// GetAllAPIServiceDescriptions returns a deduplicated union of the owned and
+// required APIServiceDescriptions.
+//
+// Descriptions with the same name prefer the value in Owned.
+// Descriptions are returned in alphabetical order.
+func (csv ClusterServiceVersion) GetAllAPIServiceDescriptions() []APIServiceDescription {
+	byName := make(map[string]APIServiceDescription)
+	for _, desc := range csv.Spec.APIServiceDefinitions.Required {
+		byName[desc.GetName()] = desc
+	}
+	// Owned entries overwrite required entries with the same name.
+	for _, desc := range csv.Spec.APIServiceDefinitions.Owned {
+		byName[desc.GetName()] = desc
+	}
+
+	names := make([]string, 0, len(byName))
+	for name := range byName {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	descs := make([]APIServiceDescription, 0, len(names))
+	for _, name := range names {
+		descs = append(descs, byName[name])
+	}
+
+	return descs
+}
+
+// GetRequiredAPIServiceDescriptions returns a deduplicated set of required
+// APIServiceDescriptions with the intersection of required and owned removed.
+// Equivalent to the set subtraction required - owned.
+//
+// Descriptions are returned in alphabetical order.
+func (csv ClusterServiceVersion) GetRequiredAPIServiceDescriptions() []APIServiceDescription {
+	byName := make(map[string]APIServiceDescription)
+	for _, desc := range csv.Spec.APIServiceDefinitions.Required {
+		byName[desc.GetName()] = desc
+	}
+
+	// Subtract anything also listed as owned (delete is a no-op for absent keys).
+	for _, desc := range csv.Spec.APIServiceDefinitions.Owned {
+		delete(byName, desc.GetName())
+	}
+
+	names := make([]string, 0, len(byName))
+	for name := range byName {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	descs := make([]APIServiceDescription, 0, len(names))
+	for _, name := range names {
+		descs = append(descs, byName[name])
+	}
+
+	return descs
+}
+
+// GetOwnedAPIServiceDescriptions returns a deduplicated set of owned
+// APIServiceDescriptions.
+//
+// Descriptions are returned in alphabetical order.
+func (csv ClusterServiceVersion) GetOwnedAPIServiceDescriptions() []APIServiceDescription {
+	byName := make(map[string]APIServiceDescription)
+	for _, desc := range csv.Spec.APIServiceDefinitions.Owned {
+		byName[desc.GetName()] = desc
+	}
+
+	names := make([]string, 0, len(byName))
+	for name := range byName {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	descs := make([]APIServiceDescription, 0, len(names))
+	for _, name := range names {
+		descs = append(descs, byName[name])
+	}
+
+	return descs
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go
new file mode 100644
index 000000000000..74bc9b819a40
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go
@@ -0,0 +1,6 @@
+// +groupName=operators.coreos.com
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators
+
+// Package v1alpha1 contains resources types for version v1alpha1 of the operators.coreos.com API group.
+package v1alpha1
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go
new file mode 100644
index 000000000000..09deba525b7c
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go
@@ -0,0 +1,389 @@
+package v1alpha1
+
+import (
+ "errors"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ InstallPlanKind = "InstallPlan"
+ InstallPlanAPIVersion = GroupName + "/" + GroupVersion
+)
+
+// Approval is the user approval policy for an InstallPlan.
+// It must be one of "Automatic" or "Manual".
+type Approval string
+
+const (
+ ApprovalAutomatic Approval = "Automatic"
+ ApprovalManual Approval = "Manual"
+)
+
+// InstallPlanSpec defines a set of Application resources to be installed
+type InstallPlanSpec struct {
+ CatalogSource string `json:"source,omitempty"`
+ CatalogSourceNamespace string `json:"sourceNamespace,omitempty"`
+ ClusterServiceVersionNames []string `json:"clusterServiceVersionNames"`
+ Approval Approval `json:"approval"`
+ Approved bool `json:"approved"`
+ Generation int `json:"generation,omitempty"`
+}
+
+// InstallPlanPhase is the current status of a InstallPlan as a whole.
+type InstallPlanPhase string
+
+const (
+ InstallPlanPhaseNone InstallPlanPhase = ""
+ InstallPlanPhasePlanning InstallPlanPhase = "Planning"
+ InstallPlanPhaseRequiresApproval InstallPlanPhase = "RequiresApproval"
+ InstallPlanPhaseInstalling InstallPlanPhase = "Installing"
+ InstallPlanPhaseComplete InstallPlanPhase = "Complete"
+ InstallPlanPhaseFailed InstallPlanPhase = "Failed"
+)
+
+// InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole.
+type InstallPlanConditionType string
+
+const (
+ InstallPlanResolved InstallPlanConditionType = "Resolved"
+ InstallPlanInstalled InstallPlanConditionType = "Installed"
+)
+
+// InstallPlanConditionReason is a camelcased reason for the state transition.
+type InstallPlanConditionReason string
+
+const (
+ InstallPlanReasonPlanUnknown InstallPlanConditionReason = "PlanUnknown"
+ InstallPlanReasonInstallCheckFailed InstallPlanConditionReason = "InstallCheckFailed"
+ InstallPlanReasonDependencyConflict InstallPlanConditionReason = "DependenciesConflict"
+ InstallPlanReasonComponentFailed InstallPlanConditionReason = "InstallComponentFailed"
+)
+
+// StepStatus is the current status of a particular resource in an
+// InstallPlan
+type StepStatus string
+
+const (
+ StepStatusUnknown StepStatus = "Unknown"
+ StepStatusNotPresent StepStatus = "NotPresent"
+ StepStatusPresent StepStatus = "Present"
+ StepStatusCreated StepStatus = "Created"
+ StepStatusNotCreated StepStatus = "NotCreated"
+ StepStatusWaitingForAPI StepStatus = "WaitingForApi"
+ StepStatusUnsupportedResource StepStatus = "UnsupportedResource"
+)
+
+// ErrInvalidInstallPlan is the error returned by functions that operate on
+// InstallPlans when the InstallPlan does not contain totally valid data.
+var ErrInvalidInstallPlan = errors.New("the InstallPlan contains invalid data")
+
+// InstallPlanStatus represents the information about the status of
+// steps required to complete installation.
+//
+// Status may trail the actual state of a system.
+type InstallPlanStatus struct {
+ Phase InstallPlanPhase `json:"phase"`
+ Conditions []InstallPlanCondition `json:"conditions,omitempty"`
+ CatalogSources []string `json:"catalogSources"`
+ Plan []*Step `json:"plan,omitempty"`
+ // BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster.
+ // +optional
+ BundleLookups []BundleLookup `json:"bundleLookups,omitempty"`
+ // AttenuatedServiceAccountRef references the service account that is used
+ // to do scoped operator install.
+ AttenuatedServiceAccountRef *corev1.ObjectReference `json:"attenuatedServiceAccountRef,omitempty"`
+
+ // StartTime is the time when the controller began applying
+ // the resources listed in the plan to the cluster.
+ // +optional
+ StartTime *metav1.Time `json:"startTime,omitempty"`
+
+ // Message is a human-readable message containing detailed
+ // information that may be important to understanding why the
+ // plan has its current status.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// InstallPlanCondition represents the overall status of the execution of
+// an InstallPlan.
+type InstallPlanCondition struct {
+ Type InstallPlanConditionType `json:"type,omitempty"`
+ Status corev1.ConditionStatus `json:"status,omitempty"` // True, False, or Unknown
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason InstallPlanConditionReason `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// allow overwriting `now` function for deterministic tests
+var now = metav1.Now
+
+// GetCondition returns the InstallPlanCondition of the given type if it exists in the InstallPlanStatus' Conditions.
+// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
+func (s InstallPlanStatus) GetCondition(conditionType InstallPlanConditionType) InstallPlanCondition {
+ for _, cond := range s.Conditions {
+ if cond.Type == conditionType {
+ return cond
+ }
+ }
+
+ return InstallPlanCondition{
+ Type: conditionType,
+ Status: corev1.ConditionUnknown,
+ }
+}
+
+// SetCondition adds or updates a condition, using `Type` as merge key.
+func (s *InstallPlanStatus) SetCondition(cond InstallPlanCondition) InstallPlanCondition {
+ for i, existing := range s.Conditions {
+ if existing.Type != cond.Type {
+ continue
+ }
+ if existing.Status == cond.Status {
+ cond.LastTransitionTime = existing.LastTransitionTime
+ }
+ s.Conditions[i] = cond
+ return cond
+ }
+ s.Conditions = append(s.Conditions, cond)
+ return cond
+}
+
+func OrderSteps(steps []*Step) []*Step {
+ // CSVs must be applied first
+ csvList := []*Step{}
+
+ // CRDs must be applied second
+ crdList := []*Step{}
+
+ // Other resources may be applied in any order
+ remainingResources := []*Step{}
+ for _, step := range steps {
+ switch step.Resource.Kind {
+ case crdKind:
+ crdList = append(crdList, step)
+ case ClusterServiceVersionKind:
+ csvList = append(csvList, step)
+ default:
+ remainingResources = append(remainingResources, step)
+ }
+ }
+
+ result := make([]*Step, len(steps))
+ i := 0
+
+ for j := range csvList {
+ result[i] = csvList[j]
+ i++
+ }
+
+ for j := range crdList {
+ result[i] = crdList[j]
+ i++
+ }
+
+ for j := range remainingResources {
+ result[i] = remainingResources[j]
+ i++
+ }
+
+ return result
+}
+
+func (s InstallPlanStatus) NeedsRequeue() bool {
+ for _, step := range s.Plan {
+ switch step.Status {
+ case StepStatusWaitingForAPI:
+ return true
+ }
+ }
+
+ return false
+}
+func ConditionFailed(cond InstallPlanConditionType, reason InstallPlanConditionReason, message string, now *metav1.Time) InstallPlanCondition {
+ return InstallPlanCondition{
+ Type: cond,
+ Status: corev1.ConditionFalse,
+ Reason: reason,
+ Message: message,
+ LastUpdateTime: now,
+ LastTransitionTime: now,
+ }
+}
+
+func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCondition {
+ return InstallPlanCondition{
+ Type: cond,
+ Status: corev1.ConditionTrue,
+ LastUpdateTime: now,
+ LastTransitionTime: now,
+ }
+}
+
+// Step represents the status of an individual step in an InstallPlan.
+type Step struct {
+ Resolving string `json:"resolving"`
+ Resource StepResource `json:"resource"`
+ Optional bool `json:"optional,omitempty"`
+ Status StepStatus `json:"status"`
+}
+
+// BundleLookupConditionType is a category of the overall state of a BundleLookup.
+type BundleLookupConditionType string
+
+const (
+ // BundleLookupPending describes BundleLookups that are not complete.
+ BundleLookupPending BundleLookupConditionType = "BundleLookupPending"
+
+ // BundleLookupFailed describes conditions types for when BundleLookups fail
+ BundleLookupFailed BundleLookupConditionType = "BundleLookupFailed"
+
+ crdKind = "CustomResourceDefinition"
+)
+
+type BundleLookupCondition struct {
+ // Type of condition.
+ Type BundleLookupConditionType `json:"type"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty"`
+ // Last time the condition was probed.
+ // +optional
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// BundleLookup is a request to pull and unpackage the content of a bundle to the cluster.
+type BundleLookup struct {
+ // Path refers to the location of a bundle to pull.
+ // It's typically an image reference.
+ Path string `json:"path"`
+ // Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs)
+ Identifier string `json:"identifier"`
+ // Replaces is the name of the bundle to replace with the one found at Path.
+ Replaces string `json:"replaces"`
+ // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from.
+ CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
+ // Conditions represents the overall state of a BundleLookup.
+ // +optional
+ Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // The effective properties of the unpacked bundle.
+ // +optional
+ Properties string `json:"properties,omitempty"`
+}
+
+// GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions.
+// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
+func (b BundleLookup) GetCondition(conditionType BundleLookupConditionType) BundleLookupCondition {
+ for _, cond := range b.Conditions {
+ if cond.Type == conditionType {
+ return cond
+ }
+ }
+
+ return BundleLookupCondition{
+ Type: conditionType,
+ Status: corev1.ConditionUnknown,
+ }
+}
+
+// RemoveCondition removes the BundleLookupCondition of the given type from the BundleLookup's Conditions if it exists.
+func (b *BundleLookup) RemoveCondition(conditionType BundleLookupConditionType) {
+ for i, cond := range b.Conditions {
+ if cond.Type == conditionType {
+ b.Conditions = append(b.Conditions[:i], b.Conditions[i+1:]...)
+ if len(b.Conditions) == 0 {
+ b.Conditions = nil
+ }
+ return
+ }
+ }
+}
+
+// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found.
+func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCondition {
+ for i, existing := range b.Conditions {
+ if existing.Type != cond.Type {
+ continue
+ }
+ if existing.Status == cond.Status {
+ cond.LastTransitionTime = existing.LastTransitionTime
+ }
+ b.Conditions[i] = cond
+ return cond
+ }
+ b.Conditions = append(b.Conditions, cond)
+
+ return cond
+}
+
+func (s *Step) String() string {
+ return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status)
+}
+
+// StepResource represents the status of a resource to be tracked by an
+// InstallPlan.
+type StepResource struct {
+ CatalogSource string `json:"sourceName"`
+ CatalogSourceNamespace string `json:"sourceNamespace"`
+ Group string `json:"group"`
+ Version string `json:"version"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ Manifest string `json:"manifest,omitempty"`
+}
+
+func (r StepResource) String() string {
+ return fmt.Sprintf("%s[%s/%s/%s (%s/%s)]", r.Name, r.Group, r.Version, r.Kind, r.CatalogSource, r.CatalogSourceNamespace)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName=ip,categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="CSV",type=string,JSONPath=`.spec.clusterServiceVersionNames[0]`,description="The first CSV in the list of clusterServiceVersionNames"
+// +kubebuilder:printcolumn:name="Approval",type=string,JSONPath=`.spec.approval`,description="The approval mode"
+// +kubebuilder:printcolumn:name="Approved",type=boolean,JSONPath=`.spec.approved`
+
+// InstallPlan defines the installation of a set of operators.
+type InstallPlan struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec InstallPlanSpec `json:"spec"`
+ // +optional
+ Status InstallPlanStatus `json:"status"`
+}
+
+// EnsureCatalogSource ensures that a CatalogSource is present in the Status
+// block of an InstallPlan.
+func (p *InstallPlan) EnsureCatalogSource(sourceName string) {
+ for _, srcName := range p.Status.CatalogSources {
+ if srcName == sourceName {
+ return
+ }
+ }
+
+ p.Status.CatalogSources = append(p.Status.CatalogSources, sourceName)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InstallPlanList is a list of InstallPlan resources.
+type InstallPlanList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []InstallPlan `json:"items"`
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go
new file mode 100644
index 000000000000..f1cd86f1a372
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go
@@ -0,0 +1,55 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/operator-framework/api/pkg/operators"
+)
+
+const (
+ // GroupName is the group name used in this package.
+ GroupName = operators.GroupName
+ // GroupVersion is the group version used in this package.
+ GroupVersion = "v1alpha1"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder initializes a scheme builder
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a global function that registers this API group & version to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+
+ // localSchemeBuilder is expected by generated conversion functions
+ localSchemeBuilder = &SchemeBuilder
+)
+
+// addKnownTypes adds the list of known types to Scheme
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CatalogSource{},
+ &CatalogSourceList{},
+ &InstallPlan{},
+ &InstallPlanList{},
+ &Subscription{},
+ &SubscriptionList{},
+ &ClusterServiceVersion{},
+ &ClusterServiceVersionList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
new file mode 100644
index 000000000000..292fedf9b989
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
@@ -0,0 +1,360 @@
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+ SubscriptionKind = "Subscription"
+ SubscriptionCRDAPIVersion = GroupName + "/" + GroupVersion
+)
+
+// SubscriptionState tracks when updates are available, installing, or service is up to date
+type SubscriptionState string
+
+const (
+ SubscriptionStateNone = ""
+ SubscriptionStateFailed = "UpgradeFailed"
+ SubscriptionStateUpgradeAvailable = "UpgradeAvailable"
+ SubscriptionStateUpgradePending = "UpgradePending"
+ SubscriptionStateAtLatest = "AtLatestKnown"
+)
+
+const (
+ SubscriptionReasonInvalidCatalog ConditionReason = "InvalidCatalog"
+ SubscriptionReasonUpgradeSucceeded ConditionReason = "UpgradeSucceeded"
+)
+
+// SubscriptionSpec defines an Application that can be installed
+type SubscriptionSpec struct {
+ CatalogSource string `json:"source"`
+ CatalogSourceNamespace string `json:"sourceNamespace"`
+ Package string `json:"name"`
+ Channel string `json:"channel,omitempty"`
+ StartingCSV string `json:"startingCSV,omitempty"`
+ InstallPlanApproval Approval `json:"installPlanApproval,omitempty"`
+ Config *SubscriptionConfig `json:"config,omitempty"`
+}
+
+// SubscriptionConfig contains configuration specified for a subscription.
+type SubscriptionConfig struct {
+ // Selector is the label selector for pods to be configured.
+ // Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // Selector which must match a node's labels for the pod to be scheduled on that node.
+ // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // Tolerations are the pod's tolerations.
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+ // Resources represents compute resources required by this container.
+ // Immutable.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ // +optional
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+
+ // EnvFrom is a list of sources to populate environment variables in the container.
+ // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ // will be reported as an event when the container is starting. When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Immutable.
+ // +optional
+ EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
+ // Env is a list of environment variables to set in the container.
+ // Cannot be updated.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge"`
+
+ // List of Volumes to set in the podSpec.
+ // +optional
+ Volumes []corev1.Volume `json:"volumes,omitempty"`
+
+ // List of VolumeMounts to set in the container.
+ // +optional
+ VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+
+ // If specified, overrides the pod's scheduling constraints.
+ // nil sub-attributes will *not* override the original values in the pod.spec for those sub-attributes.
+ // Use empty object ({}) to erase original sub-attribute values.
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+
+ // Annotations is an unstructured key value map stored with each Deployment, Pod, APIService in the Operator.
+ // Typically, annotations may be set by external tools to store and retrieve arbitrary metadata.
+ // Use this field to pre-define annotations that OLM should add to each of the Subscription's
+ // deployments, pods, and apiservices.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+}
+
+// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true"
+// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties).
+type SubscriptionConditionType string
+
+const (
+ // SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy.
+ SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy"
+
+ // SubscriptionInstallPlanMissing indicates that a Subscription's InstallPlan is missing.
+ SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing"
+
+ // SubscriptionInstallPlanPending indicates that a Subscription's InstallPlan is pending installation.
+ SubscriptionInstallPlanPending SubscriptionConditionType = "InstallPlanPending"
+
+ // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed.
+ SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed"
+
+ // SubscriptionResolutionFailed indicates that the dependency resolution in the namespace in which the subscription is created has failed
+ SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed"
+
+ // SubscriptionBundleUnpacking indicates that the unpack job is currently running
+ SubscriptionBundleUnpacking SubscriptionConditionType = "BundleUnpacking"
+
+ // SubscriptionBundleUnpackFailed indicates that the unpack job failed
+ SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed"
+
+ // SubscriptionDeprecated is a roll-up condition which indicates that the Operator currently installed with this Subscription
+	// has been deprecated. It will be present when any of the three deprecation types (Package, Channel, Bundle) are present.
+ SubscriptionDeprecated SubscriptionConditionType = "Deprecated"
+
+	// SubscriptionPackageDeprecated indicates that the Package currently installed with this Subscription has been deprecated.
+ SubscriptionPackageDeprecated SubscriptionConditionType = "PackageDeprecated"
+
+	// SubscriptionChannelDeprecated indicates that the Channel used with this Subscription has been deprecated.
+ SubscriptionChannelDeprecated SubscriptionConditionType = "ChannelDeprecated"
+
+	// SubscriptionBundleDeprecated indicates that the Bundle currently installed with this Subscription has been deprecated.
+ SubscriptionBundleDeprecated SubscriptionConditionType = "BundleDeprecated"
+)
+
+const (
+ // NoCatalogSourcesFound is a reason string for Subscriptions with unhealthy CatalogSources due to none being available.
+ NoCatalogSourcesFound = "NoCatalogSourcesFound"
+
+ // AllCatalogSourcesHealthy is a reason string for Subscriptions that transitioned due to all CatalogSources being healthy.
+ AllCatalogSourcesHealthy = "AllCatalogSourcesHealthy"
+
+ // CatalogSourcesAdded is a reason string for Subscriptions that transitioned due to CatalogSources being added.
+ CatalogSourcesAdded = "CatalogSourcesAdded"
+
+ // CatalogSourcesUpdated is a reason string for Subscriptions that transitioned due to CatalogSource being updated.
+ CatalogSourcesUpdated = "CatalogSourcesUpdated"
+
+ // CatalogSourcesDeleted is a reason string for Subscriptions that transitioned due to CatalogSources being removed.
+ CatalogSourcesDeleted = "CatalogSourcesDeleted"
+
+ // UnhealthyCatalogSourceFound is a reason string for Subscriptions that transitioned because an unhealthy CatalogSource was found.
+ UnhealthyCatalogSourceFound = "UnhealthyCatalogSourceFound"
+
+ // ReferencedInstallPlanNotFound is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being found.
+ ReferencedInstallPlanNotFound = "ReferencedInstallPlanNotFound"
+
+ // InstallPlanNotYetReconciled is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being reconciled yet.
+ InstallPlanNotYetReconciled = "InstallPlanNotYetReconciled"
+
+ // InstallPlanFailed is a reason string for Subscriptions that transitioned due to a referenced InstallPlan failing without setting an explicit failure condition.
+ InstallPlanFailed = "InstallPlanFailed"
+)
+
+// SubscriptionCondition represents the latest available observations of a Subscription's state.
+type SubscriptionCondition struct {
+ // Type is the type of Subscription condition.
+ Type SubscriptionConditionType `json:"type" description:"type of Subscription condition"`
+
+ // Status is the status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
+
+ // Reason is a one-word CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
+
+ // Message is a human-readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
+
+ // LastHeartbeatTime is the last time we got an update on a given condition
+ // +optional
+ LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"`
+
+ // LastTransitionTime is the last time the condition transit from one status to another
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another" hash:"ignore"`
+}
+
+// Equals returns true if a SubscriptionCondition equals the one given, false otherwise.
+// Equality is determined by the equality of the type, status, reason, and message fields ONLY.
+func (s SubscriptionCondition) Equals(condition SubscriptionCondition) bool {
+ return s.Type == condition.Type && s.Status == condition.Status && s.Reason == condition.Reason && s.Message == condition.Message
+}
+
+type SubscriptionStatus struct {
+ // CurrentCSV is the CSV the Subscription is progressing to.
+ // +optional
+ CurrentCSV string `json:"currentCSV,omitempty"`
+
+ // InstalledCSV is the CSV currently installed by the Subscription.
+ // +optional
+ InstalledCSV string `json:"installedCSV,omitempty"`
+
+ // Install is a reference to the latest InstallPlan generated for the Subscription.
+ // DEPRECATED: InstallPlanRef
+ // +optional
+ Install *InstallPlanReference `json:"installplan,omitempty"`
+
+ // State represents the current state of the Subscription
+ // +optional
+ State SubscriptionState `json:"state,omitempty"`
+
+ // Reason is the reason the Subscription was transitioned to its current state.
+ // +optional
+ Reason ConditionReason `json:"reason,omitempty"`
+
+ // InstallPlanGeneration is the current generation of the installplan
+ // +optional
+ InstallPlanGeneration int `json:"installPlanGeneration,omitempty"`
+
+ // InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV.
+ // +optional
+ InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"`
+
+ // CatalogHealth contains the Subscription's view of its relevant CatalogSources' status.
+ // It is used to determine SubscriptionStatusConditions related to CatalogSources.
+ // +optional
+ CatalogHealth []SubscriptionCatalogHealth `json:"catalogHealth,omitempty"`
+
+ // Conditions is a list of the latest available observations about a Subscription's current state.
+ // +optional
+ Conditions []SubscriptionCondition `json:"conditions,omitempty" hash:"set"`
+
+ // LastUpdated represents the last time that the Subscription status was updated.
+ LastUpdated metav1.Time `json:"lastUpdated"`
+}
+
+// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions.
+// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
+func (s SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition {
+ for _, cond := range s.Conditions {
+ if cond.Type == conditionType {
+ return cond
+ }
+ }
+
+ return SubscriptionCondition{
+ Type: conditionType,
+ Status: corev1.ConditionUnknown,
+ }
+}
+
+// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) SetCondition(condition SubscriptionCondition) {
+ for i, cond := range s.Conditions {
+ if cond.Type == condition.Type {
+ s.Conditions[i] = condition
+ return
+ }
+ }
+
+ s.Conditions = append(s.Conditions, condition)
+}
+
+// RemoveConditions removes any conditions of the given types from the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) RemoveConditions(remove ...SubscriptionConditionType) {
+ exclusions := map[SubscriptionConditionType]struct{}{}
+ for _, r := range remove {
+ exclusions[r] = struct{}{}
+ }
+
+ var filtered []SubscriptionCondition
+ for _, cond := range s.Conditions {
+ if _, ok := exclusions[cond.Type]; ok {
+ // Skip excluded condition types
+ continue
+ }
+ filtered = append(filtered, cond)
+ }
+
+ s.Conditions = filtered
+}
+
+type InstallPlanReference struct {
+ APIVersion string `json:"apiVersion"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ UID types.UID `json:"uuid"`
+}
+
+// SubscriptionCatalogHealth describes the health of a CatalogSource the Subscription knows about.
+type SubscriptionCatalogHealth struct {
+ // CatalogSourceRef is a reference to a CatalogSource.
+ CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
+
+ // LastUpdated represents the last time that the CatalogSourceHealth changed
+ LastUpdated *metav1.Time `json:"lastUpdated"`
+
+ // Healthy is true if the CatalogSource is healthy; false otherwise.
+ Healthy bool `json:"healthy"`
+}
+
+// Equals returns true if a SubscriptionCatalogHealth equals the one given, false otherwise.
+// Equality is based SOLELY on health and UID.
+func (s SubscriptionCatalogHealth) Equals(health SubscriptionCatalogHealth) bool {
+ return s.Healthy == health.Healthy && s.CatalogSourceRef.UID == health.CatalogSourceRef.UID
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName={sub, subs},categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Package",type=string,JSONPath=`.spec.name`,description="The package subscribed to"
+// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.source`,description="The catalog source for the specified package"
+// +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel`,description="The channel of updates to subscribe to"
+
+// Subscription keeps operators up to date by tracking changes to Catalogs.
+type Subscription struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec *SubscriptionSpec `json:"spec"`
+ // +optional
+ Status SubscriptionStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubscriptionList is a list of Subscription resources.
+type SubscriptionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Subscription `json:"items"`
+}
+
+// GetInstallPlanApproval gets the configured install plan approval or the default
+func (s *Subscription) GetInstallPlanApproval() Approval {
+ if s.Spec.InstallPlanApproval == ApprovalManual {
+ return ApprovalManual
+ }
+ return ApprovalAutomatic
+}
+
+// NewInstallPlanReference returns an InstallPlanReference for the given ObjectReference.
+func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference {
+ return &InstallPlanReference{
+ APIVersion: ref.APIVersion,
+ Kind: ref.Kind,
+ Name: ref.Name,
+ UID: ref.UID,
+ }
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..684a7432a6e5
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1632 @@
+//go:build !ignore_autogenerated
+
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "encoding/json"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference.
+func (in *APIResourceReference) DeepCopy() *APIResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(APIResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) {
+ *out = *in
+ if in.Owned != nil {
+ in, out := &in.Owned, &out.Owned
+ *out = make([]APIServiceDescription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Required != nil {
+ in, out := &in.Required, &out.Required
+ *out = make([]APIServiceDescription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions.
+func (in *APIServiceDefinitions) DeepCopy() *APIServiceDefinitions {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceDefinitions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]APIResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.StatusDescriptors != nil {
+ in, out := &in.StatusDescriptors, &out.StatusDescriptors
+ *out = make([]StatusDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SpecDescriptors != nil {
+ in, out := &in.SpecDescriptors, &out.SpecDescriptors
+ *out = make([]SpecDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ActionDescriptor != nil {
+ in, out := &in.ActionDescriptor, &out.ActionDescriptor
+ *out = make([]ActionDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription.
+func (in *APIServiceDescription) DeepCopy() *APIServiceDescription {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceDescription)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) {
+ *out = *in
+ if in.XDescriptors != nil {
+ in, out := &in.XDescriptors, &out.XDescriptors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = make(json.RawMessage, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor.
+func (in *ActionDescriptor) DeepCopy() *ActionDescriptor {
+ if in == nil {
+ return nil
+ }
+ out := new(ActionDescriptor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppLink) DeepCopyInto(out *AppLink) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink.
+func (in *AppLink) DeepCopy() *AppLink {
+ if in == nil {
+ return nil
+ }
+ out := new(AppLink)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BundleLookup) DeepCopyInto(out *BundleLookup) {
+ *out = *in
+ if in.CatalogSourceRef != nil {
+ in, out := &in.CatalogSourceRef, &out.CatalogSourceRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]BundleLookupCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup.
+func (in *BundleLookup) DeepCopy() *BundleLookup {
+ if in == nil {
+ return nil
+ }
+ out := new(BundleLookup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) {
+ *out = *in
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition.
+func (in *BundleLookupCondition) DeepCopy() *BundleLookupCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(BundleLookupCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CRDDescription) DeepCopyInto(out *CRDDescription) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]APIResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.StatusDescriptors != nil {
+ in, out := &in.StatusDescriptors, &out.StatusDescriptors
+ *out = make([]StatusDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SpecDescriptors != nil {
+ in, out := &in.SpecDescriptors, &out.SpecDescriptors
+ *out = make([]SpecDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ActionDescriptor != nil {
+ in, out := &in.ActionDescriptor, &out.ActionDescriptor
+ *out = make([]ActionDescriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription.
+func (in *CRDDescription) DeepCopy() *CRDDescription {
+ if in == nil {
+ return nil
+ }
+ out := new(CRDDescription)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CatalogSource) DeepCopyInto(out *CatalogSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource.
+func (in *CatalogSource) DeepCopy() *CatalogSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CatalogSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CatalogSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CatalogSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList.
+func (in *CatalogSourceList) DeepCopy() *CatalogSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(CatalogSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CatalogSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) {
+ *out = *in
+ if in.GrpcPodConfig != nil {
+ in, out := &in.GrpcPodConfig, &out.GrpcPodConfig
+ *out = new(GrpcPodConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.UpdateStrategy != nil {
+ in, out := &in.UpdateStrategy, &out.UpdateStrategy
+ *out = new(UpdateStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.Icon = in.Icon
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec.
+func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CatalogSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) {
+ *out = *in
+ if in.LatestImageRegistryPoll != nil {
+ in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll
+ *out = (*in).DeepCopy()
+ }
+ if in.ConfigMapResource != nil {
+ in, out := &in.ConfigMapResource, &out.ConfigMapResource
+ *out = new(ConfigMapResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RegistryServiceStatus != nil {
+ in, out := &in.RegistryServiceStatus, &out.RegistryServiceStatus
+ *out = new(RegistryServiceStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GRPCConnectionState != nil {
+ in, out := &in.GRPCConnectionState, &out.GRPCConnectionState
+ *out = new(GRPCConnectionState)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus.
+func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CatalogSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CleanupSpec) DeepCopyInto(out *CleanupSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupSpec.
+func (in *CleanupSpec) DeepCopy() *CleanupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CleanupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CleanupStatus) DeepCopyInto(out *CleanupStatus) {
+ *out = *in
+ if in.PendingDeletion != nil {
+ in, out := &in.PendingDeletion, &out.PendingDeletion
+ *out = make([]ResourceList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupStatus.
+func (in *CleanupStatus) DeepCopy() *CleanupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CleanupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion.
+func (in *ClusterServiceVersion) DeepCopy() *ClusterServiceVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterServiceVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterServiceVersion) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersionCondition) {
+ *out = *in
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition.
+func (in *ClusterServiceVersionCondition) DeepCopy() *ClusterServiceVersionCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterServiceVersionCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterServiceVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList.
+func (in *ClusterServiceVersionList) DeepCopy() *ClusterServiceVersionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterServiceVersionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterServiceVersionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec) {
+ *out = *in
+ in.InstallStrategy.DeepCopyInto(&out.InstallStrategy)
+ in.Version.DeepCopyInto(&out.Version)
+ in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions)
+ in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions)
+ if in.WebhookDefinitions != nil {
+ in, out := &in.WebhookDefinitions, &out.WebhookDefinitions
+ *out = make([]WebhookDescription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NativeAPIs != nil {
+ in, out := &in.NativeAPIs, &out.NativeAPIs
+ *out = make([]metav1.GroupVersionKind, len(*in))
+ copy(*out, *in)
+ }
+ if in.Keywords != nil {
+ in, out := &in.Keywords, &out.Keywords
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Maintainers != nil {
+ in, out := &in.Maintainers, &out.Maintainers
+ *out = make([]Maintainer, len(*in))
+ copy(*out, *in)
+ }
+ out.Provider = in.Provider
+ if in.Links != nil {
+ in, out := &in.Links, &out.Links
+ *out = make([]AppLink, len(*in))
+ copy(*out, *in)
+ }
+ if in.Icon != nil {
+ in, out := &in.Icon, &out.Icon
+ *out = make([]Icon, len(*in))
+ copy(*out, *in)
+ }
+ if in.InstallModes != nil {
+ in, out := &in.InstallModes, &out.InstallModes
+ *out = make([]InstallMode, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ out.Cleanup = in.Cleanup
+ if in.Skips != nil {
+ in, out := &in.Skips, &out.Skips
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.RelatedImages != nil {
+ in, out := &in.RelatedImages, &out.RelatedImages
+ *out = make([]RelatedImage, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec.
+func (in *ClusterServiceVersionSpec) DeepCopy() *ClusterServiceVersionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterServiceVersionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionStatus) {
+ *out = *in
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ClusterServiceVersionCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RequirementStatus != nil {
+ in, out := &in.RequirementStatus, &out.RequirementStatus
+ *out = make([]RequirementStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CertsLastUpdated != nil {
+ in, out := &in.CertsLastUpdated, &out.CertsLastUpdated
+ *out = (*in).DeepCopy()
+ }
+ if in.CertsRotateAt != nil {
+ in, out := &in.CertsRotateAt, &out.CertsRotateAt
+ *out = (*in).DeepCopy()
+ }
+ in.Cleanup.DeepCopyInto(&out.Cleanup)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus.
+func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterServiceVersionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference.
+func (in *ConfigMapResourceReference) DeepCopy() *ConfigMapResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions) {
+ *out = *in
+ if in.Owned != nil {
+ in, out := &in.Owned, &out.Owned
+ *out = make([]CRDDescription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Required != nil {
+ in, out := &in.Required, &out.Required
+ *out = make([]CRDDescription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions.
+func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomResourceDefinitions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependentStatus) DeepCopyInto(out *DependentStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus.
+func (in *DependentStatus) DeepCopy() *DependentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DependentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtractContentConfig) DeepCopyInto(out *ExtractContentConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtractContentConfig.
+func (in *ExtractContentConfig) DeepCopy() *ExtractContentConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtractContentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) {
+ *out = *in
+ in.LastConnectTime.DeepCopyInto(&out.LastConnectTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState.
+func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState {
+ if in == nil {
+ return nil
+ }
+ out := new(GRPCConnectionState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PriorityClassName != nil {
+ in, out := &in.PriorityClassName, &out.PriorityClassName
+ *out = new(string)
+ **out = **in
+ }
+ if in.MemoryTarget != nil {
+ in, out := &in.MemoryTarget, &out.MemoryTarget
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ExtractContent != nil {
+ in, out := &in.ExtractContent, &out.ExtractContent
+ *out = new(ExtractContentConfig)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig.
+func (in *GrpcPodConfig) DeepCopy() *GrpcPodConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GrpcPodConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Icon) DeepCopyInto(out *Icon) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon.
+func (in *Icon) DeepCopy() *Icon {
+ if in == nil {
+ return nil
+ }
+ out := new(Icon)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallMode) DeepCopyInto(out *InstallMode) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode.
+func (in *InstallMode) DeepCopy() *InstallMode {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallMode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) {
+ {
+ in := &in
+ *out = make(InstallModeSet, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallModeSet.
+func (in InstallModeSet) DeepCopy() InstallModeSet {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallModeSet)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlan) DeepCopyInto(out *InstallPlan) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan.
+func (in *InstallPlan) DeepCopy() *InstallPlan {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlan)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstallPlan) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) {
+ *out = *in
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition.
+func (in *InstallPlanCondition) DeepCopy() *InstallPlanCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlanCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]InstallPlan, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList.
+func (in *InstallPlanList) DeepCopy() *InstallPlanList {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlanList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstallPlanList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference.
+func (in *InstallPlanReference) DeepCopy() *InstallPlanReference {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlanReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) {
+ *out = *in
+ if in.ClusterServiceVersionNames != nil {
+ in, out := &in.ClusterServiceVersionNames, &out.ClusterServiceVersionNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec.
+func (in *InstallPlanSpec) DeepCopy() *InstallPlanSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlanSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]InstallPlanCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CatalogSources != nil {
+ in, out := &in.CatalogSources, &out.CatalogSources
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Plan != nil {
+ in, out := &in.Plan, &out.Plan
+ *out = make([]*Step, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Step)
+ **out = **in
+ }
+ }
+ }
+ if in.BundleLookups != nil {
+ in, out := &in.BundleLookups, &out.BundleLookups
+ *out = make([]BundleLookup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AttenuatedServiceAccountRef != nil {
+ in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ if in.StartTime != nil {
+ in, out := &in.StartTime, &out.StartTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus.
+func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InstallPlanStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Maintainer) DeepCopyInto(out *Maintainer) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer.
+func (in *Maintainer) DeepCopy() *Maintainer {
+ if in == nil {
+ return nil
+ }
+ out := new(Maintainer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) {
+ *out = *in
+ in.StrategySpec.DeepCopyInto(&out.StrategySpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy.
+func (in *NamedInstallStrategy) DeepCopy() *NamedInstallStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedInstallStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) {
+ *out = *in
+ if in.Interval != nil {
+ in, out := &in.Interval, &out.Interval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll.
+func (in *RegistryPoll) DeepCopy() *RegistryPoll {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryPoll)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) {
+ *out = *in
+ in.CreatedAt.DeepCopyInto(&out.CreatedAt)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus.
+func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryServiceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RelatedImage) DeepCopyInto(out *RelatedImage) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedImage.
+func (in *RelatedImage) DeepCopy() *RelatedImage {
+ if in == nil {
+ return nil
+ }
+ out := new(RelatedImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) {
+ *out = *in
+ if in.Dependents != nil {
+ in, out := &in.Dependents, &out.Dependents
+ *out = make([]DependentStatus, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus.
+func (in *RequirementStatus) DeepCopy() *RequirementStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RequirementStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceInstance) DeepCopyInto(out *ResourceInstance) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInstance.
+func (in *ResourceInstance) DeepCopy() *ResourceInstance {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceInstance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceList) DeepCopyInto(out *ResourceList) {
+ *out = *in
+ if in.Instances != nil {
+ in, out := &in.Instances, &out.Instances
+ *out = make([]ResourceInstance, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
+func (in *ResourceList) DeepCopy() *ResourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) {
+ *out = *in
+ if in.XDescriptors != nil {
+ in, out := &in.XDescriptors, &out.XDescriptors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = make(json.RawMessage, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor.
+func (in *SpecDescriptor) DeepCopy() *SpecDescriptor {
+ if in == nil {
+ return nil
+ }
+ out := new(SpecDescriptor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) {
+ *out = *in
+ if in.XDescriptors != nil {
+ in, out := &in.XDescriptors, &out.XDescriptors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = make(json.RawMessage, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor.
+func (in *StatusDescriptor) DeepCopy() *StatusDescriptor {
+ if in == nil {
+ return nil
+ }
+ out := new(StatusDescriptor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Step) DeepCopyInto(out *Step) {
+ *out = *in
+ out.Resource = in.Resource
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step.
+func (in *Step) DeepCopy() *Step {
+ if in == nil {
+ return nil
+ }
+ out := new(Step)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StepResource) DeepCopyInto(out *StepResource) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource.
+func (in *StepResource) DeepCopy() *StepResource {
+ if in == nil {
+ return nil
+ }
+ out := new(StepResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPermissions) {
+ *out = *in
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]rbacv1.PolicyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions.
+func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissions {
+ if in == nil {
+ return nil
+ }
+ out := new(StrategyDeploymentPermissions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) {
+ *out = *in
+ in.Spec.DeepCopyInto(&out.Spec)
+ if in.Label != nil {
+ in, out := &in.Label, &out.Label
+ *out = make(labels.Set, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec.
+func (in *StrategyDeploymentSpec) DeepCopy() *StrategyDeploymentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StrategyDeploymentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment) {
+ *out = *in
+ if in.DeploymentSpecs != nil {
+ in, out := &in.DeploymentSpecs, &out.DeploymentSpecs
+ *out = make([]StrategyDeploymentSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Permissions != nil {
+ in, out := &in.Permissions, &out.Permissions
+ *out = make([]StrategyDeploymentPermissions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ClusterPermissions != nil {
+ in, out := &in.ClusterPermissions, &out.ClusterPermissions
+ *out = make([]StrategyDeploymentPermissions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment.
+func (in *StrategyDetailsDeployment) DeepCopy() *StrategyDetailsDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(StrategyDetailsDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subscription) DeepCopyInto(out *Subscription) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(SubscriptionSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription.
+func (in *Subscription) DeepCopy() *Subscription {
+ if in == nil {
+ return nil
+ }
+ out := new(Subscription)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Subscription) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth) {
+ *out = *in
+ if in.CatalogSourceRef != nil {
+ in, out := &in.CatalogSourceRef, &out.CatalogSourceRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ if in.LastUpdated != nil {
+ in, out := &in.LastUpdated, &out.LastUpdated
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth.
+func (in *SubscriptionCatalogHealth) DeepCopy() *SubscriptionCatalogHealth {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionCatalogHealth)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) {
+ *out = *in
+ if in.LastHeartbeatTime != nil {
+ in, out := &in.LastHeartbeatTime, &out.LastHeartbeatTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastTransitionTime != nil {
+ in, out := &in.LastTransitionTime, &out.LastTransitionTime
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition.
+func (in *SubscriptionCondition) DeepCopy() *SubscriptionCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(v1.ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnvFrom != nil {
+ in, out := &in.EnvFrom, &out.EnvFrom
+ *out = make([]v1.EnvFromSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]v1.Volume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.VolumeMounts != nil {
+ in, out := &in.VolumeMounts, &out.VolumeMounts
+ *out = make([]v1.VolumeMount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig.
+func (in *SubscriptionConfig) DeepCopy() *SubscriptionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Subscription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList.
+func (in *SubscriptionList) DeepCopy() *SubscriptionList {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubscriptionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) {
+ *out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(SubscriptionConfig)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec.
+func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) {
+ *out = *in
+ if in.Install != nil {
+ in, out := &in.Install, &out.Install
+ *out = new(InstallPlanReference)
+ **out = **in
+ }
+ if in.InstallPlanRef != nil {
+ in, out := &in.InstallPlanRef, &out.InstallPlanRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ if in.CatalogHealth != nil {
+ in, out := &in.CatalogHealth, &out.CatalogHealth
+ *out = make([]SubscriptionCatalogHealth, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]SubscriptionCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.LastUpdated.DeepCopyInto(&out.LastUpdated)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus.
+func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) {
+ *out = *in
+ if in.RegistryPoll != nil {
+ in, out := &in.RegistryPoll, &out.RegistryPoll
+ *out = new(RegistryPoll)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy.
+func (in *UpdateStrategy) DeepCopy() *UpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) {
+ *out = *in
+ if in.TargetPort != nil {
+ in, out := &in.TargetPort, &out.TargetPort
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailurePolicy != nil {
+ in, out := &in.FailurePolicy, &out.FailurePolicy
+ *out = new(admissionregistrationv1.FailurePolicyType)
+ **out = **in
+ }
+ if in.MatchPolicy != nil {
+ in, out := &in.MatchPolicy, &out.MatchPolicy
+ *out = new(admissionregistrationv1.MatchPolicyType)
+ **out = **in
+ }
+ if in.ObjectSelector != nil {
+ in, out := &in.ObjectSelector, &out.ObjectSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SideEffects != nil {
+ in, out := &in.SideEffects, &out.SideEffects
+ *out = new(admissionregistrationv1.SideEffectClass)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.AdmissionReviewVersions != nil {
+ in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ReinvocationPolicy != nil {
+ in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
+ *out = new(admissionregistrationv1.ReinvocationPolicyType)
+ **out = **in
+ }
+ if in.WebhookPath != nil {
+ in, out := &in.WebhookPath, &out.WebhookPath
+ *out = new(string)
+ **out = **in
+ }
+ if in.ConversionCRDs != nil {
+ in, out := &in.ConversionCRDs, &out.ConversionCRDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookDescription.
+func (in *WebhookDescription) DeepCopy() *WebhookDescription {
+ if in == nil {
+ return nil
+ }
+ out := new(WebhookDescription)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go
new file mode 100644
index 000000000000..b881240adfe9
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/doc.go
@@ -0,0 +1,6 @@
+// +groupName=operators.coreos.com
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators
+
+// Package v1alpha2 contains resources types for version v1alpha2 of the operators.coreos.com API group.
+package v1alpha2
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go
new file mode 100644
index 000000000000..637dc4dfc802
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/groupversion_info.go
@@ -0,0 +1,42 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +kubebuilder:object:generate=true
+
+// Package v1alpha2 contains API Schema definitions for the discovery v1alpha2 API group.
+package v1alpha2
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects.
+ GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v1alpha2"}
+
+ // SchemeGroupVersion is required for compatibility with client generation.
+ SchemeGroupVersion = GroupVersion
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return GroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go
new file mode 100644
index 000000000000..2e67773f5c52
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/operatorgroup_types.go
@@ -0,0 +1,99 @@
+package v1alpha2
+
+import (
+ "sort"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ OperatorGroupAnnotationKey = "olm.operatorGroup"
+ OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace"
+ OperatorGroupTargetsAnnotationKey = "olm.targetNamespaces"
+ OperatorGroupProvidedAPIsAnnotationKey = "olm.providedAPIs"
+
+ OperatorGroupKind = "OperatorGroup"
+)
+
+// OperatorGroupSpec is the spec for an OperatorGroup resource.
+type OperatorGroupSpec struct {
+ // Selector selects the OperatorGroup's target namespaces.
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+ // TargetNamespaces is an explicit set of namespaces to target.
+ // If it is set, Selector is ignored.
+ // +optional
+ TargetNamespaces []string `json:"targetNamespaces,omitempty"`
+
+ // ServiceAccountName is the admin specified service account which will be
+ // used to deploy operator(s) in this operator group.
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // Static tells OLM not to update the OperatorGroup's providedAPIs annotation
+ // +optional
+ StaticProvidedAPIs bool `json:"staticProvidedAPIs,omitempty"`
+}
+
+// OperatorGroupStatus is the status for an OperatorGroupResource.
+type OperatorGroupStatus struct {
+ // Namespaces is the set of target namespaces for the OperatorGroup.
+ Namespaces []string `json:"namespaces,omitempty"`
+
+ // ServiceAccountRef references the service account object specified.
+ ServiceAccountRef *corev1.ObjectReference `json:"serviceAccountRef,omitempty"`
+
+ // LastUpdated is a timestamp of the last time the OperatorGroup's status was Updated.
+ LastUpdated *metav1.Time `json:"lastUpdated"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName=og,categories=olm
+// +kubebuilder:subresource:status
+
+// OperatorGroup is the unit of multitenancy for OLM managed operators.
+// It constrains the installation of operators in its namespace to a specified set of target namespaces.
+type OperatorGroup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +optional
+ Spec OperatorGroupSpec `json:"spec"`
+ Status OperatorGroupStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorGroupList is a list of OperatorGroup resources.
+type OperatorGroupList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []OperatorGroup `json:"items"`
+}
+
+func (o *OperatorGroup) BuildTargetNamespaces() string {
+ sort.Strings(o.Status.Namespaces)
+ return strings.Join(o.Status.Namespaces, ",")
+}
+
+// IsServiceAccountSpecified returns true if the spec has a service account name specified.
+func (o *OperatorGroup) IsServiceAccountSpecified() bool {
+ if o.Spec.ServiceAccountName == "" {
+ return false
+ }
+
+ return true
+}
+
+// HasServiceAccountSynced returns true if the service account specified has been synced.
+func (o *OperatorGroup) HasServiceAccountSynced() bool {
+ if o.IsServiceAccountSpecified() && o.Status.ServiceAccountRef != nil {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..885643cb75d8
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha2/zz_generated.deepcopy.go
@@ -0,0 +1,139 @@
+//go:build !ignore_autogenerated
+
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha2
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup.
+func (in *OperatorGroup) DeepCopy() *OperatorGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorGroup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatorGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList.
+func (in *OperatorGroupList) DeepCopy() *OperatorGroupList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorGroupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorGroupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TargetNamespaces != nil {
+ in, out := &in.TargetNamespaces, &out.TargetNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec.
+func (in *OperatorGroupSpec) DeepCopy() *OperatorGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) {
+ *out = *in
+ if in.Namespaces != nil {
+ in, out := &in.Namespaces, &out.Namespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceAccountRef != nil {
+ in, out := &in.ServiceAccountRef, &out.ServiceAccountRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.LastUpdated != nil {
+ in, out := &in.LastUpdated, &out.LastUpdated
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus.
+func (in *OperatorGroupStatus) DeepCopy() *OperatorGroupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorGroupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go
new file mode 100644
index 000000000000..f85f7924252e
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v2/doc.go
@@ -0,0 +1,4 @@
+// +groupName=operators.coreos.com
+
+// Package v2 contains resources types for version v2 of the operators.coreos.com API group.
+package v2
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go
new file mode 100644
index 000000000000..2d2d923d1be6
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v2/groupversion_info.go
@@ -0,0 +1,28 @@
+// +kubebuilder:object:generate=true
+
+// Package v2 contains API Schema definitions for the operator v2 API group.
+package v2
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects.
+ GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v2"}
+
+ // SchemeGroupVersion is required for compatibility with client generation.
+ SchemeGroupVersion = GroupVersion
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return GroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go
new file mode 100644
index 000000000000..ef1c56de61be
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v2/operatorcondition_types.go
@@ -0,0 +1,54 @@
+package v2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // Upgradeable indicates that the operator is upgradeable
+ Upgradeable string = "Upgradeable"
+)
+
+// ConditionType codifies a condition's type.
+type ConditionType string
+
+// OperatorConditionSpec allows an operator to report state to OLM and provides
+// cluster admin with the ability to manually override state reported by the operator.
+type OperatorConditionSpec struct {
+ ServiceAccounts []string `json:"serviceAccounts,omitempty"`
+ Deployments []string `json:"deployments,omitempty"`
+ Overrides []metav1.Condition `json:"overrides,omitempty"`
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// OperatorConditionStatus allows OLM to convey which conditions have been observed.
+type OperatorConditionStatus struct {
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:resource:shortName=condition,categories=olm
+// +kubebuilder:subresource:status
+// OperatorCondition is a Custom Resource of type `OperatorCondition` which is used to convey information to OLM about the state of an operator.
+type OperatorCondition struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec OperatorConditionSpec `json:"spec,omitempty"`
+ Status OperatorConditionStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// OperatorConditionList represents a list of Conditions.
+type OperatorConditionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []OperatorCondition `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&OperatorCondition{}, &OperatorConditionList{})
+}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..92ecc812ac29
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v2/zz_generated.deepcopy.go
@@ -0,0 +1,145 @@
+//go:build !ignore_autogenerated
+
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition.
+func (in *OperatorCondition) DeepCopy() *OperatorCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorCondition) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionList) DeepCopyInto(out *OperatorConditionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionList.
+func (in *OperatorConditionList) DeepCopy() *OperatorConditionList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConditionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorConditionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionSpec) DeepCopyInto(out *OperatorConditionSpec) {
+ *out = *in
+ if in.ServiceAccounts != nil {
+ in, out := &in.ServiceAccounts, &out.ServiceAccounts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Deployments != nil {
+ in, out := &in.Deployments, &out.Deployments
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Overrides != nil {
+ in, out := &in.Overrides, &out.Overrides
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionSpec.
+func (in *OperatorConditionSpec) DeepCopy() *OperatorConditionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConditionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConditionStatus) DeepCopyInto(out *OperatorConditionStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConditionStatus.
+func (in *OperatorConditionStatus) DeepCopy() *OperatorConditionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConditionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE b/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE
new file mode 100644
index 000000000000..261eeb9e9f8b
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go
new file mode 100644
index 000000000000..251358bb4a5d
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go
new file mode 100644
index 000000000000..7c1a1137dd86
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme/register.go
@@ -0,0 +1,62 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+ operatorsv1alpha2 "github.com/operator-framework/api/pkg/operators/v1alpha2"
+ operatorsv2 "github.com/operator-framework/api/pkg/operators/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ operatorsv1.AddToScheme,
+ operatorsv1alpha1.AddToScheme,
+ operatorsv1alpha2.AddToScheme,
+ operatorsv2.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go
new file mode 100644
index 000000000000..d84e927bcdb1
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go
new file mode 100644
index 000000000000..357fc8aae01f
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type OLMConfigExpansion interface{}
+
+type OperatorExpansion interface{}
+
+type OperatorConditionExpansion interface{}
+
+type OperatorGroupExpansion interface{}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go
new file mode 100644
index 000000000000..804cfd5681dc
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/olmconfig.go
@@ -0,0 +1,70 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// OLMConfigsGetter has a method to return a OLMConfigInterface.
+// A group's client should implement this interface.
+type OLMConfigsGetter interface {
+ OLMConfigs() OLMConfigInterface
+}
+
+// OLMConfigInterface has methods to work with OLMConfig resources.
+type OLMConfigInterface interface {
+ Create(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.CreateOptions) (*operatorsv1.OLMConfig, error)
+ Update(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.UpdateOptions) (*operatorsv1.OLMConfig, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, oLMConfig *operatorsv1.OLMConfig, opts metav1.UpdateOptions) (*operatorsv1.OLMConfig, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OLMConfig, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OLMConfigList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OLMConfig, err error)
+ OLMConfigExpansion
+}
+
+// oLMConfigs implements OLMConfigInterface
+type oLMConfigs struct {
+ *gentype.ClientWithList[*operatorsv1.OLMConfig, *operatorsv1.OLMConfigList]
+}
+
+// newOLMConfigs returns a OLMConfigs
+func newOLMConfigs(c *OperatorsV1Client) *oLMConfigs {
+ return &oLMConfigs{
+ gentype.NewClientWithList[*operatorsv1.OLMConfig, *operatorsv1.OLMConfigList](
+ "olmconfigs",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *operatorsv1.OLMConfig { return &operatorsv1.OLMConfig{} },
+ func() *operatorsv1.OLMConfigList { return &operatorsv1.OLMConfigList{} },
+ ),
+ }
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go
new file mode 100644
index 000000000000..9d7176670166
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operator.go
@@ -0,0 +1,70 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// OperatorsGetter has a method to return a OperatorInterface.
+// A group's client should implement this interface.
+type OperatorsGetter interface {
+ Operators() OperatorInterface
+}
+
+// OperatorInterface has methods to work with Operator resources.
+type OperatorInterface interface {
+ Create(ctx context.Context, operator *operatorsv1.Operator, opts metav1.CreateOptions) (*operatorsv1.Operator, error)
+ Update(ctx context.Context, operator *operatorsv1.Operator, opts metav1.UpdateOptions) (*operatorsv1.Operator, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, operator *operatorsv1.Operator, opts metav1.UpdateOptions) (*operatorsv1.Operator, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.Operator, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.Operator, err error)
+ OperatorExpansion
+}
+
+// operators implements OperatorInterface
+type operators struct {
+ *gentype.ClientWithList[*operatorsv1.Operator, *operatorsv1.OperatorList]
+}
+
+// newOperators returns a Operators
+func newOperators(c *OperatorsV1Client) *operators {
+ return &operators{
+ gentype.NewClientWithList[*operatorsv1.Operator, *operatorsv1.OperatorList](
+ "operators",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *operatorsv1.Operator { return &operatorsv1.Operator{} },
+ func() *operatorsv1.OperatorList { return &operatorsv1.OperatorList{} },
+ ),
+ }
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go
new file mode 100644
index 000000000000..9d11723fb51e
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorcondition.go
@@ -0,0 +1,70 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// OperatorConditionsGetter has a method to return a OperatorConditionInterface.
+// A group's client should implement this interface.
+type OperatorConditionsGetter interface {
+ OperatorConditions(namespace string) OperatorConditionInterface
+}
+
+// OperatorConditionInterface has methods to work with OperatorCondition resources.
+type OperatorConditionInterface interface {
+ Create(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.CreateOptions) (*operatorsv1.OperatorCondition, error)
+ Update(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.UpdateOptions) (*operatorsv1.OperatorCondition, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, operatorCondition *operatorsv1.OperatorCondition, opts metav1.UpdateOptions) (*operatorsv1.OperatorCondition, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OperatorCondition, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorConditionList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OperatorCondition, err error)
+ OperatorConditionExpansion
+}
+
+// operatorConditions implements OperatorConditionInterface
+type operatorConditions struct {
+ *gentype.ClientWithList[*operatorsv1.OperatorCondition, *operatorsv1.OperatorConditionList]
+}
+
+// newOperatorConditions returns a OperatorConditions
+func newOperatorConditions(c *OperatorsV1Client, namespace string) *operatorConditions {
+ return &operatorConditions{
+ gentype.NewClientWithList[*operatorsv1.OperatorCondition, *operatorsv1.OperatorConditionList](
+ "operatorconditions",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ namespace,
+ func() *operatorsv1.OperatorCondition { return &operatorsv1.OperatorCondition{} },
+ func() *operatorsv1.OperatorConditionList { return &operatorsv1.OperatorConditionList{} },
+ ),
+ }
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go
new file mode 100644
index 000000000000..7df6bc50ad9c
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operatorgroup.go
@@ -0,0 +1,70 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// OperatorGroupsGetter has a method to return a OperatorGroupInterface.
+// A group's client should implement this interface.
+type OperatorGroupsGetter interface {
+ OperatorGroups(namespace string) OperatorGroupInterface
+}
+
+// OperatorGroupInterface has methods to work with OperatorGroup resources.
+type OperatorGroupInterface interface {
+ Create(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.CreateOptions) (*operatorsv1.OperatorGroup, error)
+ Update(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.UpdateOptions) (*operatorsv1.OperatorGroup, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, operatorGroup *operatorsv1.OperatorGroup, opts metav1.UpdateOptions) (*operatorsv1.OperatorGroup, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*operatorsv1.OperatorGroup, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*operatorsv1.OperatorGroupList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *operatorsv1.OperatorGroup, err error)
+ OperatorGroupExpansion
+}
+
+// operatorGroups implements OperatorGroupInterface
+type operatorGroups struct {
+ *gentype.ClientWithList[*operatorsv1.OperatorGroup, *operatorsv1.OperatorGroupList]
+}
+
+// newOperatorGroups returns a OperatorGroups
+func newOperatorGroups(c *OperatorsV1Client, namespace string) *operatorGroups {
+ return &operatorGroups{
+ gentype.NewClientWithList[*operatorsv1.OperatorGroup, *operatorsv1.OperatorGroupList](
+ "operatorgroups",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ namespace,
+ func() *operatorsv1.OperatorGroup { return &operatorsv1.OperatorGroup{} },
+ func() *operatorsv1.OperatorGroupList { return &operatorsv1.OperatorGroupList{} },
+ ),
+ }
+}
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go
new file mode 100644
index 000000000000..d355cd94127a
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1/operators_client.go
@@ -0,0 +1,122 @@
+/*
+Copyright Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ http "net/http"
+
+ operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
+ scheme "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type OperatorsV1Interface interface {
+ RESTClient() rest.Interface
+ OLMConfigsGetter
+ OperatorsGetter
+ OperatorConditionsGetter
+ OperatorGroupsGetter
+}
+
+// OperatorsV1Client is used to interact with features provided by the operators.coreos.com group.
+type OperatorsV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *OperatorsV1Client) OLMConfigs() OLMConfigInterface {
+ return newOLMConfigs(c)
+}
+
+func (c *OperatorsV1Client) Operators() OperatorInterface {
+ return newOperators(c)
+}
+
+func (c *OperatorsV1Client) OperatorConditions(namespace string) OperatorConditionInterface {
+ return newOperatorConditions(c, namespace)
+}
+
+func (c *OperatorsV1Client) OperatorGroups(namespace string) OperatorGroupInterface {
+ return newOperatorGroups(c, namespace)
+}
+
+// NewForConfig creates a new OperatorsV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*OperatorsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new OperatorsV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*OperatorsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &OperatorsV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new OperatorsV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *OperatorsV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new OperatorsV1Client for the given RESTClient.
+func New(c rest.Interface) *OperatorsV1Client {
+ return &OperatorsV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := operatorsv1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *OperatorsV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d80bca6a56e1..5cc205102625 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -49,8 +49,6 @@ cloud.google.com/go/storage/internal/apiv2/storagepb
# git.sr.ht/~sbinet/gg v0.5.0
## explicit; go 1.19
git.sr.ht/~sbinet/gg
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
-## explicit; go 1.20
# github.com/Azure/azure-pipeline-go v0.2.3
## explicit; go 1.14
github.com/Azure/azure-pipeline-go/pipeline
@@ -542,7 +540,7 @@ github.com/distribution/distribution/v3/reference
# github.com/distribution/reference v0.6.0
## explicit; go 1.20
github.com/distribution/reference
-# github.com/docker/docker v27.1.2+incompatible
+# github.com/docker/docker v27.3.1+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -657,9 +655,10 @@ github.com/felixge/fgprof
# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.7.0
+# github.com/fsnotify/fsnotify v1.8.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
+github.com/fsnotify/fsnotify/internal
# github.com/fsouza/go-dockerclient v1.12.0
## explicit; go 1.22
github.com/fsouza/go-dockerclient
@@ -1432,7 +1431,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo
github.com/openshift-eng/openshift-tests-extension/pkg/junit
github.com/openshift-eng/openshift-tests-extension/pkg/util/sets
github.com/openshift-eng/openshift-tests-extension/pkg/version
-# github.com/openshift/api v0.0.0-20250710004639-926605d3338b
+# github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20250710004639-926605d3338b
## explicit; go 1.24.0
github.com/openshift/api
github.com/openshift/api/annotations
@@ -1748,6 +1747,18 @@ github.com/openshift/library-go/test/library/metrics
github.com/opentracing/opentracing-go
github.com/opentracing/opentracing-go/ext
github.com/opentracing/opentracing-go/log
+# github.com/operator-framework/api v0.27.0
+## explicit; go 1.22.0
+github.com/operator-framework/api/pkg/lib/version
+github.com/operator-framework/api/pkg/operators
+github.com/operator-framework/api/pkg/operators/v1
+github.com/operator-framework/api/pkg/operators/v1alpha1
+github.com/operator-framework/api/pkg/operators/v1alpha2
+github.com/operator-framework/api/pkg/operators/v2
+# github.com/operator-framework/operator-lifecycle-manager v0.30.1-0.20250114164243-1b6752ec65fa
+## explicit; go 1.23.0
+github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme
+github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1
# github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417
## explicit; go 1.22.0
github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1
@@ -2223,8 +2234,8 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
-## explicit; go 1.20
+# golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+## explicit; go 1.22.0
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/image v0.11.0
@@ -4670,7 +4681,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient
# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16
## explicit; go 1.22.0
sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader
-# sigs.k8s.io/controller-runtime v0.19.0
+# sigs.k8s.io/controller-runtime v0.19.4
## explicit; go 1.22.0
sigs.k8s.io/controller-runtime/pkg/conversion
sigs.k8s.io/controller-runtime/pkg/scheme
@@ -4828,3 +4839,4 @@ sigs.k8s.io/yaml/goyaml.v3
# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f
# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f
# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f
+# github.com/openshift/api => github.com/openshift/api v0.0.0-20250710004639-926605d3338b