diff --git a/bootstrap/bootstrap-pod.yaml b/bootstrap/bootstrap-pod.yaml index bc9374628..ca42facd1 100644 --- a/bootstrap/bootstrap-pod.yaml +++ b/bootstrap/bootstrap-pod.yaml @@ -37,6 +37,7 @@ spec: fieldRef: fieldPath: spec.nodeName hostNetwork: true + terminationGracePeriodSeconds: 130 volumes: - name: kubeconfig hostPath: diff --git a/cmd/start.go b/cmd/start.go index 20fae2301..f38462c1e 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/spf13/cobra" "k8s.io/klog" @@ -16,11 +18,12 @@ func init() { Long: "", Run: func(cmd *cobra.Command, args []string) { // To help debugging, immediately log version - klog.Infof("%s", version.String) + klog.Info(version.String) - if err := opts.Run(); err != nil { + if err := opts.Run(context.Background()); err != nil { klog.Fatalf("error: %v", err) } + klog.Infof("Graceful shutdown complete for %s.", version.String) }, } diff --git a/docs/user/reconciliation.md b/docs/user/reconciliation.md index d5172b9ed..42116bab6 100644 --- a/docs/user/reconciliation.md +++ b/docs/user/reconciliation.md @@ -93,22 +93,22 @@ So the graph nodes are all parallelized with the by-number ordering flattened ou For the usual reconciliation loop (neither an upgrade between releases nor a fresh install), the flattened graph is also randomly permuted to avoid hanging on ordering bugs. -## Synchronizing the graph +## Reconciling the graph The cluster-version operator spawns worker goroutines that walk the graph, pushing manifests in their queue. -For each manifest in the node, the worker synchronizes the cluster with the manifest using a resource builder. +For each manifest in the node, the worker reconciles the cluster with the manifest using a resource builder. On error (or timeout), the worker abandons the manifest, graph node, and any dependencies of that graph node. On success, the worker proceeds to the next manifest in the graph node. 
## Resource builders -Resource builders synchronize the cluster with a manifest from the release image. +Resource builders reconcile a cluster object with a manifest from the release image. The general approach is to generates a merged manifest combining critical spec properties from the release-image manifest with data from a preexisting in-cluster object, if any. If the merged manifest differs from the in-cluster object, the merged manifest is pushed back into the cluster. Some types have additional logic, as described in the following subsections. Note that this logic only applies to manifests included in the release image itself. -For example, only [ClusterOperator](../dev/clusteroperator.md) from the release image will have the blocking logic described [below](#clusteroperator); if an admin or secondary operator pushed a ClusterOperator object, it would not impact the cluster-version operator's graph synchronization. +For example, only [ClusterOperator](../dev/clusteroperator.md) from the release image will have the blocking logic described [below](#clusteroperator); if an admin or secondary operator pushed a ClusterOperator object, it would not impact the cluster-version operator's graph reconciliation. ### ClusterOperator diff --git a/docs/user/status.md b/docs/user/status.md index 62278f8a2..26a187554 100644 --- a/docs/user/status.md +++ b/docs/user/status.md @@ -3,6 +3,27 @@ [The ClusterVersion object](../dev/clusterversion.md) sets `conditions` describing the state of the cluster-version operator (CVO). This document describes those conditions and, where appropriate, suggests possible mitigations. +## Failing + +When `Failing` is True, the CVO is failing to reconcile the cluster with the desired release image. +In all cases, the impact on the cluster will be that dependent nodes in [the manifest graph](reconciliation.md#manifest-graph) may not be [reconciled](reconciliation.md#reconciling-the-graph). 
+Note that the graph [may be flattened](reconciliation.md#manifest-graph), in which case there are no dependent nodes. + +Most reconciliation errors will result in `Failing=True`, although [`ClusterOperatorNotAvailable`](#clusteroperatornotavailable) has special handling. + +### NoDesiredImage + +The CVO has not been given a release image to reconcile. + +If this happens it is a CVO coding error, because clearing [`desiredUpdate`][api-desired-update] should return you to the current CVO's release image. + +### ClusterOperatorNotAvailable + +`ClusterOperatorNotAvailable` (or the consolidated `ClusterOperatorsNotAvailable`) is set when the CVO fails to retrieve the ClusterOperator from the cluster or when the retrieved ClusterOperator does not satisfy [the reconciliation conditions](reconciliation.md#clusteroperator). + +Unlike most manifest-reconciliation failures, this error does not immediately result in `Failing=True`. +Under some conditions during installs and updates, the CVO will treat this condition as a `Progressing=True` condition and give the operator up to twenty minutes to level before reporting `Failing=True`. + ## RetrievedUpdates When `RetrievedUpdates` is `True`, the CVO is succesfully retrieving updates, which is good. @@ -107,5 +128,6 @@ If this error occurs because you forced an update to a release that is not in an If this happens it is a CVO coding error. There is no mitigation short of updating to a new release image with a fixed CVO. 
+[api-desired-update]: https://github.com/openshift/api/blob/34f54f12813aaed8822bb5bc56e97cbbfa92171d/config/v1/types_cluster_version.go#L40-L54 [channels]: https://docs.openshift.com/container-platform/4.3/updating/updating-cluster-between-minor.html#understanding-upgrade-channels_updating-cluster-between-minor [Cincinnati]: https://github.com/openshift/cincinnati/blob/master/docs/design/openshift.md diff --git a/go.mod b/go.mod index 81e2e449f..4941d7ebe 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,8 @@ module github.com/openshift/cluster-version-operator go 1.13 +replace golang.org/x/text => golang.org/x/text v0.3.3 + require ( github.com/blang/semver v3.5.0+incompatible github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 12db6f67c..7e68b5ff1 100644 --- a/go.sum +++ b/go.sum @@ -397,11 +397,8 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/install/0000_00_cluster-version-operator_03_deployment.yaml b/install/0000_00_cluster-version-operator_03_deployment.yaml index bc0ba3240..76ae8a980 100644 --- a/install/0000_00_cluster-version-operator_03_deployment.yaml +++ b/install/0000_00_cluster-version-operator_03_deployment.yaml @@ -52,6 +52,7 @@ spec: nodeSelector: node-role.kubernetes.io/master: "" priorityClassName: "system-cluster-critical" + terminationGracePeriodSeconds: 130 tolerations: - key: "node-role.kubernetes.io/master" operator: Exists diff --git a/lib/resourcemerge/core.go b/lib/resourcemerge/core.go index 155351930..aaad5b59f 100644 --- a/lib/resourcemerge/core.go +++ b/lib/resourcemerge/core.go @@ -164,6 +164,10 @@ func ensureProbePtr(modified *bool, existing **corev1.Probe, required *corev1.Pr func ensureProbe(modified *bool, existing *corev1.Probe, required corev1.Probe) { setInt32(modified, &existing.InitialDelaySeconds, required.InitialDelaySeconds) + setInt32(modified, &existing.TimeoutSeconds, required.TimeoutSeconds) + setInt32(modified, &existing.PeriodSeconds, required.PeriodSeconds) + setInt32(modified, &existing.SuccessThreshold, required.SuccessThreshold) + setInt32(modified, &existing.FailureThreshold, required.FailureThreshold) ensureProbeHandler(modified, &existing.Handler, required.Handler) } diff --git a/lib/resourcemerge/core_test.go b/lib/resourcemerge/core_test.go index d9cac7a2d..eda22e119 100644 --- a/lib/resourcemerge/core_test.go +++ b/lib/resourcemerge/core_test.go @@ -359,6 +359,98 @@ func TestEnsurePodSpec(t *testing.T) { }, }, }, + { + name: "modify container readiness probe", + existing: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + ReadinessProbe: &corev1.Probe{ + 
InitialDelaySeconds: 1, + TimeoutSeconds: 2, + PeriodSeconds: 3, + SuccessThreshold: 4, + FailureThreshold: 5, + }, + }, + }, + }, + input: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 7, + TimeoutSeconds: 8, + PeriodSeconds: 9, + SuccessThreshold: 10, + FailureThreshold: 11, + }, + }, + }, + }, + expectedModified: true, + expected: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 7, + TimeoutSeconds: 8, + PeriodSeconds: 9, + SuccessThreshold: 10, + FailureThreshold: 11, + }, + }, + }, + }, + }, + { + name: "modify container liveness probe", + existing: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 1, + TimeoutSeconds: 2, + PeriodSeconds: 3, + SuccessThreshold: 4, + FailureThreshold: 5, + }, + }, + }, + }, + input: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 7, + TimeoutSeconds: 8, + PeriodSeconds: 9, + SuccessThreshold: 10, + FailureThreshold: 11, + }, + }, + }, + }, + expectedModified: true, + expected: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 7, + TimeoutSeconds: 8, + PeriodSeconds: 9, + SuccessThreshold: 10, + FailureThreshold: 11, + }, + }, + }, + }, + }, } for _, test := range tests { diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go index 2b6937802..6e74634b3 100644 --- a/pkg/autoupdate/autoupdate.go +++ b/pkg/autoupdate/autoupdate.go @@ -87,7 +87,7 @@ func New( } // Run runs the autoupdate controller. 
-func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { +func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer ctrl.queue.ShutDown() @@ -95,15 +95,16 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { defer klog.Info("Shutting down AutoUpdateController") if !cache.WaitForCacheSync(stopCh, ctrl.cacheSynced...) { - klog.Info("Caches never synchronized") - return + return fmt.Errorf("caches never synchronized") } for i := 0; i < workers; i++ { + // FIXME: actually wait until these complete if the Context is canceled. And possibly add utilruntime.HandleCrash. go wait.Until(ctrl.worker, time.Second, stopCh) } <-stopCh + return nil } func (ctrl *Controller) eventHandler() cache.ResourceEventHandler { diff --git a/pkg/cvo/availableupdates.go b/pkg/cvo/availableupdates.go index 66dd6d6bb..72aaa8ea5 100644 --- a/pkg/cvo/availableupdates.go +++ b/pkg/cvo/availableupdates.go @@ -2,7 +2,6 @@ package cvo import ( "crypto/tls" - "crypto/x509" "fmt" "net/url" "runtime" @@ -11,7 +10,6 @@ import ( "github.com/blang/semver" "github.com/google/uuid" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" @@ -197,54 +195,3 @@ func calculateAvailableUpdatesStatus(clusterID string, proxyURL *url.URL, tlsCon LastTransitionTime: metav1.Now(), } } - -// getHTTPSProxyURL returns a url.URL object for the configured -// https proxy only. It can be nil if does not exist or there is an error. 
-func (optr *Operator) getHTTPSProxyURL() (*url.URL, string, error) { - proxy, err := optr.proxyLister.Get("cluster") - - if errors.IsNotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - if &proxy.Spec != nil { - if proxy.Spec.HTTPSProxy != "" { - proxyURL, err := url.Parse(proxy.Spec.HTTPSProxy) - if err != nil { - return nil, "", err - } - return proxyURL, proxy.Spec.TrustedCA.Name, nil - } - } - return nil, "", nil -} - -func (optr *Operator) getTLSConfig(cmNameRef string) (*tls.Config, error) { - cm, err := optr.cmConfigLister.Get(cmNameRef) - - if err != nil { - return nil, err - } - - certPool, _ := x509.SystemCertPool() - if certPool == nil { - certPool = x509.NewCertPool() - } - - if cm.Data["ca-bundle.crt"] != "" { - if ok := certPool.AppendCertsFromPEM([]byte(cm.Data["ca-bundle.crt"])); !ok { - return nil, fmt.Errorf("unable to add ca-bundle.crt certificates") - } - } else { - return nil, nil - } - - config := &tls.Config{ - RootCAs: certPool, - } - - return config, nil -} diff --git a/pkg/cvo/cvo.go b/pkg/cvo/cvo.go index c0f3a315e..4bafcf6da 100644 --- a/pkg/cvo/cvo.go +++ b/pkg/cvo/cvo.go @@ -169,7 +169,6 @@ func New( proxyInformer configinformersv1.ProxyInformer, client clientset.Interface, kubeClient kubernetes.Interface, - enableMetrics bool, exclude string, ) *Operator { eventBroadcaster := record.NewBroadcaster() @@ -214,11 +213,6 @@ func New( // make sure this is initialized after all the listers are initialized optr.upgradeableChecks = optr.defaultUpgradeableChecks() - if enableMetrics { - if err := optr.registerMetrics(coInformer.Informer()); err != nil { - panic(err) - } - } return optr } @@ -321,8 +315,7 @@ func loadConfigMapVerifierDataFromUpdate(update *payload.Update, clientBuilder v } // Run runs the cluster version operator until stopCh is completed. Workers is ignored for now. 
-func (optr *Operator) Run(ctx context.Context, workers int) { - defer utilruntime.HandleCrash() +func (optr *Operator) Run(ctx context.Context, workers int) error { defer optr.queue.ShutDown() stopCh := ctx.Done() workerStopCh := make(chan struct{}) @@ -331,8 +324,7 @@ func (optr *Operator) Run(ctx context.Context, workers int) { defer klog.Info("Shutting down ClusterVersionOperator") if !cache.WaitForCacheSync(stopCh, optr.cacheSynced...) { - klog.Info("Caches never synchronized") - return + return fmt.Errorf("caches never synchronized: %w", ctx.Err()) } // trigger the first cluster version reconcile always @@ -361,6 +353,8 @@ func (optr *Operator) Run(ctx context.Context, workers int) { // stop the queue, then wait for the worker to exit optr.queue.ShutDown() <-workerStopCh + + return nil } func (optr *Operator) queueKey() string { @@ -472,7 +466,10 @@ func (optr *Operator) sync(key string) error { // handle the case of a misconfigured CVO by doing nothing if len(desired.Image) == 0 { return optr.syncStatus(original, config, &SyncWorkerStatus{ - Failure: fmt.Errorf("No configured operator version, unable to update cluster"), + Failure: &payload.UpdateError{ + Reason: "NoDesiredImage", + Message: "No configured operator version, unable to update cluster", + }, }, errs) } diff --git a/pkg/cvo/cvo_scenarios_test.go b/pkg/cvo/cvo_scenarios_test.go index ed2def9d6..8b6f8c133 100644 --- a/pkg/cvo/cvo_scenarios_test.go +++ b/pkg/cvo/cvo_scenarios_test.go @@ -165,8 +165,8 @@ func TestCVO_StartupAndSync(t *testing.T) { Conditions: []configv1.ClusterOperatorStatusCondition{ {Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}, // report back to the user that we don't have enough info to proceed - {Type: ClusterStatusFailing, Status: configv1.ConditionTrue, Message: "No configured operator version, unable to update cluster"}, - {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Message: "Unable to apply : an error occurred"}, + {Type: 
ClusterStatusFailing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "No configured operator version, unable to update cluster"}, + {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "Unable to apply : an unknown error has occurred: NoDesiredImage"}, {Type: configv1.RetrievedUpdates, Status: configv1.ConditionFalse}, }, }, @@ -436,8 +436,8 @@ func TestCVO_StartupAndSyncUnverifiedPayload(t *testing.T) { Conditions: []configv1.ClusterOperatorStatusCondition{ {Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}, // report back to the user that we don't have enough info to proceed - {Type: ClusterStatusFailing, Status: configv1.ConditionTrue, Message: "No configured operator version, unable to update cluster"}, - {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Message: "Unable to apply : an error occurred"}, + {Type: ClusterStatusFailing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "No configured operator version, unable to update cluster"}, + {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "Unable to apply : an unknown error has occurred: NoDesiredImage"}, {Type: configv1.RetrievedUpdates, Status: configv1.ConditionFalse}, }, }, @@ -697,8 +697,8 @@ func TestCVO_StartupAndSyncPreconditionFailing(t *testing.T) { Conditions: []configv1.ClusterOperatorStatusCondition{ {Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}, // report back to the user that we don't have enough info to proceed - {Type: ClusterStatusFailing, Status: configv1.ConditionTrue, Message: "No configured operator version, unable to update cluster"}, - {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Message: "Unable to apply : an error occurred"}, + {Type: ClusterStatusFailing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "No configured operator version, unable 
to update cluster"}, + {Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Reason: "NoDesiredImage", Message: "Unable to apply : an unknown error has occurred: NoDesiredImage"}, {Type: configv1.RetrievedUpdates, Status: configv1.ConditionFalse}, }, }, diff --git a/pkg/cvo/egress.go b/pkg/cvo/egress.go new file mode 100644 index 000000000..75cfa607c --- /dev/null +++ b/pkg/cvo/egress.go @@ -0,0 +1,61 @@ +package cvo + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/url" + + "k8s.io/apimachinery/pkg/api/errors" +) + +// getHTTPSProxyURL returns a url.URL object for the configured +// https proxy only. It can be nil if does not exist or there is an error. +func (optr *Operator) getHTTPSProxyURL() (*url.URL, string, error) { + proxy, err := optr.proxyLister.Get("cluster") + + if errors.IsNotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + + if &proxy.Spec != nil { + if proxy.Spec.HTTPSProxy != "" { + proxyURL, err := url.Parse(proxy.Spec.HTTPSProxy) + if err != nil { + return nil, "", err + } + return proxyURL, proxy.Spec.TrustedCA.Name, nil + } + } + return nil, "", nil +} + +func (optr *Operator) getTLSConfig(cmNameRef string) (*tls.Config, error) { + cm, err := optr.cmConfigLister.Get(cmNameRef) + + if err != nil { + return nil, err + } + + certPool, _ := x509.SystemCertPool() + if certPool == nil { + certPool = x509.NewCertPool() + } + + if cm.Data["ca-bundle.crt"] != "" { + if ok := certPool.AppendCertsFromPEM([]byte(cm.Data["ca-bundle.crt"])); !ok { + return nil, fmt.Errorf("unable to add ca-bundle.crt certificates") + } + } else { + return nil, nil + } + + config := &tls.Config{ + RootCAs: certPool, + } + + return config, nil +} diff --git a/pkg/cvo/metrics.go b/pkg/cvo/metrics.go index db0332869..34f92203f 100644 --- a/pkg/cvo/metrics.go +++ b/pkg/cvo/metrics.go @@ -1,21 +1,28 @@ package cvo import ( + "context" + "net" + "net/http" "time" "github.com/prometheus/client_golang/prometheus" 
+ "github.com/prometheus/client_golang/prometheus/promhttp" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" + "k8s.io/klog" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/cluster-version-operator/lib/resourcemerge" "github.com/openshift/cluster-version-operator/pkg/internal" ) -func (optr *Operator) registerMetrics(coInformer cache.SharedInformer) error { +// RegisterMetrics initializes metrics and registers them with the +// Prometheus implementation. +func (optr *Operator) RegisterMetrics(coInformer cache.SharedInformer) error { m := newOperatorMetrics(optr) coInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: m.clusterOperatorChanged, @@ -86,6 +93,67 @@ version for 'cluster', or empty for 'initial'. } } +// RunMetrics launches a server bound to listenAddress serving +// Prometheus metrics at /metrics over HTTP. Continues serving until +// runContext.Done() and then attempts a clean shutdown limited by +// shutdownContext.Done(). Assumes runContext.Done() occurs before or +// simultaneously with shutdownContext.Done().
+func RunMetrics(runContext context.Context, shutdownContext context.Context, listenAddress string) error { + handler := http.NewServeMux() + handler.Handle("/metrics", promhttp.Handler()) + server := &http.Server{ + Handler: handler, + } + + errorChannel := make(chan error, 1) + errorChannelCount := 1 + go func() { + tcpListener, err := net.Listen("tcp", listenAddress) + if err != nil { + errorChannel <- err + return + } + + klog.Infof("Metrics port listening for HTTP on %v", listenAddress) + + errorChannel <- server.Serve(tcpListener) + }() + + shutdown := false + var loopError error + for errorChannelCount > 0 { + if shutdown { + err := <-errorChannel + errorChannelCount-- + if err != nil && err != http.ErrServerClosed { + if loopError == nil { + loopError = err + } else if err != nil { // log the error we are discarding + klog.Errorf("Failed to gracefully shut down metrics server: %s", err) + } + } + } else { + select { + case <-runContext.Done(): // clean shutdown + case err := <-errorChannel: // crashed before a shutdown was requested + errorChannelCount-- + if err != nil && err != http.ErrServerClosed { + loopError = err + } + } + shutdown = true + shutdownError := server.Shutdown(shutdownContext) + if loopError == nil { + loopError = shutdownError + } else if shutdownError != nil { // log the error we are discarding + klog.Errorf("Failed to gracefully shut down metrics server: %s", shutdownError) + } + } + } + + return loopError +} + type conditionKey struct { Name string Type string diff --git a/pkg/cvo/status.go b/pkg/cvo/status.go index 65e5df406..755f2fd55 100644 --- a/pkg/cvo/status.go +++ b/pkg/cvo/status.go @@ -331,13 +331,13 @@ func (optr *Operator) syncStatus(original, config *configv1.ClusterVersion, stat // convertErrorToProgressing returns true if the provided status indicates a failure condition can be interpreted as // still making internal progress. 
The general error we try to suppress is an operator or operators still being -// unavailable AND the general payload task making progress towards its goal. An operator is given 10 minutes since +// unavailable AND the general payload task making progress towards its goal. An operator is given 20 minutes since // its last update to go ready, or an hour has elapsed since the update began, before the condition is ignored. func convertErrorToProgressing(history []configv1.UpdateHistory, now time.Time, status *SyncWorkerStatus) (reason string, message string, ok bool) { if len(history) == 0 || status.Failure == nil || status.Reconciling || status.LastProgress.IsZero() { return "", "", false } - if now.Sub(status.LastProgress) > 10*time.Minute || now.Sub(history[0].StartedTime.Time) > time.Hour { + if now.Sub(status.LastProgress) > 20*time.Minute || now.Sub(history[0].StartedTime.Time) > time.Hour { return "", "", false } uErr, ok := status.Failure.(*payload.UpdateError) diff --git a/pkg/cvo/sync_worker.go b/pkg/cvo/sync_worker.go index d03ea2d92..0e8a7db54 100644 --- a/pkg/cvo/sync_worker.go +++ b/pkg/cvo/sync_worker.go @@ -626,8 +626,8 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w if precreateObjects { payload.RunGraph(ctx, graph, 8, func(ctx context.Context, tasks []*payload.Task) error { for _, task := range tasks { - if contextIsCancelled(ctx) { - return cr.CancelError() + if err := ctx.Err(); err != nil { + return cr.ContextError(err) } if task.Manifest.GVK != configv1.SchemeGroupVersion.WithKind("ClusterOperator") { continue @@ -645,8 +645,8 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w // update each object errs := payload.RunGraph(ctx, graph, maxWorkers, func(ctx context.Context, tasks []*payload.Task) error { for _, task := range tasks { - if contextIsCancelled(ctx) { - return cr.CancelError() + if err := ctx.Err(); err != nil { + return cr.ContextError(err) } cr.Update() @@ -668,8 
+668,10 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w return nil }) if len(errs) > 0 { - err := cr.Errors(errs) - return err + if err := cr.Errors(errs); err != nil { + return err + } + return errs[0] } // update the status @@ -690,11 +692,11 @@ func init() { ) } -type errCanceled struct { +type errContext struct { err error } -func (e errCanceled) Error() string { return e.err.Error() } +func (e errContext) Error() string { return e.err.Error() } // consistentReporter hides the details of calculating the status based on the progress // of the graph runner. @@ -731,7 +733,7 @@ func (r *consistentReporter) Error(err error) { copied := r.status copied.Step = "ApplyResources" copied.Fraction = float32(r.done) / float32(r.total) - if !isCancelledError(err) { + if !isContextError(err) { copied.Failure = err } r.reporter.Report(copied) @@ -752,10 +754,10 @@ func (r *consistentReporter) Errors(errs []error) error { return err } -func (r *consistentReporter) CancelError() error { +func (r *consistentReporter) ContextError(err error) error { r.lock.Lock() defer r.lock.Unlock() - return errCanceled{fmt.Errorf("update was cancelled at %d of %d", r.done, r.total)} + return errContext{fmt.Errorf("update %s at %d of %d", err, r.done, r.total)} } func (r *consistentReporter) Complete() { @@ -771,11 +773,11 @@ func (r *consistentReporter) Complete() { r.reporter.Report(copied) } -func isCancelledError(err error) bool { +func isContextError(err error) bool { if err == nil { return false } - _, ok := err.(errCanceled) + _, ok := err.(errContext) return ok } @@ -796,11 +798,12 @@ func isImageVerificationError(err error) bool { // not truly an error (cancellation). 
// TODO: take into account install vs upgrade func summarizeTaskGraphErrors(errs []error) error { - // we ignore cancellation errors since they don't provide good feedback to users and are an internal - // detail of the server - err := errors.FilterOut(errors.NewAggregate(errs), isCancelledError) + // we ignore context errors (canceled or timed out) since they don't + // provide good feedback to users and are an internal detail of the + // server + err := errors.FilterOut(errors.NewAggregate(errs), isContextError) if err == nil { - klog.V(4).Infof("All errors were cancellation errors: %v", errs) + klog.V(4).Infof("All errors were context errors: %v", errs) return nil } agg, ok := err.(errors.Aggregate) @@ -967,16 +970,6 @@ func ownerRefModifier(config *configv1.ClusterVersion) resourcebuilder.MetaV1Obj } } -// contextIsCancelled returns true if the provided context is cancelled. -func contextIsCancelled(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - // runThrottledStatusNotifier invokes fn every time ch is updated, but no more often than once // every interval. If bucket is non-zero then the channel is throttled like a rate limiter bucket. 
func runThrottledStatusNotifier(stopCh <-chan struct{}, interval time.Duration, bucket int, ch <-chan SyncWorkerStatus, fn func()) { diff --git a/pkg/start/start.go b/pkg/start/start.go index c56436f23..97befa3b3 100644 --- a/pkg/start/start.go +++ b/pkg/start/start.go @@ -6,17 +6,15 @@ import ( "context" "fmt" "math/rand" - "net/http" "os" "os/signal" - "sync" "syscall" "time" "github.com/google/uuid" - "github.com/prometheus/client_golang/prometheus/promhttp" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -67,10 +65,14 @@ type Options struct { Name string Namespace string PayloadOverride string - EnableMetrics bool ResyncInterval time.Duration } +type asyncResult struct { + name string + error error +} + func defaultEnv(name, defaultValue string) string { env, ok := os.LookupEnv(name) if !ok { @@ -91,12 +93,11 @@ func NewOptions() *Options { Name: defaultEnv("CVO_NAME", defaultComponentName), PayloadOverride: os.Getenv("PAYLOAD_OVERRIDE"), ResyncInterval: minResyncPeriod, - EnableMetrics: true, Exclude: os.Getenv("EXCLUDE_MANIFESTS"), } } -func (o *Options) Run() error { +func (o *Options) Run(ctx context.Context) error { if o.NodeName == "" { return fmt.Errorf("node-name is required") } @@ -126,90 +127,127 @@ func (o *Options) Run() error { return err } - // TODO: Kube 1.14 will contain a ReleaseOnCancel boolean on - // LeaderElectionConfig that allows us to have the lock code - // release the lease when this context is cancelled. At that - // time we can remove our changes to OnStartedLeading. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + o.run(ctx, controllerCtx, lock) + return nil +} + +// run launches a number of goroutines to handle manifest application, +// metrics serving, etc. 
It continues operating until ctx.Done(), +// and then attempts a clean shutdown limited by an internal context +// with a two-minute cap. It returns after it successfully collects all +// launched goroutines. +func (o *Options) run(ctx context.Context, controllerCtx *Context, lock *resourcelock.ConfigMapLock) { + runContext, runCancel := context.WithCancel(ctx) // so we can cancel internally on errors or TERM + defer runCancel() + shutdownContext, shutdownCancel := context.WithCancel(context.Background()) // extends beyond ctx + defer shutdownCancel() + postMainContext, postMainCancel := context.WithCancel(context.Background()) // extends beyond ctx + defer postMainCancel() + ch := make(chan os.Signal, 1) defer func() { signal.Stop(ch) }() signal.Notify(ch, os.Interrupt, syscall.SIGTERM) go func() { + defer utilruntime.HandleCrash() sig := <-ch klog.Infof("Shutting down due to %s", sig) - cancel() - - // exit after 2s no matter what - select { - case <-time.After(5 * time.Second): - klog.Fatalf("Exiting") - case <-ch: - klog.Fatalf("Received shutdown signal twice, exiting") - } + runCancel() + sig = <-ch + klog.Fatalf("Received shutdown signal twice, exiting: %s", sig) }() - o.run(ctx, controllerCtx, lock) - return nil -} - -func (o *Options) run(ctx context.Context, controllerCtx *Context, lock *resourcelock.ConfigMapLock) { - // listen on metrics - if len(o.ListenAddr) > 0 { - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.Handler()) + resultChannel := make(chan asyncResult, 1) + resultChannelCount := 0 + if o.ListenAddr != "" { + resultChannelCount++ go func() { - if err := http.ListenAndServe(o.ListenAddr, mux); err != nil { - klog.Fatalf("Unable to start metrics server: %v", err) - } + defer utilruntime.HandleCrash() + err := cvo.RunMetrics(postMainContext, shutdownContext, o.ListenAddr) + resultChannel <- asyncResult{name: "metrics server", error: err} }() } - exit := make(chan struct{}) - exitClose := sync.Once{} - - // TODO: when we switch to 
graceful lock shutdown, this can be - // moved back inside RunOrDie - // TODO: properly wire ctx here - go leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{ - Lock: lock, - LeaseDuration: leaseDuration, - RenewDeadline: renewDeadline, - RetryPeriod: retryPeriod, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(localCtx context.Context) { - controllerCtx.Start(ctx) - select { - case <-ctx.Done(): - // WARNING: this is not completely safe until we have Kube 1.14 and ReleaseOnCancel - // and client-go ContextCancelable, which allows us to block new API requests before - // we step down. However, the CVO isn't that sensitive to races and can tolerate - // brief overlap. - klog.Infof("Stepping down as leader") - // give the controllers some time to shut down - time.Sleep(100 * time.Millisecond) - // if we still hold the leader lease, clear the owner identity (other lease watchers - // still have to wait for expiration) like the new ReleaseOnCancel code will do. - if err := lock.Update(resourcelock.LeaderElectionRecord{}); err == nil { - // if we successfully clear the owner identity, we can safely delete the record - if err := lock.Client.ConfigMaps(lock.ConfigMapMeta.Namespace).Delete(lock.ConfigMapMeta.Name, nil); err != nil { - klog.Warningf("Unable to step down cleanly: %v", err) - } + informersDone := postMainContext.Done() + // FIXME: would be nice if there was a way to collect these. 
+ controllerCtx.CVInformerFactory.Start(informersDone) + controllerCtx.OpenshiftConfigInformerFactory.Start(informersDone) + controllerCtx.InformerFactory.Start(informersDone) + + resultChannelCount++ + go func() { + defer utilruntime.HandleCrash() + leaderelection.RunOrDie(postMainContext, leaderelection.LeaderElectionConfig{ + Lock: lock, + ReleaseOnCancel: true, + LeaseDuration: leaseDuration, + RenewDeadline: renewDeadline, + RetryPeriod: retryPeriod, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(_ context.Context) { // no need for this passed-through postMainContext, because goroutines we launch inside will use runContext + resultChannelCount++ + go func() { + defer utilruntime.HandleCrash() + err := controllerCtx.CVO.Run(runContext, 2) + resultChannel <- asyncResult{name: "main operator", error: err} + }() + + if controllerCtx.AutoUpdate != nil { + resultChannelCount++ + go func() { + defer utilruntime.HandleCrash() + err := controllerCtx.AutoUpdate.Run(2, runContext.Done()) + resultChannel <- asyncResult{name: "auto-update controller", error: err} + }() } - klog.Infof("Finished shutdown") - exitClose.Do(func() { close(exit) }) - case <-localCtx.Done(): - // we will exit in OnStoppedLeading - } + }, + OnStoppedLeading: func() { + klog.Info("Stopped leading; shutting down.") + runCancel() + }, }, - OnStoppedLeading: func() { - klog.Warning("leaderelection lost") - exitClose.Do(func() { close(exit) }) - }, - }, - }) + }) + resultChannel <- asyncResult{name: "leader controller", error: nil} + }() - <-exit + var shutdownTimer *time.Timer + for resultChannelCount > 0 { + klog.Infof("Waiting on %d outstanding goroutines.", resultChannelCount) + if shutdownTimer == nil { // running + select { + case <-runContext.Done(): + klog.Info("Run context completed; beginning two-minute graceful shutdown period.") + shutdownTimer = time.NewTimer(2 * time.Minute) + case result := <-resultChannel: + resultChannelCount-- + if result.error == nil { + 
klog.Infof("Collected %s goroutine.", result.name) + } else { + klog.Errorf("Collected %s goroutine: %v", result.name, result.error) + runCancel() // this will cause shutdownTimer initialization in the next loop + } + if result.name == "main operator" { + postMainCancel() + } + } + } else { // shutting down + select { + case <-shutdownTimer.C: // never triggers after the channel is stopped, although it would not matter much if it did because subsequent cancel calls do nothing. + shutdownCancel() + shutdownTimer.Stop() + case result := <-resultChannel: + resultChannelCount-- + if result.error == nil { + klog.Infof("Collected %s goroutine.", result.name) + } else { + klog.Errorf("Collected %s goroutine: %v", result.name, result.error) + } + if result.name == "main operator" { + postMainCancel() + } + } + } + } + klog.Info("Finished collecting operator goroutines.") } // createResourceLock initializes the lock. @@ -327,6 +365,7 @@ func (o *Options) NewControllerContext(cb *ClientBuilder) *Context { sharedInformers := externalversions.NewSharedInformerFactory(client, resyncPeriod(o.ResyncInterval)()) + coInformer := sharedInformers.Config().V1().ClusterOperators() ctx := &Context{ CVInformerFactory: cvInformer, OpenshiftConfigInformerFactory: openshiftConfigInformer, @@ -340,12 +379,11 @@ func (o *Options) NewControllerContext(cb *ClientBuilder) *Context { o.PayloadOverride, resyncPeriod(o.ResyncInterval)(), cvInformer.Config().V1().ClusterVersions(), - sharedInformers.Config().V1().ClusterOperators(), + coInformer, openshiftConfigInformer.Core().V1().ConfigMaps(), sharedInformers.Config().V1().Proxies(), cb.ClientOrDie(o.Namespace), cb.KubeClientOrDie(o.Namespace, useProtobuf), - o.EnableMetrics, o.Exclude, ), } @@ -358,18 +396,10 @@ func (o *Options) NewControllerContext(cb *ClientBuilder) *Context { cb.KubeClientOrDie(o.Namespace), ) } - return ctx -} - -// Start launches the controllers in the provided context and any supporting -// infrastructure. 
When ch is closed the controllers will be shut down. -func (c *Context) Start(ctx context.Context) { - ch := ctx.Done() - go c.CVO.Run(ctx, 2) - if c.AutoUpdate != nil { - go c.AutoUpdate.Run(2, ch) + if o.ListenAddr != "" { + if err := ctx.CVO.RegisterMetrics(coInformer.Informer()); err != nil { + panic(err) + } } - c.CVInformerFactory.Start(ch) - c.OpenshiftConfigInformerFactory.Start(ch) - c.InformerFactory.Start(ch) + return ctx } diff --git a/pkg/start/start_integration_test.go b/pkg/start/start_integration_test.go index 446fba9d4..de8e1d7ca 100644 --- a/pkg/start/start_integration_test.go +++ b/pkg/start/start_integration_test.go @@ -238,15 +238,19 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { options.NodeName = "test-node" options.ReleaseImage = payloadImage1 options.PayloadOverride = filepath.Join(dir, "ignored") - options.EnableMetrics = false controllers := options.NewControllerContext(cb) worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil), 5*time.Second, wait.Backoff{Steps: 3}, "") controllers.CVO.SetSyncWorkerForTesting(worker) + lock, err := createResourceLock(cb, options.Namespace, options.Name) + if err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - controllers.Start(ctx) + go options.run(ctx, controllers, lock) t.Logf("wait until we observe the cluster version become available") lastCV, err := waitForUpdateAvailable(t, client, ns, false, "0.0.1") @@ -390,16 +394,20 @@ func TestIntegrationCVO_initializeAndHandleError(t *testing.T) { options.NodeName = "test-node" options.ReleaseImage = payloadImage1 options.PayloadOverride = filepath.Join(dir, "ignored") - options.EnableMetrics = false options.ResyncInterval = 3 * time.Second controllers := options.NewControllerContext(cb) worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil), 5*time.Second, wait.Backoff{Duration: time.Second, Factor: 1.2}, "") 
controllers.CVO.SetSyncWorkerForTesting(worker) + lock, err := createResourceLock(cb, options.Namespace, options.Name) + if err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - controllers.Start(ctx) + go options.run(ctx, controllers, lock) t.Logf("wait until we observe the cluster version become available") lastCV, err := waitForUpdateAvailable(t, client, ns, false, "0.0.1") @@ -497,13 +505,12 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) { options.Name = ns options.ListenAddr = "" options.NodeName = "test-node" - options.EnableMetrics = false controllers := options.NewControllerContext(cb) worker := cvo.NewSyncWorker(&mapPayloadRetriever{}, cvo.NewResourceBuilder(cfg, cfg, nil), 5*time.Second, wait.Backoff{Steps: 3}, "") controllers.CVO.SetSyncWorkerForTesting(worker) - lock, err := createResourceLock(cb, ns, ns) + lock, err := createResourceLock(cb, options.Namespace, options.Name) if err != nil { t.Fatal(err) } @@ -519,7 +526,7 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) { // wait until the lock record exists err = wait.PollImmediate(200*time.Millisecond, 60*time.Second, func() (bool, error) { - _, err := kc.CoreV1().ConfigMaps(ns).Get(ns, metav1.GetOptions{}) + _, _, err := lock.Get() if err != nil { if errors.IsNotFound(err) { return false, nil @@ -541,26 +548,26 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) { t.Fatalf("no leader election events found in\n%#v", events.Items) } - t.Logf("after the context is closed, the lock record should be deleted quickly") + t.Logf("after the context is closed, the lock should be released quickly") cancel() startTime := time.Now() var endTime time.Time // the lock should be deleted immediately err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { - _, err := kc.CoreV1().ConfigMaps(ns).Get(ns, metav1.GetOptions{}) - if errors.IsNotFound(err) { - endTime = time.Now() - return true, nil - } + 
electionRecord, _, err := lock.Get() if err != nil { + if errors.IsNotFound(err) { + return false, nil + } return false, err } - return false, nil + endTime = time.Now() + return electionRecord.HolderIdentity == "", nil }) if err != nil { t.Fatal(err) } - t.Logf("lock deleted in %s", endTime.Sub(startTime)) + t.Logf("lock released in %s", endTime.Sub(startTime)) select { case <-time.After(time.Second): @@ -667,7 +674,6 @@ metadata: options.NodeName = "test-node" options.ReleaseImage = payloadImage1 options.PayloadOverride = payloadDir - options.EnableMetrics = false controllers := options.NewControllerContext(cb) if err := controllers.CVO.InitializeFromPayload(cb.RestConfig(defaultQPS), cb.RestConfig(highQPS)); err != nil { t.Fatal(err) @@ -676,9 +682,14 @@ metadata: worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil), 5*time.Second, wait.Backoff{Steps: 3}, "") controllers.CVO.SetSyncWorkerForTesting(worker) + lock, err := createResourceLock(cb, options.Namespace, options.Name) + if err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - controllers.Start(ctx) + go options.run(ctx, controllers, lock) t.Logf("wait until we observe the cluster version become available") lastCV, err := waitForUpdateAvailable(t, client, ns, false, "0.0.1") diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 520b9ada0..48ec64b40 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) { // Transform the remaining input, growing dst and src buffers as necessary. 
for { n := copy(src, s[pSrc:]) - nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + atEOF := pSrc+n == len(s) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF) pDst += nDst pSrc += nSrc @@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) { dst = grow(dst, pDst) } } else if err == ErrShortSrc { + if atEOF { + return string(dst[:pDst]), pSrc, err + } if nSrc == 0 { src = grow(src, 0) } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 48d144008..50deb6600 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() { // Rule W1. // Changes all NSMs. - preceedingCharacterType := s.sos + precedingCharacterType := s.sos for i, t := range s.types { if t == NSM { - s.types[i] = preceedingCharacterType + s.types[i] = precedingCharacterType } else { if t.in(LRI, RLI, FSI, PDI) { - preceedingCharacterType = ON + precedingCharacterType = ON } - preceedingCharacterType = t + precedingCharacterType = t } } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 022e3c690..16b11db53 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 7297cce32..2c58f09ba 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.13 +// +build go1.13,!go1.14 package norm diff --git a/vendor/modules.txt b/vendor/modules.txt index 5d5ed1be1..50742934f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -109,7 +109,7 @@ golang.org/x/oauth2/internal # golang.org/x/sys v0.0.0-20191003212358-c178f38b412c golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.2 +# golang.org/x/text v0.3.2 => golang.org/x/text v0.3.3 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi