diff --git a/pkg/controller/clustersync/clustersync_controller.go b/pkg/controller/clustersync/clustersync_controller.go
index c424848357a..c2b70a4411c 100644
--- a/pkg/controller/clustersync/clustersync_controller.go
+++ b/pkg/controller/clustersync/clustersync_controller.go
@@ -924,16 +924,17 @@ func applyToTargetCluster(
 		return err
 	}
 
+	logger.WithField("uid", obj.GetUID()).WithField("resource_version", obj.GetResourceVersion()).Info("attempting to apply resource")
 	applyResult, err := applyFn(bytes)
 	// Record the amount of time we took to apply this specific resource. When combined with the metric for duration of
 	// our kube client requests, we can get an idea how much time we're spending cpu bound vs network bound.
 	applyTime := metav1.Now().Sub(startTime).Seconds()
 	if err != nil {
-		logger.WithError(err).Warn("error applying resource")
+		logger.WithError(err).WithField("uid", obj.GetUID()).WithField("resource_version", obj.GetResourceVersion()).WithField("applyTime", applyTime).Warn("error applying resource")
 		metricResourcesApplied.WithLabelValues(applyFnMetricLabel, metricResultError).Inc()
 		metricTimeToApplySyncSetResource.WithLabelValues(applyFnMetricLabel, metricResultError).Observe(applyTime)
 	} else {
-		logger.WithField("applyResult", applyResult).Debug("resource applied")
+		logger.WithField("applyResult", applyResult).WithField("uid", obj.GetUID()).WithField("resource_version", obj.GetResourceVersion()).WithField("applyTime", applyTime).Info("resource applied")
 		metricResourcesApplied.WithLabelValues(applyFnMetricLabel, metricResultSuccess).Inc()
 		metricTimeToApplySyncSetResource.WithLabelValues(applyFnMetricLabel, metricResultSuccess).Observe(applyTime)
 	}