From 233c14556c4eac6e5008f7bab535b5fa6ceb17fb Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Sun, 31 Mar 2024 17:56:27 -0700 Subject: [PATCH 01/14] Operator 1.0.0-alpha Co-authored-by: Qi Tan --- .editorconfig | 2 + .gitattributes | 8 + .github/.licenserc.yaml | 6 + .github/workflows/build_and_test.yml | 18 +- .gitignore | 67 +- Dockerfile | 47 + LICENSE | 201 + README.md | 64 +- .../spark-kubernetes-operator/.helmignore | 23 + .../helm/spark-kubernetes-operator/Chart.yaml | 25 + .../conf/log4j2.properties | 52 + .../conf/spark-operator.properties | 22 + .../sparkapplications.org.apache.spark-v1.yml | 6947 +++++++++++++++++ .../templates/_helpers.tpl | 155 + .../templates/rbac.yaml | 124 + .../templates/spark-operator.yaml | 211 + .../templates/sparkapps-resource.yaml | 216 + .../spark-kubernetes-operator/values.yaml | 176 + build.gradle | 87 + config/checkstyle/checkstyle-suppressions.xml | 63 + config/checkstyle/checkstyle.xml | 195 + config/pmd/ruleset.xml | 33 + config/spotbugs/exclude.xml | 25 + dev/.rat-excludes | 5 + docker-entrypoint.sh | 36 + .../spark_3_4_1/pyspark-example.yaml | 31 + .../spark_3_4_1/spark-pi_scala_2.12.yaml | 33 + .../spark_3_4_1/sparkr-example.yaml | 46 + .../spark_3_5_0/pyspark-example.yaml | 32 + .../spark_3_5_0/spark-pi_scala_2.12.yaml | 34 + .../spark_3_5_0/sparkr-example.yaml | 46 + .../spark_3_5_1/pyspark-example.yaml | 31 + .../spark_3_5_1/spark-pi_scala_2.12.yaml | 33 + .../spark_3_5_1/sparkr-example.yaml | 46 + gradle.properties | 45 + gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 43462 bytes gradle/wrapper/gradle-wrapper.properties | 7 + gradlew | 249 + gradlew.bat | 92 + settings.gradle | 23 + spark-operator-api/build.gradle | 52 + .../kubernetes/operator/BaseResource.java | 31 + .../spark/kubernetes/operator/Constants.java | 81 + .../kubernetes/operator/SparkApplication.java | 51 + .../operator/SparkApplicationList.java | 26 + .../decorators/ResourceDecorator.java | 25 + .../kubernetes/operator/diff/Diffable.java | 22 + .../operator/spec/ApplicationSpec.java | 57 + .../spec/ApplicationTimeoutConfig.java | 45 + .../operator/spec/ApplicationTolerations.java | 52 + .../spec/BaseApplicationTemplateSpec.java | 37 + .../kubernetes/operator/spec/BaseSpec.java | 34 + .../operator/spec/DeploymentMode.java | 24 + .../operator/spec/InstanceConfig.java | 66 + .../kubernetes/operator/spec/JDKVersion.java | 24 + .../operator/spec/RestartConfig.java | 41 + .../operator/spec/RestartPolicy.java | 43 + .../operator/spec/RuntimeVersions.java | 40 + .../operator/spec/ScalaVersion.java | 24 + .../operator/spec/SparkVersion.java | 36 + .../status/ApplicationAttemptSummary.java | 40 + .../operator/status/ApplicationState.java | 51 + .../status/ApplicationStateSummary.java | 148 + .../operator/status/ApplicationStatus.java | 129 + .../operator/status/AttemptInfo.java | 41 + .../operator/status/BaseAttemptSummary.java | 30 + .../kubernetes/operator/status/BaseState.java | 36 + .../operator/status/BaseStateSummary.java | 25 + .../operator/status/BaseStatus.java | 66 + .../kubernetes/operator/utils/ModelUtils.java | 109 + .../src/main/resources/printer-columns.sh | 27 + .../status/ApplicationStatusTest.java | 47 + spark-operator-docs/.gitignore | 42 + spark-operator-docs/architecture.md | 64 + spark-operator-docs/configuration.md | 99 + spark-operator-docs/developer_guide.md | 84 + spark-operator-docs/getting_started.md | 190 + spark-operator-docs/metrics_logging.md | 110 + spark-operator-docs/operations.md | 122 + spark-operator-docs/operator_probes.md | 82 + 
spark-operator-docs/resources/prometheus.png | Bin 0 -> 184821 bytes spark-operator-docs/resources/state.png | Bin 0 -> 55173 bytes spark-operator-docs/spark_application.md | 212 + spark-operator-tests/.gitignore | 42 + spark-operator-tests/build.gradle | 30 + .../operator/AppSubmitToSucceedTest.java | 193 + .../kubernetes/operator/CancelAppTest.java | 23 + .../src/test/resources/EcsLayout.json | 49 + .../src/test/resources/log4j2.properties | 52 + spark-operator/.gitignore | 42 + spark-operator/build.gradle | 95 + .../kubernetes/operator/SparkOperator.java | 217 + .../client/KubernetesClientFactory.java | 72 + .../operator/client/RetryInterceptor.java | 104 + .../operator/config/ConfigOption.java | 104 + .../operator/config/SparkOperatorConf.java | 406 + .../config/SparkOperatorConfManager.java | 102 + .../SparkOperatorConfigMapReconciler.java | 81 + .../controller/SparkApplicationContext.java | 91 + .../operator/decorators/DriverDecorator.java | 52 + .../decorators/DriverResourceDecorator.java | 65 + .../operator/health/SentinelManager.java | 209 + .../listeners/ApplicationStatusListener.java | 29 + .../listeners/BaseStatusListener.java | 30 + .../operator/metrics/JVMMetricSet.java | 66 + .../operator/metrics/MetricsService.java | 60 + .../operator/metrics/MetricsSystem.java | 135 + .../metrics/MetricsSystemFactory.java | 98 + .../metrics/sink/PrometheusPullModelSink.java | 77 + .../operator/metrics/source/JVMSource.java | 38 + .../source/KubernetesMetricsInterceptor.java | 175 + .../metrics/source/OperatorJosdkMetrics.java | 276 + .../operator/probe/HealthProbe.java | 113 + .../operator/probe/ProbeService.java | 59 + .../operator/probe/ReadinessProbe.java | 56 + .../reconciler/ReconcileProgress.java | 66 + .../SparkApplicationReconcileUtils.java | 182 + .../SparkApplicationReconciler.java | 225 + .../reconciler/SparkReconcilerUtils.java | 252 + .../observers/AppDriverReadyObserver.java | 49 + .../observers/AppDriverRunningObserver.java | 38 + .../observers/AppDriverStartObserver.java | 46 + .../observers/AppDriverTimeoutObserver.java | 90 + .../observers/BaseAppDriverObserver.java | 145 + .../BaseSecondaryResourceObserver.java | 43 + .../reconcilesteps/AppCleanUpStep.java | 148 + .../reconcilesteps/AppInitStep.java | 137 + .../reconcilesteps/AppReconcileStep.java | 73 + .../AppResourceObserveStep.java | 42 + .../reconcilesteps/AppRunningStep.java | 95 + .../reconcilesteps/AppTerminatedStep.java | 41 + .../reconcilesteps/AppValidateStep.java | 55 + .../reconcilesteps/UnknownStateStep.java | 47 + .../utils/ApplicationStatusUtils.java | 72 + .../operator/utils/LoggingUtils.java | 75 + .../kubernetes/operator/utils/PodPhase.java | 49 + .../kubernetes/operator/utils/PodUtils.java | 81 + .../kubernetes/operator/utils/ProbeUtil.java | 54 + .../operator/utils/SparkExceptionUtils.java | 38 + .../operator/utils/StatusRecorder.java | 200 + .../src/main/resources/EcsLayout.json | 49 + .../src/main/resources/log4j2.properties | 52 + .../src/main/resources/spark-pi.yaml | 35 + .../src/main/resources/streaming.yaml | 91 + .../operator/config/ConfigOptionTest.java | 181 + .../config/SparkOperatorConfManagerTest.java | 72 + .../operator/health/SentinelManagerTest.java | 203 + .../metrics/MetricsSystemFactoryTest.java | 52 + .../operator/metrics/MetricsSystemTest.java | 72 + .../operator/metrics/sink/MockSink.java | 68 + .../KubernetesMetricsInterceptorTest.java | 137 + .../source/OperatorJosdkMetricsTest.java | 200 + .../operator/probe/HealthProbeTest.java | 197 + 
.../operator/probe/ProbeServiceTest.java | 124 + .../operator/probe/ReadinessProbeTest.java | 68 + .../SparkApplicationReconcileUtilsTest.java | 93 + .../SparkApplicationReconcilerTest.java | 97 + .../kubernetes/operator/utils/TestUtils.java | 61 + .../test/resources/spark-operator.properties | 18 + spark-submission-worker/.gitignore | 42 + spark-submission-worker/build.gradle | 44 + .../operator/ApplicationClientWorker.java | 107 + .../operator/ApplicationDriverConf.java | 65 + .../operator/ApplicationResourceSpec.java | 109 + .../src/main/resources/EcsLayout.json | 49 + .../src/main/resources/log4j2.properties | 52 + .../operator/ApplicationClientWorkerTest.java | 201 + .../operator/ApplicationResourceSpecTest.java | 108 + 168 files changed, 20343 insertions(+), 9 deletions(-) create mode 100644 .editorconfig create mode 100644 .gitattributes create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 build-tools/helm/spark-kubernetes-operator/.helmignore create mode 100644 build-tools/helm/spark-kubernetes-operator/Chart.yaml create mode 100644 build-tools/helm/spark-kubernetes-operator/conf/log4j2.properties create mode 100644 build-tools/helm/spark-kubernetes-operator/conf/spark-operator.properties create mode 100644 build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml create mode 100644 build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl create mode 100644 build-tools/helm/spark-kubernetes-operator/templates/rbac.yaml create mode 100644 build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml create mode 100644 build-tools/helm/spark-kubernetes-operator/templates/sparkapps-resource.yaml create mode 100644 build-tools/helm/spark-kubernetes-operator/values.yaml create mode 100644 build.gradle create mode 100644 config/checkstyle/checkstyle-suppressions.xml create mode 100644 config/checkstyle/checkstyle.xml create mode 100644 config/pmd/ruleset.xml create mode 100644 config/spotbugs/exclude.xml create mode 100755 docker-entrypoint.sh create mode 100644 e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml create mode 100644 e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml create mode 100644 e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_1/pyspark-example.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_1/spark-pi_scala_2.12.yaml create mode 100644 e2e-tests/spark-apps/spark_3_5_1/sparkr-example.yaml create mode 100644 gradle.properties create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties create mode 100755 gradlew create mode 100644 gradlew.bat create mode 100644 settings.gradle create mode 100644 spark-operator-api/build.gradle create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplicationList.java create mode 100644 
spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/diff/Diffable.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java create mode 100755 spark-operator-api/src/main/resources/printer-columns.sh create mode 100644 spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java create mode 100644 spark-operator-docs/.gitignore create mode 100644 spark-operator-docs/architecture.md create mode 100644 spark-operator-docs/configuration.md create mode 100644 spark-operator-docs/developer_guide.md create mode 100644 spark-operator-docs/getting_started.md create mode 100644 spark-operator-docs/metrics_logging.md create mode 100644 spark-operator-docs/operations.md create mode 100644 spark-operator-docs/operator_probes.md create mode 100644 
spark-operator-docs/resources/prometheus.png create mode 100644 spark-operator-docs/resources/state.png create mode 100644 spark-operator-docs/spark_application.md create mode 100644 spark-operator-tests/.gitignore create mode 100644 spark-operator-tests/build.gradle create mode 100644 spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java create mode 100644 spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java create mode 100644 spark-operator-tests/src/test/resources/EcsLayout.json create mode 100644 spark-operator-tests/src/test/resources/log4j2.properties create mode 100644 spark-operator/.gitignore create mode 100644 spark-operator/build.gradle create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverDecorator.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/JVMMetricSet.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystem.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetrics.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java create mode 100644 
spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java create mode 100644 spark-operator/src/main/resources/EcsLayout.json create mode 100644 spark-operator/src/main/resources/log4j2.properties create mode 100644 spark-operator/src/main/resources/spark-pi.yaml create mode 100644 spark-operator/src/main/resources/streaming.yaml create mode 100644 
spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java create mode 100644 spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java create mode 100644 spark-operator/src/test/resources/spark-operator.properties create mode 100644 spark-submission-worker/.gitignore create mode 100644 spark-submission-worker/build.gradle create mode 100644 spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java create mode 100644 spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java create mode 100644 spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java create mode 100644 spark-submission-worker/src/main/resources/EcsLayout.json create mode 100644 spark-submission-worker/src/main/resources/log4j2.properties create mode 100644 spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java create mode 100644 spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..1f808de5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,2 @@ +[*] +insert_final_newline = true diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..afd59d8f --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +# +# https://help.github.com/articles/dealing-with-line-endings/ +# +# Linux start script should use lf +/gradlew text eol=lf + +# These are Windows script files and should use crlf +*.bat text eol=crlf diff --git a/.github/.licenserc.yaml b/.github/.licenserc.yaml index e9d12451..8ce201ca 100644 --- a/.github/.licenserc.yaml +++ b/.github/.licenserc.yaml @@ -9,10 +9,16 @@ header: paths-ignore: - '**/*.md' - '**/.gitignore' + - '**/.gitattributes' - '.github/**' - 'dev/**' - 'LICENSE' - 'NOTICE' - '.asf.yaml' + - '**/.helmignore' + - '**/EcsLayout.json' + - '.editorconfig' + - 
'gradle/**' + - '**/sparkapplications.org.apache.spark-v1.yml' comment: on-failure diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 6a5a1475..7de73818 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -26,4 +26,20 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: config: .github/.licenserc.yaml - + test_ci: + name: "Test CI" + runs-on: ubuntu-latest + strategy: + matrix: + java-version: [ 11, 17 ] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Set up JDK ${{ matrix.java-version }} + uses: actions/setup-java@v2 + with: + java-version: ${{ matrix.java-version }} + distribution: 'adopt' + - name: Build with Gradle + run: | + set -o pipefail; ./gradlew clean build; set +o pipefail diff --git a/.gitignore b/.gitignore index 78213f8a..6334619e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,61 @@ -*.swp +# Compiled source # +################### +*.class +*.dll +*.exe +*.o +*.so +*.pyc + +# Packages # +############ +*.7z +*.dmg +*.gz +*.iso +*.rar +*.tar +*.zip + +# Logs and databases # +###################### +*.log + +# OS generated files # +###################### +.DS_Store* +ehthumbs.db +Icon? +Thumbs.db + +# Editor Files # +################ *~ -.java-version -.DS_Store -.idea/ -.vscode -/lib/ -target/ +*.swp + +# Gradle Files # +################ +.gradle +.m2 + +# IntelliJ specific files/directories +out +.idea +*.ipr +*.iws +*.iml + +# Eclipse specific files/directories +.classpath +.project +.settings +.metadata +**/.cache-main + +# build +**/generated +.out +build/ +**/build/ +lib +target diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..67934e61 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,47 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +FROM gradle:8.6-jdk11-alpine AS builder +ARG BASE_VERSION +WORKDIR /app +COPY . . +RUN ./gradlew clean build -x test + +FROM eclipse-temurin:11-jre-jammy +ARG BASE_VERSION + +ENV SPARK_OPERATOR_HOME=/opt/spark-operator +ENV SPARK_OPERATOR_WORK_DIR=/opt/spark-operator/operator +ENV BASE_VERSION=$BASE_VERSION +ENV OPERATOR_JAR=spark-kubernetes-operator-$BASE_VERSION-all.jar + +WORKDIR $SPARK_OPERATOR_WORK_DIR + +RUN groupadd --system --gid=9999 spark && \ + useradd --system --home-dir $SPARK_OPERATOR_HOME --uid=9999 --gid=spark spark + +COPY --from=builder /app/spark-operator/build/libs/$OPERATOR_JAR . +COPY docker-entrypoint.sh . 
+ +RUN chown -R spark:spark $SPARK_OPERATOR_HOME && \ + chown spark:spark $OPERATOR_JAR && \ + chown spark:spark docker-entrypoint.sh + +USER spark +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["help"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
index 52a85bad..89422a94 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,63 @@
-# spark-kubernetes-operator
+
+
+# Spark-Kubernetes-Operator
+
+Welcome to the **Spark-Kubernetes-Operator**, a Kubernetes operator designed to simplify and
+automate the management of Spark applications in Kubernetes environments.
+
+## Project Status
+
+As of Apr 1, 2024, the Spark-Kubernetes-Operator is under active development.
+
+- We are actively working on new features and improvements. We welcome contributions and
+  feedback to make the operator even better. Check out the **Issues** section to see what's
+  currently in progress or to suggest new features.
+- Current API version: `v1alpha1`
+
+## Key Features
+
+- Deploy and monitor SparkApplications throughout their lifecycle
+- Start / stop Spark applications with a simple YAML schema
+- Spark version agnostic
+- Full logging and metrics integration
+- Flexible deployments and native integration with Kubernetes tooling
+
+Please refer to the [design](spark-operator-docs/architecture.md) doc for the architecture
+and design.
+
+## Quickstart
+
+The [getting started doc](./spark-operator-docs/getting_started.md) walks through installing
+the operator and running Spark applications locally.
+
+In addition, the [SparkApplication](./spark-operator-docs/spark_application.md) doc describes
+how to write your own applications, and the
+[Operations](./spark-operator-docs/operations.md) doc describes how to install the operator
+with custom configuration overrides.
+
+
+
+## Contributing
+
+You can learn more about how to contribute on the
+[Apache Spark website](https://spark.apache.org/contributing.html).
+
+## License
+
+The code in this repository is licensed under the [Apache License, Version 2.0](./LICENSE).
diff --git a/build-tools/helm/spark-kubernetes-operator/.helmignore b/build-tools/helm/spark-kubernetes-operator/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/build-tools/helm/spark-kubernetes-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/build-tools/helm/spark-kubernetes-operator/Chart.yaml b/build-tools/helm/spark-kubernetes-operator/Chart.yaml
new file mode 100644
index 00000000..e62a167a
--- /dev/null
+++ b/build-tools/helm/spark-kubernetes-operator/Chart.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +--- +apiVersion: v2 +name: spark-kubernetes-operator +description: A Helm chart for the Apache Spark Kubernetes Operator +type: application +version: 1.0.0-alpha +appVersion: 1.0.0-alpha diff --git a/build-tools/helm/spark-kubernetes-operator/conf/log4j2.properties b/build-tools/helm/spark-kubernetes-operator/conf/log4j2.properties new file mode 100644 index 00000000..f3ad671b --- /dev/null +++ b/build-tools/helm/spark-kubernetes-operator/conf/log4j2.properties @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +status=debug +strict=true +dest=out +name=PropertiesConfig +property.filename=/opt/spark-operator/logs/spark-operator +filter.threshold.type=ThresholdFilter +filter.threshold.level=debug +# console +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d %p %X %C{1.} [%t] %m%n +appender.console.filter.threshold.type=ThresholdFilter +appender.console.filter.threshold.level=info +# rolling JSON +appender.rolling.type=RollingFile +appender.rolling.name=RollingFile +appender.rolling.append=true +appender.rolling.fileName=${filename}.log +appender.rolling.filePattern=${filename}-%i.log.gz +appender.rolling.layout.type=JsonTemplateLayout +appender.rolling.layout.eventTemplateUri=classpath:EcsLayout.json +appender.rolling.policies.type=Policies +appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=100MB +appender.rolling.strategy.type=DefaultRolloverStrategy +appender.rolling.strategy.max=20 +appender.rolling.immediateFlush=true +# chatty loggers +rootLogger.level=all +logger.netty.name=io.netty +logger.netty.level=warn +log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.rolling.ref=RollingFile diff --git a/build-tools/helm/spark-kubernetes-operator/conf/spark-operator.properties b/build-tools/helm/spark-kubernetes-operator/conf/spark-operator.properties new file mode 100644 index 00000000..c852810b --- /dev/null +++ b/build-tools/helm/spark-kubernetes-operator/conf/spark-operator.properties @@ -0,0 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +# Property Overrides. e.g. +# kubernetes.operator.reconcile.interval=15s +# Enable this for hot property loading +# spark.operator.dynamic.config.enabled=false diff --git a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml new file mode 100644 index 00000000..ec02b5c3 --- /dev/null +++ b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml @@ -0,0 +1,6947 @@ +# Generated by Fabric8 CRDGenerator, manual edits might get overwritten! 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sparkapplications.org.apache.spark +spec: + group: org.apache.spark + names: + kind: SparkApplication + plural: sparkapplications + shortNames: + - sparkapp + singular: sparkapplication + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + spec: + properties: + mainClass: + type: string + runtimeVersions: + properties: + sparkVersion: + enum: + - v3_5_1 + - v3_5_0 + - v3_4_2 + - v3_4_1 + - v3_4_0 + - v3_3_3 + - v3_3_2 + - v3_3_1 + - v3_3_0 + - v3_2_0 + type: string + scalaVersion: + enum: + - v2_12 + - v2_13 + type: string + jdkVersion: + enum: + - Java11 + - Java17 + type: string + required: + - sparkVersion + type: object + jars: + type: string + pyFiles: + type: string + sparkRFiles: + type: string + files: + type: string + deploymentMode: + enum: + - CLUSTER_MODE + - CLIENT_MODE + type: string + proxyUser: + type: string + driverArgs: + items: + type: string + type: array + applicationTolerations: + properties: + restartConfig: + properties: + restartPolicy: + enum: + - ALWAYS + - NEVER + - ON_FAILURE + - ON_INFRASTRUCTURE_FAILURE + type: string + maxRestartAttempts: + type: integer + restartBackoffMillis: + type: integer + type: object + applicationTimeoutConfig: + properties: + driverStartTimeoutMillis: + type: integer + sparkSessionStartTimeoutMillis: + type: integer + executorStartTimeoutMillis: + type: integer + forceTerminationGracePeriodMillis: + type: integer + terminationRequeuePeriodMillis: + type: integer + type: object + instanceConfig: + properties: + initExecutors: + type: integer + minExecutors: + type: integer + maxExecutors: + type: integer + type: object + deleteOnTermination: + type: boolean + type: object + driverSpec: + properties: + podTemplateSpec: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + creationTimestamp: + type: string + deletionGracePeriodSeconds: + type: integer + deletionTimestamp: + type: string + finalizers: + items: + type: string + type: array + generateName: + type: string + generation: + type: integer + labels: + additionalProperties: + type: string + type: object + managedFields: + items: + properties: + apiVersion: + type: string + fieldsType: + type: string + fieldsV1: + type: object + manager: + type: string + operation: + type: string + subresource: + type: string + time: + type: string + type: object + type: array + name: + type: string + namespace: + type: string + ownerReferences: + items: + properties: + apiVersion: + type: string + blockOwnerDeletion: + type: boolean + controller: + type: boolean + kind: + type: string + name: + type: string + uid: + type: string + type: object + type: array + resourceVersion: + type: string + selfLink: + type: string + uid: + type: string + type: object + spec: + properties: + activeDeadlineSeconds: + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + type: object + weight: + type: integer + type: object + type: array + 
requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + type: object + weight: + type: integer + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + type: object + weight: + type: integer + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array 
+                      automountServiceAccountToken:
+                        type: boolean
+                      containers:
+                        items:
+                          # Generated schema mirroring core/v1 Container: args,
+                          # command, env and envFrom (configMap, secret, field
+                          # and resourceField refs), image, imagePullPolicy,
+                          # lifecycle (postStart/preStop handlers),
+                          # livenessProbe, readinessProbe and startupProbe
+                          # (exec, grpc, httpGet, tcpSocket plus thresholds and
+                          # timeouts), name, ports, resources (claims, limits,
+                          # requests), securityContext (capabilities, seLinux,
+                          # seccomp, windows options), stdin, stdinOnce,
+                          # terminationMessagePath/Policy, tty, volumeDevices,
+                          # volumeMounts, workingDir.
+                          type: object
+                        type: array
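Because the per-container schema above preserves resources and probe fields, a pod template can pin the driver's capacity and health checks directly. A sketch, assuming Spark's conventional spark-kubernetes-driver container name and an illustrative probe port:

spec:
  driverSpec:  # assumed, as above
    podTemplateSpec:
      spec:
        containers:
          - name: spark-kubernetes-driver  # Spark's conventional driver container name (assumed here)
            resources:
              requests:
                cpu: "1"
                memory: 2Gi
              limits:
                memory: 2Gi
            livenessProbe:
              tcpSocket:
                port: 7078  # illustrative choice of driver port
              initialDelaySeconds: 30
              periodSeconds: 10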
+                      dnsConfig:
+                        # nameservers, options (name, value), searches.
+                        type: object
+                      dnsPolicy:
+                        type: string
+                      enableServiceLinks:
+                        type: boolean
+                      ephemeralContainers:
+                        items:
+                          # Same generated container schema as containers
+                          # above, plus targetContainerName.
+                          type: object
+                        type: array
+                      hostAliases:
+                        items:
+                          # hostnames, ip.
+                          type: object
+                        type: array
+                      hostIPC:
+                        type: boolean
+                      hostNetwork:
+                        type: boolean
+                      hostPID:
+                        type: boolean
+                      hostUsers:
+                        type: boolean
+                      hostname:
+                        type: string
+                      imagePullSecrets:
+                        items:
+                          # name.
+                          type: object
+                        type: array
+                      initContainers:
+                        items:
+                          # Same generated container schema as containers above.
+                          type: object
+                        type: array
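initContainers run to completion before the main Spark container starts, which makes them a natural place to stage extra dependencies. A sketch of the pod-spec subtree with hypothetical image, URL, and volume names:

spec:
  initContainers:
    - name: fetch-deps  # hypothetical
      image: curlimages/curl:8.5.0  # hypothetical staging image
      command: ["sh", "-c", "curl -fsSL -o /deps/extra.jar https://example.com/extra.jar"]
      volumeMounts:
        - name: deps  # backed by the emptyDir declared below
          mountPath: /deps
  volumes:
    - name: deps
      emptyDir: {}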
+                      nodeName:
+                        type: string
+                      nodeSelector:
+                        additionalProperties:
+                          type: string
+                        type: object
+                      os:
+                        # name.
+                        type: object
+                      overhead:
+                        additionalProperties:
+                          anyOf:
+                            - type: integer
+                            - type: string
+                          x-kubernetes-int-or-string: true
+                        type: object
+                      preemptionPolicy:
+                        type: string
+                      priority:
+                        type: integer
+                      priorityClassName:
+                        type: string
+                      readinessGates:
+                        items:
+                          # conditionType.
+                          type: object
+                        type: array
+                      resourceClaims:
+                        items:
+                          # name plus source (resourceClaimName or
+                          # resourceClaimTemplateName).
+                          type: object
+                        type: array
+                      restartPolicy:
+                        type: string
+                      runtimeClassName:
+                        type: string
+                      schedulerName:
+                        type: string
+                      schedulingGates:
+                        items:
+                          # name.
+                          type: object
+                        type: array
+                      securityContext:
+                        # Pod-level core/v1 PodSecurityContext: fsGroup,
+                        # fsGroupChangePolicy, runAsGroup, runAsNonRoot,
+                        # runAsUser, seLinuxOptions, seccompProfile,
+                        # supplementalGroups, sysctls, windowsOptions.
+                        type: object
+                      serviceAccount:
+                        type: string
+                      serviceAccountName:
+                        type: string
+                      setHostnameAsFQDN:
+                        type: boolean
+                      shareProcessNamespace:
+                        type: boolean
+                      subdomain:
+                        type: string
+                      terminationGracePeriodSeconds:
+                        type: integer
+                      tolerations:
+                        items:
+                          # effect, key, operator, tolerationSeconds, value.
+                          type: object
+                        type: array
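nodeSelector and tolerations together steer driver pods onto dedicated, tainted capacity. A sketch of the same pod-spec subtree, with a hypothetical instance-type label and taint:

spec:
  nodeSelector:
    node.kubernetes.io/instance-type: m5.2xlarge  # hypothetical
  tolerations:
    - key: dedicated  # hypothetical taint
      operator: Equal
      value: spark
      effect: NoSchedule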
+                      topologySpreadConstraints:
+                        items:
+                          # labelSelector, matchLabelKeys, maxSkew, minDomains,
+                          # nodeAffinityPolicy, nodeTaintsPolicy, topologyKey,
+                          # whenUnsatisfiable.
+                          type: object
+                        type: array
+                      volumes:
+                        items:
+                          # Generated schema mirroring core/v1 Volume: name
+                          # plus one source per entry -- awsElasticBlockStore,
+                          # azureDisk, azureFile, cephfs, cinder, configMap,
+                          # csi, downwardAPI, emptyDir, ephemeral (inline
+                          # PersistentVolumeClaim template), fc, flexVolume,
+                          # flocker, gcePersistentDisk, gitRepo, glusterfs,
+                          # hostPath, iscsi, nfs, persistentVolumeClaim,
+                          # photonPersistentDisk, portworxVolume, projected,
+                          # quobyte, rbd, scaleIO, secret, storageos,
+                          # vsphereVolume.
+                          type: object
+                        type: array
+                    type: object
+                type: object
+            type: object
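The volumes list pairs with the per-container volumeMounts shown earlier; for example, a ConfigMap can be surfaced into the driver filesystem. A sketch, again assuming the spark-kubernetes-driver container name and a hypothetical ConfigMap:

spec:
  containers:
    - name: spark-kubernetes-driver  # assumed, as above
      volumeMounts:
        - name: extra-conf
          mountPath: /opt/spark/extra-conf
          readOnly: true
  volumes:
    - name: extra-conf
      configMap:
        name: my-spark-extra-conf  # hypothetical ConfigMap
        optional: true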
+          executorSpec:
+            properties:
+              podTemplateSpec:
+                properties:
+                  metadata:
+                    # Same generated ObjectMeta schema as in driverSpec:
+                    # annotations, labels, name, namespace, finalizers,
+                    # ownerReferences, managedFields, and related fields.
+                    type: object
+                  spec:
+                    properties:
+                      activeDeadlineSeconds:
+                        type: integer
+                      affinity:
+                        # Same generated nodeAffinity, podAffinity and
+                        # podAntiAffinity schema as in driverSpec above.
+                        type: object
+                      automountServiceAccountToken:
+                        type: boolean
+                      containers:
+                        items:
+                          # Same generated container schema as in driverSpec.
+                          type: object
+                        type: array
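Executor templates reuse the same container schema, so per-executor capacity is set the same way as for the driver. A sketch, assuming Spark's conventional spark-kubernetes-executor container name:

spec:
  executorSpec:
    podTemplateSpec:
      spec:
        containers:
          - name: spark-kubernetes-executor  # conventional executor container name (assumed here)
            resources:
              requests:
                cpu: "2"
                memory: 4Gi
              limits:
                memory: 4Gi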
+                      dnsConfig:
+                        type: object
+                      dnsPolicy:
+                        type: string
+                      enableServiceLinks:
+                        type: boolean
+                      ephemeralContainers:
+                        items:
+                          # Same generated container schema as in driverSpec,
+                          # plus targetContainerName.
+                          type: object
+                        type: array
+                      hostAliases:
+                        items:
+                          type: object
+                        type: array
+                      hostIPC:
+                        type: boolean
+                      hostNetwork:
+                        type: boolean
+                      hostPID:
+                        type: boolean
+                      hostUsers:
+                        type: boolean
+                      hostname:
+                        type: string
+                      imagePullSecrets:
+                        items:
+                          type: object
+                        type: array
+                      initContainers:
+                        items:
+                          # Same generated container schema as in driverSpec.
+                          type: object
+                        type: array
+                      # nodeName, nodeSelector, os, overhead, preemptionPolicy,
+                      # priority, priorityClassName, readinessGates,
+                      # resourceClaims, restartPolicy, runtimeClassName,
+                      # schedulerName, schedulingGates, securityContext,
+                      # serviceAccount, serviceAccountName, setHostnameAsFQDN,
+                      # shareProcessNamespace, subdomain,
+                      # terminationGracePeriodSeconds, tolerations and
+                      # topologySpreadConstraints follow the same generated
+                      # shapes as in driverSpec.
+                      volumes:
+                        items:
+                          # Same generated core/v1 Volume schema as in
+                          # driverSpec; the schema continues below with the
+                          # inline ephemeral volumeClaimTemplate metadata.
object + type: array + name: + type: string + namespace: + type: string + ownerReferences: + items: + properties: + apiVersion: + type: string + blockOwnerDeletion: + type: boolean + controller: + type: boolean + kind: + type: string + name: + type: string + uid: + type: string + type: object + type: array + resourceVersion: + type: string + selfLink: + type: string + uid: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + type: object + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + type: object + resources: + properties: + claims: + items: + properties: + name: + type: string + type: object + type: array + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + type: integer + pdName: + type: string + readOnly: + type: boolean + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + type: object + hostPath: + properties: + path: + type: string + type: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + type: object + projected: + properties: + defaultMode: + type: integer + 
sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + type: integer + path: + type: string + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + type: object + mode: + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + type: string + type: object + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + type: integer + path: + type: string + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + type: integer + path: + type: string + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + type: object + secret: + properties: + defaultMode: + type: integer + items: + items: + properties: + key: + type: string + mode: + type: integer + path: + type: string + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + type: object + type: object + type: array + type: object + type: object + type: object + sparkConf: + additionalProperties: + type: string + type: object + required: + - runtimeVersions + type: object + status: + properties: + currentState: + properties: + lastObservedDriverStatus: + properties: + conditions: + items: + properties: + lastProbeTime: + type: string + lastTransitionTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + containerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + 
type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + ephemeralContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + hostIP: + type: string + initContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + message: + type: string + nominatedNodeName: + type: string + phase: + type: string + podIP: + type: string + podIPs: + items: + properties: + ip: + type: string + type: object + type: array + qosClass: + type: string + reason: + type: string + startTime: + type: string + type: object + currentStateSummary: + enum: + - SUBMITTED + - SCHEDULED_TO_RESTART + - DRIVER_REQUESTED + - DRIVER_STARTED + - DRIVER_READY + - INITIALIZED_BELOW_THRESHOLD_EXECUTORS + - RUNNING_HEALTHY + - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS + - DRIVER_LAUNCH_TIMED_OUT + - 
EXECUTORS_LAUNCH_TIMED_OUT + - SPARK_SESSION_INITIALIZATION_TIMED_OUT + - SUCCEEDED + - FAILED + - SCHEDULING_FAILURE + - DRIVER_EVICTED + - RESOURCE_RELEASED + - TERMINATED_WITHOUT_RELEASE_RESOURCES + type: string + lastTransitionTime: + type: string + message: + type: string + type: object + stateTransitionHistory: + additionalProperties: + properties: + lastObservedDriverStatus: + properties: + conditions: + items: + properties: + lastProbeTime: + type: string + lastTransitionTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + containerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + ephemeralContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + hostIP: + type: string + initContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + 
type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + message: + type: string + nominatedNodeName: + type: string + phase: + type: string + podIP: + type: string + podIPs: + items: + properties: + ip: + type: string + type: object + type: array + qosClass: + type: string + reason: + type: string + startTime: + type: string + type: object + currentStateSummary: + enum: + - SUBMITTED + - SCHEDULED_TO_RESTART + - DRIVER_REQUESTED + - DRIVER_STARTED + - DRIVER_READY + - INITIALIZED_BELOW_THRESHOLD_EXECUTORS + - RUNNING_HEALTHY + - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS + - DRIVER_LAUNCH_TIMED_OUT + - EXECUTORS_LAUNCH_TIMED_OUT + - SPARK_SESSION_INITIALIZATION_TIMED_OUT + - SUCCEEDED + - FAILED + - SCHEDULING_FAILURE + - DRIVER_EVICTED + - RESOURCE_RELEASED + - TERMINATED_WITHOUT_RELEASE_RESOURCES + type: string + lastTransitionTime: + type: string + message: + type: string + type: object + type: object + previousAttemptSummary: + properties: + stateTransitionHistory: + additionalProperties: + properties: + lastObservedDriverStatus: + properties: + conditions: + items: + properties: + lastProbeTime: + type: string + lastTransitionTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + containerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + ephemeralContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: 
+ properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + hostIP: + type: string + initContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + message: + type: string + nominatedNodeName: + type: string + phase: + type: string + podIP: + type: string + podIPs: + items: + properties: + ip: + type: string + type: object + type: array + qosClass: + type: string + reason: + type: string + startTime: + type: string + type: object + currentStateSummary: + enum: + - SUBMITTED + - SCHEDULED_TO_RESTART + - DRIVER_REQUESTED + - DRIVER_STARTED + - DRIVER_READY + - INITIALIZED_BELOW_THRESHOLD_EXECUTORS + - RUNNING_HEALTHY + - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS + - DRIVER_LAUNCH_TIMED_OUT + - EXECUTORS_LAUNCH_TIMED_OUT + - SPARK_SESSION_INITIALIZATION_TIMED_OUT + - SUCCEEDED + - FAILED + - SCHEDULING_FAILURE + - DRIVER_EVICTED + - RESOURCE_RELEASED + - TERMINATED_WITHOUT_RELEASE_RESOURCES + type: string + lastTransitionTime: + type: string + message: + type: string + type: object + type: object + attemptInfo: + properties: + id: + type: integer + type: object + type: object + currentAttemptSummary: + properties: + stateTransitionHistory: + additionalProperties: + properties: + lastObservedDriverStatus: + properties: + conditions: + items: + properties: + lastProbeTime: + type: string + lastTransitionTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + containerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + 
type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + ephemeralContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + hostIP: + type: string + initContainerStatuses: + items: + properties: + containerID: + type: string + image: + type: string + imageID: + type: string + lastState: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + name: + type: string + ready: + type: boolean + restartCount: + type: integer + started: + type: boolean + state: + properties: + running: + properties: + startedAt: + type: string + type: object + terminated: + properties: + containerID: + type: string + exitCode: + type: integer + finishedAt: + type: string + message: + type: string + reason: + type: string + signal: + type: integer + startedAt: + type: string + type: object + waiting: + properties: + message: + type: string + reason: + type: string + type: object + type: object + type: object + type: array + message: + type: string + nominatedNodeName: + type: string + phase: + type: string + podIP: + type: string + podIPs: + items: + properties: + ip: + type: string + type: object + type: array + qosClass: + type: string + reason: + type: string + startTime: + type: string + type: object + currentStateSummary: + enum: + - SUBMITTED + - SCHEDULED_TO_RESTART + - DRIVER_REQUESTED + - DRIVER_STARTED + - DRIVER_READY + - INITIALIZED_BELOW_THRESHOLD_EXECUTORS + - RUNNING_HEALTHY + - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS + - DRIVER_LAUNCH_TIMED_OUT + - EXECUTORS_LAUNCH_TIMED_OUT + - SPARK_SESSION_INITIALIZATION_TIMED_OUT + - SUCCEEDED + - FAILED + - SCHEDULING_FAILURE + - DRIVER_EVICTED + - RESOURCE_RELEASED + - TERMINATED_WITHOUT_RELEASE_RESOURCES + 
type: string + lastTransitionTime: + type: string + message: + type: string + type: object + type: object + attemptInfo: + properties: + id: + type: integer + type: object + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.currentState.currentStateSummary + name: Current State + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date diff --git a/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl b/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl new file mode 100644 index 00000000..34ad34d8 --- /dev/null +++ b/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/*Licensed to the Apache Software Foundation (ASF) under one*/}} +{{/*or more contributor license agreements. See the NOTICE file*/}} +{{/*distributed with this work for additional information*/}} +{{/*regarding copyright ownership. The ASF licenses this file*/}} +{{/*to you under the Apache License, Version 2.0 (the*/}} +{{/*"License"); you may not use this file except in compliance*/}} +{{/*with the License. You may obtain a copy of the License at*/}} + +{{/* http://www.apache.org/licenses/LICENSE-2.0*/}} + +{{/*Unless required by applicable law or agreed to in writing,*/}} +{{/*software distributed under the License is distributed on an*/}} +{{/*"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY*/}} +{{/*KIND, either express or implied. See the License for the*/}} +{{/*specific language governing permissions and limitations*/}} +{{/*under the License.*/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "spark-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "spark-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "spark-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "spark-operator.commonLabels" -}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "spark-operator.chart" . }} +{{- end }} + +{{/* +Dynamic config labels +*/}} +{{- define "spark-operator.dynamicConfigLabels" -}} +app.kubernetes.io/name: {{ include "spark-operator.name" . }} +app.kubernetes.io/component: "operator-dynamic-config-overrides" +{{ include "spark-operator.commonLabels" . }} +{{- end }} + +{{/* +Initial config labels +*/}} +{{- define "spark-operator.initialConfigLabels" -}} +app.kubernetes.io/name: {{ include "spark-operator.name" . }} +app.kubernetes.io/component: "operator-config" +{{ include "spark-operator.commonLabels" . 
}}
+{{- end }}
+
+{{/*
+Deployment selector labels
+*/}}
+{{- define "spark-operator.deploymentSelectorLabels" -}}
+app.kubernetes.io/name: {{ include "spark-operator.name" . }}
+app.kubernetes.io/component: "operator-deployment"
+{{- end }}
+
+{{/*
+Create the path of the operator image to use
+*/}}
+{{- define "spark-operator.imagePath" -}}
+{{- if .Values.image.digest }}
+{{- .Values.image.repository }}@{{ .Values.image.digest }}
+{{- else }}
+{{- .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the operator service account to use
+*/}}
+{{- define "spark-operator.serviceAccountName" -}}
+{{- if .Values.operatorRbac.serviceAccount.create }}
+{{- default (include "spark-operator.fullname" .) .Values.operatorRbac.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.operatorRbac.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Default property overrides
+*/}}
+{{- define "spark-operator.defaultPropertyOverrides" -}}
+# Runtime resolved properties
+spark.operator.namespace={{ .Release.Namespace }}
+spark.operator.name={{- include "spark-operator.name" . }}
+spark.operator.dynamic.config.enabled={{ .Values.operatorConfiguration.dynamicConfig.create }}
+{{- if .Values.appResources.namespaces.watchGivenNamespacesOnly }}
+spark.operator.watched.namespaces={{- join "," .Values.appResources.namespaces.data }}
+{{- end }}
+{{- end }}
+
+{{/*
+Readiness probe property overrides
+*/}}
+{{- define "spark-operator.readinessProbe.failureThreshold" -}}
+{{- default 30 .Values.operatorDeployment.operatorPod.operatorContainer.probes.startupProbe.failureThreshold }}
+{{- end }}
+{{- define "spark-operator.readinessProbe.periodSeconds" -}}
+{{- default 10 .Values.operatorDeployment.operatorPod.operatorContainer.probes.startupProbe.periodSeconds }}
+{{- end }}
+
+{{/*
+Liveness probe property overrides
+*/}}
+{{- define "spark-operator.livenessProbe.initialDelaySeconds" -}}
+{{- default 30 .Values.operatorDeployment.operatorPod.operatorContainer.probes.livenessProbe.initialDelaySeconds }}
+{{- end }}
+{{- define "spark-operator.livenessProbe.periodSeconds" -}}
+{{- default 10 .Values.operatorDeployment.operatorPod.operatorContainer.probes.livenessProbe.periodSeconds }}
+{{- end }}
+
+{{/*
+Probe port overrides
+*/}}
+{{- define "spark-operator.probePort" -}}
+{{- default 18080 .Values.operatorDeployment.operatorPod.operatorContainer.probes.port }}
+{{- end }}
+
+{{/*
+Metrics port overrides
+*/}}
+{{- define "spark-operator.metricsPort" -}}
+{{- default 19090 .Values.operatorDeployment.operatorPod.operatorContainer.metrics.port }}
+{{- end }}
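For orientation, here is a sketch of what the label helpers above resolve to. It assumes the chart shipped in this patch (chart name `spark-kubernetes-operator`, app version `1.0.0-alpha`), no `nameOverride`, and a Helm-managed release; the concrete values are illustrative, not normative:

    # Hypothetical rendering of include "spark-operator.initialConfigLabels"
    app.kubernetes.io/name: spark-kubernetes-operator
    app.kubernetes.io/component: "operator-config"
    app.kubernetes.io/version: "1.0.0-alpha"
    app.kubernetes.io/managed-by: Helm
    helm.sh/chart: spark-kubernetes-operator-1.0.0-alpha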
diff --git a/build-tools/helm/spark-kubernetes-operator/templates/rbac.yaml b/build-tools/helm/spark-kubernetes-operator/templates/rbac.yaml
new file mode 100644
index 00000000..0f8b8f09
--- /dev/null
+++ b/build-tools/helm/spark-kubernetes-operator/templates/rbac.yaml
@@ -0,0 +1,124 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+{{/*
+RBAC rules used to create the operator (cluster)role
+*/}}
+{{- define "spark-operator.operatorRbacRules" }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - services
+      - configmaps
+      - persistentvolumeclaims
+    verbs:
+      - '*'
+  - apiGroups:
+      - "org.apache.spark"
+    resources:
+      - '*'
+    verbs:
+      - '*'
+{{- end }}
+
+{{/*
+Labels and annotations to be applied on rbacResources
+*/}}
+{{- define "spark-operator.rbacLabelsAnnotations" }}
+  labels:
+    {{- with .Values.operatorRbac.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{ include "spark-operator.commonLabels" . | nindent 4 }}
+  {{- with .Values.operatorRbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
+
+---
+# Service account and role bindings for the operator
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "spark-operator.serviceAccountName" $ }}
+  namespace: {{ .Release.Namespace }}
+{{- template "spark-operator.rbacLabelsAnnotations" $ }}
+---
+{{- if .Values.operatorRbac.clusterRoleBinding.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ .Values.operatorRbac.clusterRoleBinding.name }}
+{{- template "spark-operator.rbacLabelsAnnotations" $ }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ .Values.operatorRbac.clusterRole.name }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "spark-operator.serviceAccountName" $ }}
+    namespace: {{ .Release.Namespace }}
+{{- end }}
+---
+{{- if .Values.operatorRbac.clusterRole.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ .Values.operatorRbac.clusterRole.name }}
+{{- template "spark-operator.rbacLabelsAnnotations" $ }}
+{{- template "spark-operator.operatorRbacRules" $ }}
+{{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ .Values.operatorRbac.configManagement.roleName }}
+  namespace: {{ .Release.Namespace }}
+{{- template "spark-operator.rbacLabelsAnnotations" $ }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - '*'
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Values.operatorRbac.configManagement.roleBindingName }}
+  namespace: {{ .Release.Namespace }}
+{{- template "spark-operator.rbacLabelsAnnotations" $ }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ .Values.operatorRbac.configManagement.roleName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "spark-operator.serviceAccountName" $ }}
+    namespace: {{ .Release.Namespace }}
diff --git a/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml b/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml
new file mode 100644
index 00000000..b8de2100
--- /dev/null
+++ b/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml
@@ -0,0 +1,211 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+#
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "spark-operator.name" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "spark-operator.deploymentSelectorLabels" . | nindent 4 }} + {{- include "spark-operator.commonLabels" . | nindent 4 }} +spec: + replicas: {{ .Values.operatorDeployment.replicas }} + revisionHistoryLimit: 2 + strategy: + {{- toYaml .Values.operatorDeployment.strategy | nindent 4 }} + selector: + matchLabels: + {{- include "spark-operator.deploymentSelectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "spark-operator.deploymentSelectorLabels" . | nindent 8 }} + {{- if index (.Values.operatorDeployment.operatorPod) "labels" }} + {{- with .Values.operatorDeployment.operatorPod.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + annotations: + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- if index (.Values.operatorDeployment.operatorPod) "annotations" }} + {{- with .Values.operatorDeployment.operatorPod.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- with .Values.operatorDeployment.operatorPod.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.operatorDeployment.operatorPod.nodeSelector }} + nodeSelector: {{ toYaml .Values.operatorDeployment.operatorPod.nodeSelector | nindent 8 }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "spark-operator.serviceAccountName" $ }} + {{- if .Values.operatorDeployment.operatorPod.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.operatorDeployment.operatorPod.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ include "spark-operator.imagePath" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: [ "./docker-entrypoint.sh", "operator" ] + ports: + - containerPort: {{ include "spark-operator.probePort" . }} + name: probe-port + - containerPort: {{ include "spark-operator.metricsPort" . 
}} + name: metrics-port + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: {{ include "spark-operator.name" . }} + - name: LOG_CONFIG + value: -Dlog4j.configurationFile=/opt/spark-operator/conf/log4j2.properties + - name: OPERATOR_JAVA_OPTS + value: {{ .Values.operatorDeployment.operatorPod.operatorContainer.jvmArgs }} + {{- with .Values.operatorDeployment.operatorPod.operatorContainer.env }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.operatorContainer.envFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.operatorContainer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + readinessProbe: + httpGet: + port: probe-port + path: /startup + failureThreshold: {{ include "spark-operator.readinessProbe.failureThreshold" . }} + periodSeconds: {{ include "spark-operator.readinessProbe.periodSeconds" . }} + livenessProbe: + httpGet: + port: probe-port + path: /healthz + initialDelaySeconds: {{ include "spark-operator.livenessProbe.initialDelaySeconds" . }} + periodSeconds: {{ include "spark-operator.livenessProbe.periodSeconds" . }} + {{- with .Values.operatorDeployment.operatorPod.operatorContainer.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: spark-operator-config-volume + mountPath: /opt/spark-operator/conf + - name: logs-volume + mountPath: /opt/spark-operator/logs + {{- with .Values.operatorDeployment.operatorPod.operatorContainer.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.operatorDeployment.operatorPod.additionalContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if index (.Values.operatorDeployment.operatorPod) "dnsPolicy" }} + dnsPolicy: {{ .Values.operatorDeployment.operatorPod.dnsPolicy | quote }} + {{- end }} + {{- if index (.Values.operatorDeployment.operatorPod) "dnsConfig" }} + dnsConfig: + {{- with .Values.operatorDeployment.operatorPod.dnsConfig }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + restartPolicy: Always + volumes: + - name: spark-operator-config-volume + configMap: + name: spark-kubernetes-operator-configuration + - name: logs-volume + emptyDir: { } + {{- with .Values.operatorDeployment.operatorPod.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: spark-kubernetes-operator-configuration + namespace: {{ .Release.Namespace }} + labels: + {{- include "spark-operator.initialConfigLabels" . | nindent 4 }} +data: + log4j2.properties: |+ +{{- if .Values.operatorConfiguration.append }} + {{- $.Files.Get "conf/log4j2.properties" | nindent 4 -}} +{{- end }} +{{- if index (.Values.operatorConfiguration) "log4j2.properties" }} + {{- index (.Values.operatorConfiguration) "log4j2.properties" | nindent 4 -}} +{{- end }} + spark-operator.properties: |+ + {{- include "spark-operator.defaultPropertyOverrides" . 
| nindent 4 }} +{{- if .Values.operatorConfiguration.append }} + {{- $.Files.Get "conf/spark-operator.properties" | nindent 4 -}} +{{- end }} +{{- if index (.Values.operatorConfiguration) "spark-operator.properties" }} + {{- index (.Values.operatorConfiguration) "spark-operator.properties" | nindent 4 -}} +{{- end }} + metrics.properties: |+ +{{- if index (.Values.operatorConfiguration) "metrics.properties" }} + {{- index (.Values.operatorConfiguration) "metrics.properties" | nindent 4 -}} +{{- end }} +--- +{{- if .Values.operatorConfiguration.dynamicConfig.create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: spark-kubernetes-operator-dynamic-configuration + namespace: {{ .Release.Namespace }} + labels: + {{- include "spark-operator.dynamicConfigLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.operatorConfiguration.dynamicConfig.annotations | nindent 4 }} +{{- with .Values.operatorConfiguration.dynamicConfig.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} diff --git a/build-tools/helm/spark-kubernetes-operator/templates/sparkapps-resource.yaml b/build-tools/helm/spark-kubernetes-operator/templates/sparkapps-resource.yaml new file mode 100644 index 00000000..cd598cdd --- /dev/null +++ b/build-tools/helm/spark-kubernetes-operator/templates/sparkapps-resource.yaml @@ -0,0 +1,216 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +{{/* +RBAC rules used to create the app (cluster)role based on the scope +*/}} +{{- define "spark-operator.appRbacRules" }} +rules: + - apiGroups: + - "" + resources: + - pods + - services + - configmaps + - persistentvolumeclaims + verbs: + - '*' +{{- end }} + +{{/* +RoleRef for app service account rolebindings +*/}} +{{- define "spark-operator.appRoleRef" }} +roleRef: + apiGroup: rbac.authorization.k8s.io +{{- if .Values.appResources.clusterRole.create }} + kind: ClusterRole + name: {{ .Values.appResources.clusterRole.name }} +{{- else if .Values.appResources.roles.create }} + kind: Role + name: {{ .Values.appResources.roles.name }} +{{- else }} + kind: ClusterRole + name: {{ .Values.operatorRbac.clusterRole.name }} +{{- end }} +{{- end }} + +{{/* +Labels and annotations to be applied +*/}} +{{- define "spark-operator.appLabels" }} + {{- with .Values.appResources.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{ include "spark-operator.commonLabels" . | nindent 4 }} +{{- end }} + +{{- define "spark-operator.appAnnotations" }} + {{- with .Values.appResources.annotations }} + {{- toYaml . 
| nindent 4 }}
+  {{- end }}
+{{- end }}
+
+{{- define "spark-operator.appLabelsAnnotations" }}
+  labels:
+    {{ template "spark-operator.appLabels" $ }}
+  annotations:
+    {{ template "spark-operator.appAnnotations" $ }}
+{{- end }}
+---
+{{- $appResources := .Values.appResources -}}
+{{- $systemNs := .Release.Namespace -}}
+{{- $operatorRbac := .Values.operatorRbac -}}
+{{- if index (.Values.appResources.namespaces) "data" }}
+{{- range $appNs := .Values.appResources.namespaces.data }}
+{{- if $appResources.namespaces.create }}
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ $appNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+---
+{{- end }}
+{{- if $appResources.serviceAccounts.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ $appResources.serviceAccounts.name }}
+  namespace: {{ $appNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+---
+{{- end }}
+{{- if $appResources.roles.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ $appResources.roles.name }}
+  namespace: {{ $appNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+{{- template "spark-operator.appRbacRules" $ }}
+---
+{{- end }}
+{{- if $appResources.roleBindings.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ $appResources.roleBindings.name }}
+  namespace: {{ $appNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+{{- template "spark-operator.appRoleRef" $ }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $appResources.serviceAccounts.name }}
+    namespace: {{ $appNs }}
+---
+{{- end }}
+{{- if not $operatorRbac.clusterRoleBinding.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: spark-operator-rolebinding
+  namespace: {{ $appNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ $operatorRbac.clusterRole.name }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $operatorRbac.serviceAccount.name }}
+    namespace: {{ $systemNs }}
+---
+{{- end }}
+{{- end }}
+{{- else }}
+{{- if $appResources.serviceAccounts.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ $appResources.serviceAccounts.name }}
+  namespace: {{ $systemNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+---
+{{- end }}
+{{- if $appResources.roles.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ $appResources.roles.name }}
+  namespace: {{ $systemNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+{{- template "spark-operator.appRbacRules" $ }}
+---
+{{- end }}
+{{- if $appResources.roleBindings.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ $appResources.roleBindings.name }}
+  namespace: {{ $systemNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+{{- template "spark-operator.appRoleRef" $ }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $appResources.serviceAccounts.name }}
+    namespace: {{ $systemNs }}
+---
+{{- end }}
+{{- if not $operatorRbac.clusterRoleBinding.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: spark-operator-rolebinding
+  namespace: {{ $systemNs }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ $operatorRbac.clusterRole.name }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $operatorRbac.serviceAccount.name }}
+    namespace: {{ $systemNs }}
+---
+{{- end }}
+{{- end }}
+
+{{- if $appResources.clusterRole.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ $appResources.clusterRole.name }}
+{{- template "spark-operator.appLabelsAnnotations" $ }}
+{{- template "spark-operator.appRbacRules" $ }}
+---
+{{- end }}
+{{- if $appResources.sparkApplicationSentinel.create }}
+{{- range $sentinelNs := .Values.appResources.sparkApplicationSentinel.sentinelNamespaces.data }}
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: {{ $appResources.sparkApplicationSentinel.name }}
+  namespace: {{ $sentinelNs }}
+  labels:
+    "spark.operator/sentinel": "true"
+    {{- template "spark-operator.appLabels" $ }}
+  annotations:
+    {{- template "spark-operator.appAnnotations" $ }}
+{{- end }}
+---
+{{- end }}
diff --git a/build-tools/helm/spark-kubernetes-operator/values.yaml b/build-tools/helm/spark-kubernetes-operator/values.yaml
new file mode 100644
index 00000000..8a4af554
--- /dev/null
+++ b/build-tools/helm/spark-kubernetes-operator/values.yaml
@@ -0,0 +1,176 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
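+# The keys below are the chart defaults. As a minimal sketch (illustrative
+# values only), a user-supplied override file for
+# `helm install spark-kubernetes-operator . -f my-values.yaml` might set just
+# the image coordinates and the watched namespaces:
+#
+#   image:
+#     repository: my-registry/spark-kubernetes-operator
+#     tag: 1.0.0-alpha
+#   appResources:
+#     namespaces:
+#       watchGivenNamespacesOnly: true
+#       data:
+#         - "spark-demo"
+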
+image:
+  repository: spark-kubernetes-operator
+  pullPolicy: IfNotPresent
+  # tag: latest
+  # If image digest is set then it takes precedence and the image tag will be ignored
+  # digest: ""
+
+imagePullSecrets: [ ]
+
+operatorDeployment:
+  # Replicas must be 1
+  replicas: 1
+  # Strategy type must be 'Recreate' unless leader election is configured
+  strategy:
+    type: Recreate
+  operatorPod:
+    priorityClassName: null
+    annotations: { }
+    labels: { }
+    affinity: { }
+    nodeSelector: { }
+    # Node tolerations for operator pod assignment
+    # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+    tolerations: [ ]
+    # Topology spread constraints
+    # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+    topologySpreadConstraints: [ ]
+    operatorContainer:
+      jvmArgs: "-XX:+UseG1GC -Xms3G -Xmx3G -Dfile.encoding=UTF8"
+      env:
+      envFrom:
+      volumeMounts: { }
+      resources:
+        limits:
+          cpu: "1"
+          ephemeral-storage: 2Gi
+          memory: 2Gi
+        requests:
+          cpu: "1"
+          ephemeral-storage: 2Gi
+          memory: 2Gi
+      probes:
+        port: 18080
+        livenessProbe:
+          periodSeconds: 10
+          initialDelaySeconds: 30
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+      metrics:
+        port: 19090
+      securityContext:
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        runAsNonRoot: true
+        runAsUser: 9999
+        seccompProfile:
+          type: RuntimeDefault
+    additionalContainers: { }
+    # additionalContainers:
+    #   - name: ""
+    #     image: ""
+    volumes: { }
+    # volumes:
+    #   - name: spark-artifacts
+    #     hostPath:
+    #       path: /tmp/spark/artifacts
+    #       type: DirectoryOrCreate
+    securityContext: { }
+    dnsPolicy:
+    dnsConfig:
+
+operatorRbac:
+  serviceAccount:
+    create: true
+    name: "spark-operator"
+  # If disabled, a Role would be created inside each app namespace for app operations
+  clusterRole:
+    create: true
+    name: "spark-operator-clusterrole"
+  # If disabled, a RoleBinding would be created inside each app namespace for app operations
+  clusterRoleBinding:
+    create: true
+    name: "spark-operator-clusterrolebinding"
+  configManagement:
+    roleName: "spark-operator-config-role"
+    roleBindingName: "spark-operator-config-role-binding"
+
+appResources:
+  # Create namespace(s), service account(s) and rolebinding(s) for SparkApps, if configured
+  # Operator would act at cluster level by default if no app namespace(s) are provided
+  namespaces:
+    create: true
+    # When enabled, operator would by default only watch namespace(s) provided in data field
+    watchGivenNamespacesOnly: false
+    data:
+      # - "spark-demo"
+  serviceAccounts:
+    create: true
+    name: "spark"
+  roleBindings:
+    create: true
+    name: "spark-app-rolebinding"
+  roles:
+    # if enabled, a role would be created in each app namespace for Spark apps
+    create: false
+    name: "spark-app-role"
+  clusterRole:
+    # if enabled, a clusterrole would be created for Spark app service accounts to use
+    # If neither role nor clusterrole is enabled: Spark app would use the same access as operator
+    create: false
+    name: "spark-app-cluster-role"
+  sparkApplicationSentinel:
+    create: false
+    name: "spark-app-sentinel"
+    sentinelNamespaces:
+      data:
+      # When enabled, sentinel resources will be deployed to namespace(s) provided in data field.
+      # Note that the sentinelNamespaces list shall be a subset of appResources.namespaces.data.
+      # - "spark-demo"
+  # App resources are by default annotated to avoid app abort due to operator upgrade
+  annotations:
+    # "helm.sh/resource-policy": keep
+  # labels to be added on app resources
+  labels:
+    "app.kubernetes.io/component": "spark-apps"
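+# As a sketch of the RBAC fallbacks above (illustrative, not a default): to grant
+# Spark apps a namespaced Role instead of falling back to the operator's own
+# access, a values override could set
+#
+#   appResources:
+#     roles:
+#       create: true
+#     roleBindings:
+#       create: true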
+      # - "spark-demo"
+  # App resources are by default annotated to avoid app abort due to operator upgrade
+  annotations:
+    # "helm.sh/resource-policy": keep
+  # Labels to be added on app resources
+  labels:
+    "app.kubernetes.io/component": "spark-apps"
+
+operatorConfiguration:
+  # If set to true, the conf files & properties below would be appended to the default conf.
+  # Otherwise, they would override the default properties.
+  append: true
+  log4j2.properties: |+
+    # Logging Overrides
+    # rootLogger.level=DEBUG
+  spark-operator.properties: |+
+    # Property Overrides.
+    #
+    # e.g. to watch only the namespaces 'spark' and 'default', instead of
+    # the whole cluster, use
+    # spark.operator.watched.namespaces=spark,default
+    # When deployed via Helm, please note that the value of spark.operator.watched.namespaces
+    # should be a subset of .Values.appResources.namespaces.data so that the app namespaces
+    # are properly configured by Helm before the operator starts.
+    #
+    # Enable this for hot property loading
+    # spark.operator.dynamic.config.enabled=false
+  metrics.properties: |+
+    # Metrics Properties Overrides
+  dynamicConfig:
+    # If set to true, a config map would be created & watched by the operator as the
+    # source of truth for hot property loading.
+    create: false
+    annotations:
+      # "helm.sh/resource-policy": keep
+    data:
+      # Spark Operator Config Runtime Properties Overrides
diff --git a/build.gradle b/build.gradle
new file mode 100644
index 00000000..2a576b80
--- /dev/null
+++ b/build.gradle
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
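+//
+// Typical verification loop for this build (a sketch; the task names come from
+// the checkstyle, pmd, spotbugs and jacoco plugins applied below to the default
+// source sets):
+//
+//   ./gradlew build checkstyleMain pmdMain spotbugsMain jacocoTestReport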
+
+buildscript {
+  repositories {
+    maven {
+      url = uri("https://plugins.gradle.org/m2/")
+    }
+  }
+  dependencies {
+    classpath "com.github.spotbugs.snom:spotbugs-gradle-plugin:${spotBugsGradlePluginVersion}"
+  }
+}
+
+assert JavaVersion.current().isJava11Compatible(): "Java 11 or newer is required"
+
+subprojects {
+  apply plugin: 'idea'
+  apply plugin: 'eclipse'
+  apply plugin: 'java'
+  sourceCompatibility = 11
+  targetCompatibility = 11
+
+  repositories {
+    mavenCentral()
+    jcenter()
+  }
+
+  configurations.all {
+    resolutionStrategy {
+      force "org.slf4j:slf4j-api:$slf4jVersion"
+      force "io.fabric8:kubernetes-model-core:$fabric8Version"
+    }
+  }
+
+  apply plugin: 'checkstyle'
+  checkstyle {
+    toolVersion = checkstyleVersion
+    configFile = file("$rootDir/config/checkstyle/checkstyle.xml")
+    configProperties = [
+        "org.checkstyle.google.suppressionfilter.config": rootProject.file("config/checkstyle/checkstyle-suppressions.xml")]
+    ignoreFailures = false
+    showViolations = true
+  }
+
+  apply plugin: 'pmd'
+  pmd {
+    ruleSets = ["java-basic", "java-braces"]
+    ruleSetFiles = files("$rootDir/config/pmd/ruleset.xml")
+    toolVersion = pmdVersion
+    consoleOutput = true
+    ignoreFailures = false
+  }
+
+  apply plugin: 'com.github.spotbugs'
+  spotbugs {
+    toolVersion = spotBugsVersion
+    afterEvaluate {
+      reportsDir = file("${project.reporting.baseDir}/findbugs")
+    }
+    excludeFilter = file("$rootDir/config/spotbugs/exclude.xml")
+    ignoreFailures = false
+  }
+
+  apply plugin: 'jacoco'
+  jacoco {
+    toolVersion = jacocoVersion
+  }
+  jacocoTestReport {
+    dependsOn test
+  }
+}
diff --git a/config/checkstyle/checkstyle-suppressions.xml b/config/checkstyle/checkstyle-suppressions.xml
new file mode 100644
index 00000000..86098403
--- /dev/null
+++ b/config/checkstyle/checkstyle-suppressions.xml
@@ -0,0 +1,63 @@
[XML markup lost in extraction: 63 lines of checkstyle suppression rules]
diff --git a/config/checkstyle/checkstyle.xml b/config/checkstyle/checkstyle.xml
new file mode 100644
index 00000000..aefd442b
--- /dev/null
+++ b/config/checkstyle/checkstyle.xml
@@ -0,0 +1,195 @@
[XML markup lost in extraction: 195 lines of checkstyle configuration]
diff --git a/config/pmd/ruleset.xml b/config/pmd/ruleset.xml
new file mode 100644
index 00000000..689ef744
--- /dev/null
+++ b/config/pmd/ruleset.xml
@@ -0,0 +1,33 @@
[XML markup lost in extraction; surviving text: description "Spark Operator Ruleset", exclude pattern ".*/src/generated/.*"]
diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml
new file mode 100644
index 00000000..c7d2498e
--- /dev/null
+++ b/config/spotbugs/exclude.xml
@@ -0,0 +1,25 @@
[XML markup lost in extraction: 25 lines of SpotBugs exclusion filters]
diff --git a/dev/.rat-excludes b/dev/.rat-excludes
index a24671bc..928907b0 100644
--- a/dev/.rat-excludes
+++ b/dev/.rat-excludes
@@ -12,3 +12,8 @@ LICENSE
 NOTICE
 TAGS
 RELEASE
+build
+.helmignore
+.editorconfig
+gradle
+sparkapplications.org.apache.spark-v1.yml
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
new file mode 100755
index 00000000..62d2261d
--- /dev/null
+++ b/docker-entrypoint.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+args=("$@")
+
+if [ "$1" = "help" ]; then
+  printf "Usage: $(basename "$0") (operator)\n"
+  printf "   Or  $(basename "$0") help\n\n"
+  exit 0
+elif [ "$1" = "operator" ]; then
+  echo "Starting Operator"
+
+  # Launch the operator JVM. OPERATOR_JAR, LOG_CONFIG and OPERATOR_JAVA_OPTS
+  # are expected to be provided by the container image environment.
+  exec java -cp "./$OPERATOR_JAR" $LOG_CONFIG $OPERATOR_JAVA_OPTS org.apache.spark.kubernetes.operator.SparkOperator
+fi
+
+# Running any other command in pass-through mode
+exec "${args[@]}"
diff --git a/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml b/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml
new file mode 100644
index 00000000..98e8ad1c
--- /dev/null
+++ b/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: py-spark-pi-341
+spec:
+  pyFiles: "local:///opt/spark/examples/src/main/python/pi.py"
+  sparkConf:
+    spark.executor.instances: "1"
+    # see also https://hub.docker.com/_/spark
+    spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu"
+    spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
+  runtimeVersions:
+    sparkVersion: v3_4_1
diff --git a/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml b/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml
new file mode 100644
index 00000000..7889902e
--- /dev/null
+++ b/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml
@@ -0,0 +1,33 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: spark-pi-341-212 +spec: + mainClass: "org.apache.spark.examples.SparkPi" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" + sparkConf: + spark.executor.instances: "1" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + runtimeVersions: + scalaVersion: v2_12 + sparkVersion: v3_4_1 diff --git a/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml b/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml new file mode 100644 index 00000000..f3e41189 --- /dev/null +++ b/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml @@ -0,0 +1,46 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: sparkr-example-341 +spec: + driverSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: driver + workingDir: /opt/spark + executorSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: executor + workingDir: /opt/spark + sparkRFiles: "local:///opt/spark/examples/src/main/r/dataframe.R" + sparkConf: + spark.executor.instances: "1" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + spark.home: "/opt/spark" + runtimeVersions: + sparkVersion: v3_4_1 diff --git a/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml b/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml new file mode 100644 index 00000000..4125af91 --- /dev/null +++ b/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
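+#
+# Submission sketch (assumes the operator chart and the 'spark' service account
+# from appResources are already installed in the target namespace):
+#
+#   kubectl apply -f e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml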
+#
+#
+
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: py-spark-pi-350
+spec:
+  pyFiles: "local:///opt/spark/examples/src/main/python/pi.py"
+  sparkConf:
+    spark.executor.instances: "1"
+    # see also https://hub.docker.com/_/spark
+    spark.kubernetes.container.image: "spark:3.5.0-java17-python3"
+    spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
+  runtimeVersions:
+    sparkVersion: v3_5_0
diff --git a/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml b/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml
new file mode 100644
index 00000000..59c04cae
--- /dev/null
+++ b/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: spark-pi-350-212
+spec:
+  mainClass: "org.apache.spark.examples.SparkPi"
+  jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar"
+  sparkConf:
+    spark.executor.instances: "1"
+    # see also https://hub.docker.com/_/spark
+    spark.kubernetes.container.image: "spark:3.5.0-java17-python3"
+    spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
+  runtimeVersions:
+    scalaVersion: v2_12
+    sparkVersion: v3_5_0
diff --git a/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml b/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml
new file mode 100644
index 00000000..6f1557dc
--- /dev/null
+++ b/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml
@@ -0,0 +1,46 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
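+#
+# Once applied, reconciliation progress can be followed through the CRD status
+# (a sketch; the resource name comes from the sparkapplications CRD shipped
+# with the chart):
+#
+#   kubectl get sparkapplications sparkr-example-350 -w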
+# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: sparkr-example-350 +spec: + driverSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: driver + workingDir: /opt/spark + executorSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: executor + workingDir: /opt/spark + sparkRFiles: "local:///opt/spark/examples/src/main/r/dataframe.R" + sparkConf: + spark.executor.instances: "1" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.5.0-java17-r" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + spark.home: "/opt/spark" + runtimeVersions: + sparkVersion: v3_5_0 diff --git a/e2e-tests/spark-apps/spark_3_5_1/pyspark-example.yaml b/e2e-tests/spark-apps/spark_3_5_1/pyspark-example.yaml new file mode 100644 index 00000000..950a7018 --- /dev/null +++ b/e2e-tests/spark-apps/spark_3_5_1/pyspark-example.yaml @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: py-spark-pi-351 +spec: + pyFiles: "local:///opt/spark/examples/src/main/python/pi.py" + sparkConf: + spark.executor.instances: "2" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java17-python3-ubuntu" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + runtimeVersions: + sparkVersion: v3_5_1 diff --git a/e2e-tests/spark-apps/spark_3_5_1/spark-pi_scala_2.12.yaml b/e2e-tests/spark-apps/spark_3_5_1/spark-pi_scala_2.12.yaml new file mode 100644 index 00000000..53a5a389 --- /dev/null +++ b/e2e-tests/spark-apps/spark_3_5_1/spark-pi_scala_2.12.yaml @@ -0,0 +1,33 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
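+#
+# A quick way to inspect the reconciled state and events of this app after
+# submission (a sketch):
+#
+#   kubectl describe sparkapplication spark-pi-351-212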
+# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: spark-pi-351-212 +spec: + mainClass: "org.apache.spark.examples.SparkPi" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar" + sparkConf: + spark.executor.instances: "2" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java17-python3-ubuntu" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + runtimeVersions: + scalaVersion: v2_12 + sparkVersion: v3_5_1 diff --git a/e2e-tests/spark-apps/spark_3_5_1/sparkr-example.yaml b/e2e-tests/spark-apps/spark_3_5_1/sparkr-example.yaml new file mode 100644 index 00000000..08414459 --- /dev/null +++ b/e2e-tests/spark-apps/spark_3_5_1/sparkr-example.yaml @@ -0,0 +1,46 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: sparkr-example-351 +spec: + driverSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: driver + workingDir: /opt/spark + executorSpec: + podTemplateSpec: + metadata: + spec: + containers: + - name: executor + workingDir: /opt/spark + sparkRFiles: "local:///opt/spark/examples/src/main/r/dataframe.R" + sparkConf: + spark.executor.instances: "1" + # see also https://hub.docker.com/_/spark + spark.kubernetes.container.image: "spark:3.5.1-java17-r" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + spark.home: "/opt/spark" + runtimeVersions: + sparkVersion: v3_5_1 diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 00000000..7a1c41ff --- /dev/null +++ b/gradle.properties @@ -0,0 +1,45 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
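+#
+# Any version pin below can be overridden per invocation without editing this
+# file, e.g. (a sketch):
+#
+#   ./gradlew build -PsparkVersion=3.5.1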
+# +# +group=org.apache.spark.kubernetes.operator +version=1.0.0-alpha +commonsLang3Version=3.12.0 +commonsIOVersion=2.11.0 +commonsConfigurationVersion=2.9.0 +dropwizardMetricsVersion=4.2.25 +# FIXME: known binary incompatible lib brought in by Spark +fabric8Version=6.7.2 +lombokVersion=1.18.30 +operatorSDKVersion=4.7.0 +okHttpVersion=4.11.0 +# Spark +sparkVersion=3.5.0 +sparkScalaVersion=2.12 +# Logging +slf4jVersion=1.7.36 +log4jVersion=2.17.1 +log4jLayoutVersion=2.17.1 +# Test +junitVersion=5.9.2 +mockitoVersion=5.10.0 +jacocoVersion=0.8.11 +# Build +checkstyleVersion=10.8.1 +pmdVersion=6.55.0 +spotBugsGradlePluginVersion=5.2.5 +spotBugsVersion=4.2.3 +shadowJarPluginVersion=8.1.1 diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..d64cd4917707c1f8861d8cb53dd15194d4248596 GIT binary patch literal 43462 zcma&NWl&^owk(X(xVyW%ySuwf;qI=D6|RlDJ2cR^yEKh!@I- zp9QeisK*rlxC>+~7Dk4IxIRsKBHqdR9b3+fyL=ynHmIDe&|>O*VlvO+%z5;9Z$|DJ zb4dO}-R=MKr^6EKJiOrJdLnCJn>np?~vU-1sSFgPu;pthGwf}bG z(1db%xwr#x)r+`4AGu$j7~u2MpVs3VpLp|mx&;>`0p0vH6kF+D2CY0fVdQOZ@h;A` z{infNyvmFUiu*XG}RNMNwXrbec_*a3N=2zJ|Wh5z* z5rAX$JJR{#zP>KY**>xHTuw?|-Rg|o24V)74HcfVT;WtQHXlE+_4iPE8QE#DUm%x0 zEKr75ur~W%w#-My3Tj`hH6EuEW+8K-^5P62$7Sc5OK+22qj&Pd1;)1#4tKihi=~8C zHiQSst0cpri6%OeaR`PY>HH_;CPaRNty%WTm4{wDK8V6gCZlG@U3$~JQZ;HPvDJcT1V{ z?>H@13MJcCNe#5z+MecYNi@VT5|&UiN1D4ATT+%M+h4c$t;C#UAs3O_q=GxK0}8%8 z8J(_M9bayxN}69ex4dzM_P3oh@ZGREjVvn%%r7=xjkqxJP4kj}5tlf;QosR=%4L5y zWhgejO=vao5oX%mOHbhJ8V+SG&K5dABn6!WiKl{|oPkq(9z8l&Mm%(=qGcFzI=eLu zWc_oCLyf;hVlB@dnwY98?75B20=n$>u3b|NB28H0u-6Rpl((%KWEBOfElVWJx+5yg z#SGqwza7f}$z;n~g%4HDU{;V{gXIhft*q2=4zSezGK~nBgu9-Q*rZ#2f=Q}i2|qOp z!!y4p)4o=LVUNhlkp#JL{tfkhXNbB=Ox>M=n6soptJw-IDI|_$is2w}(XY>a=H52d z3zE$tjPUhWWS+5h=KVH&uqQS=$v3nRs&p$%11b%5qtF}S2#Pc`IiyBIF4%A!;AVoI zXU8-Rpv!DQNcF~(qQnyyMy=-AN~U>#&X1j5BLDP{?K!%h!;hfJI>$mdLSvktEr*89 zdJHvby^$xEX0^l9g$xW-d?J;L0#(`UT~zpL&*cEh$L|HPAu=P8`OQZV!-}l`noSp_ zQ-1$q$R-gDL)?6YaM!=8H=QGW$NT2SeZlb8PKJdc=F-cT@j7Xags+Pr*jPtlHFnf- zh?q<6;)27IdPc^Wdy-mX%2s84C1xZq9Xms+==F4);O`VUASmu3(RlgE#0+#giLh-& zcxm3_e}n4{%|X zJp{G_j+%`j_q5}k{eW&TlP}J2wtZ2^<^E(O)4OQX8FDp6RJq!F{(6eHWSD3=f~(h} zJXCf7=r<16X{pHkm%yzYI_=VDP&9bmI1*)YXZeB}F? 
z(%QsB5fo*FUZxK$oX~X^69;x~j7ms8xlzpt-T15e9}$4T-pC z6PFg@;B-j|Ywajpe4~bk#S6(fO^|mm1hKOPfA%8-_iGCfICE|=P_~e;Wz6my&)h_~ zkv&_xSAw7AZ%ThYF(4jADW4vg=oEdJGVOs>FqamoL3Np8>?!W#!R-0%2Bg4h?kz5I zKV-rKN2n(vUL%D<4oj@|`eJ>0i#TmYBtYmfla;c!ATW%;xGQ0*TW@PTlGG><@dxUI zg>+3SiGdZ%?5N=8uoLA|$4isK$aJ%i{hECP$bK{J#0W2gQ3YEa zZQ50Stn6hqdfxJ*9#NuSLwKFCUGk@c=(igyVL;;2^wi4o30YXSIb2g_ud$ zgpCr@H0qWtk2hK8Q|&wx)}4+hTYlf;$a4#oUM=V@Cw#!$(nOFFpZ;0lc!qd=c$S}Z zGGI-0jg~S~cgVT=4Vo)b)|4phjStD49*EqC)IPwyeKBLcN;Wu@Aeph;emROAwJ-0< z_#>wVm$)ygH|qyxZaet&(Vf%pVdnvKWJn9`%DAxj3ot;v>S$I}jJ$FLBF*~iZ!ZXE zkvui&p}fI0Y=IDX)mm0@tAd|fEHl~J&K}ZX(Mm3cm1UAuwJ42+AO5@HwYfDH7ipIc zmI;1J;J@+aCNG1M`Btf>YT>~c&3j~Qi@Py5JT6;zjx$cvOQW@3oQ>|}GH?TW-E z1R;q^QFjm5W~7f}c3Ww|awg1BAJ^slEV~Pk`Kd`PS$7;SqJZNj->it4DW2l15}xP6 zoCl$kyEF%yJni0(L!Z&14m!1urXh6Btj_5JYt1{#+H8w?5QI%% zo-$KYWNMJVH?Hh@1n7OSu~QhSswL8x0=$<8QG_zepi_`y_79=nK=_ZP_`Em2UI*tyQoB+r{1QYZCpb?2OrgUw#oRH$?^Tj!Req>XiE#~B|~ z+%HB;=ic+R@px4Ld8mwpY;W^A%8%l8$@B@1m5n`TlKI6bz2mp*^^^1mK$COW$HOfp zUGTz-cN9?BGEp}5A!mDFjaiWa2_J2Iq8qj0mXzk; z66JBKRP{p%wN7XobR0YjhAuW9T1Gw3FDvR5dWJ8ElNYF94eF3ebu+QwKjtvVu4L zI9ip#mQ@4uqVdkl-TUQMb^XBJVLW(-$s;Nq;@5gr4`UfLgF$adIhd?rHOa%D);whv z=;krPp~@I+-Z|r#s3yCH+c1US?dnm+C*)r{m+86sTJusLdNu^sqLrfWed^ndHXH`m zd3#cOe3>w-ga(Dus_^ppG9AC>Iq{y%%CK+Cro_sqLCs{VLuK=dev>OL1dis4(PQ5R zcz)>DjEkfV+MO;~>VUlYF00SgfUo~@(&9$Iy2|G0T9BSP?&T22>K46D zL*~j#yJ?)^*%J3!16f)@Y2Z^kS*BzwfAQ7K96rFRIh>#$*$_Io;z>ux@}G98!fWR@ zGTFxv4r~v)Gsd|pF91*-eaZ3Qw1MH$K^7JhWIdX%o$2kCbvGDXy)a?@8T&1dY4`;L z4Kn+f%SSFWE_rpEpL9bnlmYq`D!6F%di<&Hh=+!VI~j)2mfil03T#jJ_s?}VV0_hp z7T9bWxc>Jm2Z0WMU?`Z$xE74Gu~%s{mW!d4uvKCx@WD+gPUQ zV0vQS(Ig++z=EHN)BR44*EDSWIyT~R4$FcF*VEY*8@l=218Q05D2$|fXKFhRgBIEE zdDFB}1dKkoO^7}{5crKX!p?dZWNz$m>1icsXG2N+((x0OIST9Zo^DW_tytvlwXGpn zs8?pJXjEG;T@qrZi%#h93?FP$!&P4JA(&H61tqQi=opRzNpm zkrG}$^t9&XduK*Qa1?355wd8G2CI6QEh@Ua>AsD;7oRUNLPb76m4HG3K?)wF~IyS3`fXuNM>${?wmB zpVz;?6_(Fiadfd{vUCBM*_kt$+F3J+IojI;9L(gc9n3{sEZyzR9o!_mOwFC#tQ{Q~ zP3-`#uK#tP3Q7~Q;4H|wjZHO8h7e4IuBxl&vz2w~D8)w=Wtg31zpZhz%+kzSzL*dV zwp@{WU4i;hJ7c2f1O;7Mz6qRKeASoIv0_bV=i@NMG*l<#+;INk-^`5w@}Dj~;k=|}qM1vq_P z|GpBGe_IKq|LNy9SJhKOQ$c=5L{Dv|Q_lZl=-ky*BFBJLW9&y_C|!vyM~rQx=!vun z?rZJQB5t}Dctmui5i31C_;_}CEn}_W%>oSXtt>@kE1=JW*4*v4tPp;O6 zmAk{)m!)}34pTWg8{i>($%NQ(Tl;QC@J@FfBoc%Gr&m560^kgSfodAFrIjF}aIw)X zoXZ`@IsMkc8_=w%-7`D6Y4e*CG8k%Ud=GXhsTR50jUnm+R*0A(O3UKFg0`K;qp1bl z7``HN=?39ic_kR|^R^~w-*pa?Vj#7|e9F1iRx{GN2?wK!xR1GW!qa=~pjJb-#u1K8 zeR?Y2i-pt}yJq;SCiVHODIvQJX|ZJaT8nO+(?HXbLefulKKgM^B(UIO1r+S=7;kLJ zcH}1J=Px2jsh3Tec&v8Jcbng8;V-`#*UHt?hB(pmOipKwf3Lz8rG$heEB30Sg*2rx zV<|KN86$soN(I!BwO`1n^^uF2*x&vJ$2d$>+`(romzHP|)K_KkO6Hc>_dwMW-M(#S zK(~SiXT1@fvc#U+?|?PniDRm01)f^#55;nhM|wi?oG>yBsa?~?^xTU|fX-R(sTA+5 zaq}-8Tx7zrOy#3*JLIIVsBmHYLdD}!0NP!+ITW+Thn0)8SS!$@)HXwB3tY!fMxc#1 zMp3H?q3eD?u&Njx4;KQ5G>32+GRp1Ee5qMO0lZjaRRu&{W<&~DoJNGkcYF<5(Ab+J zgO>VhBl{okDPn78<%&e2mR{jwVCz5Og;*Z;;3%VvoGo_;HaGLWYF7q#jDX=Z#Ml`H z858YVV$%J|e<1n`%6Vsvq7GmnAV0wW4$5qQ3uR@1i>tW{xrl|ExywIc?fNgYlA?C5 zh$ezAFb5{rQu6i7BSS5*J-|9DQ{6^BVQ{b*lq`xS@RyrsJN?-t=MTMPY;WYeKBCNg z^2|pN!Q^WPJuuO4!|P@jzt&tY1Y8d%FNK5xK(!@`jO2aEA*4 zkO6b|UVBipci?){-Ke=+1;mGlND8)6+P;8sq}UXw2hn;fc7nM>g}GSMWu&v&fqh

iViYT=fZ(|3Ox^$aWPp4a8h24tD<|8-!aK0lHgL$N7Efw}J zVIB!7=T$U`ao1?upi5V4Et*-lTG0XvExbf!ya{cua==$WJyVG(CmA6Of*8E@DSE%L z`V^$qz&RU$7G5mg;8;=#`@rRG`-uS18$0WPN@!v2d{H2sOqP|!(cQ@ zUHo!d>>yFArLPf1q`uBvY32miqShLT1B@gDL4XoVTK&@owOoD)OIHXrYK-a1d$B{v zF^}8D3Y^g%^cnvScOSJR5QNH+BI%d|;J;wWM3~l>${fb8DNPg)wrf|GBP8p%LNGN# z3EaIiItgwtGgT&iYCFy9-LG}bMI|4LdmmJt@V@% zb6B)1kc=T)(|L@0;wr<>=?r04N;E&ef+7C^`wPWtyQe(*pD1pI_&XHy|0gIGHMekd zF_*M4yi6J&Z4LQj65)S zXwdM{SwUo%3SbPwFsHgqF@V|6afT|R6?&S;lw=8% z3}@9B=#JI3@B*#4s!O))~z zc>2_4Q_#&+5V`GFd?88^;c1i7;Vv_I*qt!_Yx*n=;rj!82rrR2rQ8u5(Ejlo{15P% zs~!{%XJ>FmJ})H^I9bn^Re&38H{xA!0l3^89k(oU;bZWXM@kn$#aoS&Y4l^-WEn-fH39Jb9lA%s*WsKJQl?n9B7_~P z-XM&WL7Z!PcoF6_D>V@$CvUIEy=+Z&0kt{szMk=f1|M+r*a43^$$B^MidrT0J;RI` z(?f!O<8UZkm$_Ny$Hth1J#^4ni+im8M9mr&k|3cIgwvjAgjH z8`N&h25xV#v*d$qBX5jkI|xOhQn!>IYZK7l5#^P4M&twe9&Ey@@GxYMxBZq2e7?`q z$~Szs0!g{2fGcp9PZEt|rdQ6bhAgpcLHPz?f-vB?$dc*!9OL?Q8mn7->bFD2Si60* z!O%y)fCdMSV|lkF9w%x~J*A&srMyYY3{=&$}H zGQ4VG_?$2X(0|vT0{=;W$~icCI{b6W{B!Q8xdGhF|D{25G_5_+%s(46lhvNLkik~R z>nr(&C#5wwOzJZQo9m|U<;&Wk!_#q|V>fsmj1g<6%hB{jGoNUPjgJslld>xmODzGjYc?7JSuA?A_QzjDw5AsRgi@Y|Z0{F{!1=!NES-#*f^s4l0Hu zz468))2IY5dmD9pa*(yT5{EyP^G>@ZWumealS-*WeRcZ}B%gxq{MiJ|RyX-^C1V=0 z@iKdrGi1jTe8Ya^x7yyH$kBNvM4R~`fbPq$BzHum-3Zo8C6=KW@||>zsA8-Y9uV5V z#oq-f5L5}V<&wF4@X@<3^C%ptp6+Ce)~hGl`kwj)bsAjmo_GU^r940Z-|`<)oGnh7 zFF0Tde3>ui?8Yj{sF-Z@)yQd~CGZ*w-6p2U<8}JO-sRsVI5dBji`01W8A&3$?}lxBaC&vn0E$c5tW* zX>5(zzZ=qn&!J~KdsPl;P@bmA-Pr8T*)eh_+Dv5=Ma|XSle6t(k8qcgNyar{*ReQ8 zTXwi=8vr>!3Ywr+BhggHDw8ke==NTQVMCK`$69fhzEFB*4+H9LIvdt-#IbhZvpS}} zO3lz;P?zr0*0$%-Rq_y^k(?I{Mk}h@w}cZpMUp|ucs55bcloL2)($u%mXQw({Wzc~ z;6nu5MkjP)0C(@%6Q_I_vsWrfhl7Zpoxw#WoE~r&GOSCz;_ro6i(^hM>I$8y>`!wW z*U^@?B!MMmb89I}2(hcE4zN2G^kwyWCZp5JG>$Ez7zP~D=J^LMjSM)27_0B_X^C(M z`fFT+%DcKlu?^)FCK>QzSnV%IsXVcUFhFdBP!6~se&xxrIxsvySAWu++IrH;FbcY$ z2DWTvSBRfLwdhr0nMx+URA$j3i7_*6BWv#DXfym?ZRDcX9C?cY9sD3q)uBDR3uWg= z(lUIzB)G$Hr!){>E{s4Dew+tb9kvToZp-1&c?y2wn@Z~(VBhqz`cB;{E4(P3N2*nJ z_>~g@;UF2iG{Kt(<1PyePTKahF8<)pozZ*xH~U-kfoAayCwJViIrnqwqO}7{0pHw$ zs2Kx?s#vQr7XZ264>5RNKSL8|Ty^=PsIx^}QqOOcfpGUU4tRkUc|kc7-!Ae6!+B{o~7nFpm3|G5^=0#Bnm6`V}oSQlrX(u%OWnC zoLPy&Q;1Jui&7ST0~#+}I^&?vcE*t47~Xq#YwvA^6^} z`WkC)$AkNub|t@S!$8CBlwbV~?yp&@9h{D|3z-vJXgzRC5^nYm+PyPcgRzAnEi6Q^gslXYRv4nycsy-SJu?lMps-? zV`U*#WnFsdPLL)Q$AmD|0`UaC4ND07+&UmOu!eHruzV|OUox<+Jl|Mr@6~C`T@P%s zW7sgXLF2SSe9Fl^O(I*{9wsFSYb2l%-;&Pi^dpv!{)C3d0AlNY6!4fgmSgj_wQ*7Am7&$z;Jg&wgR-Ih;lUvWS|KTSg!&s_E9_bXBkZvGiC6bFKDWZxsD$*NZ#_8bl zG1P-#@?OQzED7@jlMJTH@V!6k;W>auvft)}g zhoV{7$q=*;=l{O>Q4a@ ziMjf_u*o^PsO)#BjC%0^h>Xp@;5$p{JSYDt)zbb}s{Kbt!T*I@Pk@X0zds6wsefuU zW$XY%yyRGC94=6mf?x+bbA5CDQ2AgW1T-jVAJbm7K(gp+;v6E0WI#kuACgV$r}6L? 
zd|Tj?^%^*N&b>Dd{Wr$FS2qI#Ucs1yd4N+RBUQiSZGujH`#I)mG&VKoDh=KKFl4=G z&MagXl6*<)$6P}*Tiebpz5L=oMaPrN+caUXRJ`D?=K9!e0f{@D&cZLKN?iNP@X0aF zE(^pl+;*T5qt?1jRC=5PMgV!XNITRLS_=9{CJExaQj;lt!&pdzpK?8p>%Mb+D z?yO*uSung=-`QQ@yX@Hyd4@CI^r{2oiu`%^bNkz+Nkk!IunjwNC|WcqvX~k=><-I3 zDQdbdb|!v+Iz01$w@aMl!R)koD77Xp;eZwzSl-AT zr@Vu{=xvgfq9akRrrM)}=!=xcs+U1JO}{t(avgz`6RqiiX<|hGG1pmop8k6Q+G_mv zJv|RfDheUp2L3=^C=4aCBMBn0aRCU(DQwX-W(RkRwmLeuJYF<0urcaf(=7)JPg<3P zQs!~G)9CT18o!J4{zX{_e}4eS)U-E)0FAt}wEI(c0%HkxgggW;(1E=>J17_hsH^sP z%lT0LGgbUXHx-K*CI-MCrP66UP0PvGqM$MkeLyqHdbgP|_Cm!7te~b8p+e6sQ_3k| zVcwTh6d83ltdnR>D^)BYQpDKlLk3g0Hdcgz2}%qUs9~~Rie)A-BV1mS&naYai#xcZ z(d{8=-LVpTp}2*y)|gR~;qc7fp26}lPcLZ#=JpYcn3AT9(UIdOyg+d(P5T7D&*P}# zQCYplZO5|7+r19%9e`v^vfSS1sbX1c%=w1;oyruXB%Kl$ACgKQ6=qNWLsc=28xJjg zwvsI5-%SGU|3p>&zXVl^vVtQT3o-#$UT9LI@Npz~6=4!>mc431VRNN8od&Ul^+G_kHC`G=6WVWM z%9eWNyy(FTO|A+@x}Ou3CH)oi;t#7rAxdIXfNFwOj_@Y&TGz6P_sqiB`Q6Lxy|Q{`|fgmRG(k+!#b*M+Z9zFce)f-7;?Km5O=LHV9f9_87; zF7%R2B+$?@sH&&-$@tzaPYkw0;=i|;vWdI|Wl3q_Zu>l;XdIw2FjV=;Mq5t1Q0|f< zs08j54Bp`3RzqE=2enlkZxmX6OF+@|2<)A^RNQpBd6o@OXl+i)zO%D4iGiQNuXd+zIR{_lb96{lc~bxsBveIw6umhShTX+3@ZJ=YHh@ zWY3(d0azg;7oHn>H<>?4@*RQbi>SmM=JrHvIG(~BrvI)#W(EAeO6fS+}mxxcc+X~W6&YVl86W9WFSS}Vz-f9vS?XUDBk)3TcF z8V?$4Q)`uKFq>xT=)Y9mMFVTUk*NIA!0$?RP6Ig0TBmUFrq*Q-Agq~DzxjStQyJ({ zBeZ;o5qUUKg=4Hypm|}>>L=XKsZ!F$yNTDO)jt4H0gdQ5$f|d&bnVCMMXhNh)~mN z@_UV6D7MVlsWz+zM+inZZp&P4fj=tm6fX)SG5H>OsQf_I8c~uGCig$GzuwViK54bcgL;VN|FnyQl>Ed7(@>=8$a_UKIz|V6CeVSd2(P z0Uu>A8A+muM%HLFJQ9UZ5c)BSAv_zH#1f02x?h9C}@pN@6{>UiAp>({Fn(T9Q8B z^`zB;kJ5b`>%dLm+Ol}ty!3;8f1XDSVX0AUe5P#@I+FQ-`$(a;zNgz)4x5hz$Hfbg z!Q(z26wHLXko(1`;(BAOg_wShpX0ixfWq3ponndY+u%1gyX)_h=v1zR#V}#q{au6; z!3K=7fQwnRfg6FXtNQmP>`<;!N137paFS%y?;lb1@BEdbvQHYC{976l`cLqn;b8lp zIDY>~m{gDj(wfnK!lpW6pli)HyLEiUrNc%eXTil|F2s(AY+LW5hkKb>TQ3|Q4S9rr zpDs4uK_co6XPsn_z$LeS{K4jFF`2>U`tbgKdyDne`xmR<@6AA+_hPNKCOR-Zqv;xk zu5!HsBUb^!4uJ7v0RuH-7?l?}b=w5lzzXJ~gZcxRKOovSk@|#V+MuX%Y+=;14i*%{)_gSW9(#4%)AV#3__kac1|qUy!uyP{>?U#5wYNq}y$S9pCc zFc~4mgSC*G~j0u#qqp9 z${>3HV~@->GqEhr_Xwoxq?Hjn#=s2;i~g^&Hn|aDKpA>Oc%HlW(KA1?BXqpxB;Ydx)w;2z^MpjJ(Qi(X!$5RC z*P{~%JGDQqojV>2JbEeCE*OEu!$XJ>bWA9Oa_Hd;y)F%MhBRi*LPcdqR8X`NQ&1L# z5#9L*@qxrx8n}LfeB^J{%-?SU{FCwiWyHp682F+|pa+CQa3ZLzBqN1{)h4d6+vBbV zC#NEbQLC;}me3eeYnOG*nXOJZEU$xLZ1<1Y=7r0(-U0P6-AqwMAM`a(Ed#7vJkn6plb4eI4?2y3yOTGmmDQ!z9`wzbf z_OY#0@5=bnep;MV0X_;;SJJWEf^E6Bd^tVJ9znWx&Ks8t*B>AM@?;D4oWUGc z!H*`6d7Cxo6VuyS4Eye&L1ZRhrRmN6Lr`{NL(wDbif|y&z)JN>Fl5#Wi&mMIr5i;x zBx}3YfF>>8EC(fYnmpu~)CYHuHCyr5*`ECap%t@y=jD>!_%3iiE|LN$mK9>- zHdtpy8fGZtkZF?%TW~29JIAfi2jZT8>OA7=h;8T{{k?c2`nCEx9$r zS+*&vt~2o^^J+}RDG@+9&M^K*z4p{5#IEVbz`1%`m5c2};aGt=V?~vIM}ZdPECDI)47|CWBCfDWUbxBCnmYivQ*0Nu_xb*C>~C9(VjHM zxe<*D<#dQ8TlpMX2c@M<9$w!RP$hpG4cs%AI){jp*Sj|*`m)5(Bw*A0$*i-(CA5#%>a)$+jI2C9r6|(>J8InryENI z$NohnxDUB;wAYDwrb*!N3noBTKPpPN}~09SEL18tkG zxgz(RYU_;DPT{l?Q$+eaZaxnsWCA^ds^0PVRkIM%bOd|G2IEBBiz{&^JtNsODs;5z zICt_Zj8wo^KT$7Bg4H+y!Df#3mbl%%?|EXe!&(Vmac1DJ*y~3+kRKAD=Ovde4^^%~ zw<9av18HLyrf*_>Slp;^i`Uy~`mvBjZ|?Ad63yQa#YK`4+c6;pW4?XIY9G1(Xh9WO8{F-Aju+nS9Vmv=$Ac0ienZ+p9*O%NG zMZKy5?%Z6TAJTE?o5vEr0r>f>hb#2w2U3DL64*au_@P!J!TL`oH2r*{>ffu6|A7tv zL4juf$DZ1MW5ZPsG!5)`k8d8c$J$o;%EIL0va9&GzWvkS%ZsGb#S(?{!UFOZ9<$a| zY|a+5kmD5N&{vRqkgY>aHsBT&`rg|&kezoD)gP0fsNYHsO#TRc_$n6Lf1Z{?+DLziXlHrq4sf(!>O{?Tj;Eh@%)+nRE_2VxbN&&%%caU#JDU%vL3}Cb zsb4AazPI{>8H&d=jUaZDS$-0^AxE@utGs;-Ez_F(qC9T=UZX=>ok2k2 ziTn{K?y~a5reD2A)P${NoI^>JXn>`IeArow(41c-Wm~)wiryEP(OS{YXWi7;%dG9v zI?mwu1MxD{yp_rrk!j^cKM)dc4@p4Ezyo%lRN|XyD}}>v=Xoib0gOcdXrQ^*61HNj z=NP|pd>@yfvr-=m{8$3A8TQGMTE7g=z!%yt`8`Bk-0MMwW~h^++;qyUP!J~ykh1GO 
z(FZ59xuFR$(WE;F@UUyE@Sp>`aVNjyj=Ty>_Vo}xf`e7`F;j-IgL5`1~-#70$9_=uBMq!2&1l zomRgpD58@)YYfvLtPW}{C5B35R;ZVvB<<#)x%srmc_S=A7F@DW8>QOEGwD6suhwCg z>Pa+YyULhmw%BA*4yjDp|2{!T98~<6Yfd(wo1mQ!KWwq0eg+6)o1>W~f~kL<-S+P@$wx*zeI|1t7z#Sxr5 zt6w+;YblPQNplq4Z#T$GLX#j6yldXAqj>4gAnnWtBICUnA&-dtnlh=t0Ho_vEKwV` z)DlJi#!@nkYV#$!)@>udAU*hF?V`2$Hf=V&6PP_|r#Iv*J$9)pF@X3`k;5})9^o4y z&)~?EjX5yX12O(BsFy-l6}nYeuKkiq`u9145&3Ssg^y{5G3Pse z9w(YVa0)N-fLaBq1`P!_#>SS(8fh_5!f{UrgZ~uEdeMJIz7DzI5!NHHqQtm~#CPij z?=N|J>nPR6_sL7!f4hD_|KH`vf8(Wpnj-(gPWH+ZvID}%?~68SwhPTC3u1_cB`otq z)U?6qo!ZLi5b>*KnYHWW=3F!p%h1;h{L&(Q&{qY6)_qxNfbP6E3yYpW!EO+IW3?@J z);4>g4gnl^8klu7uA>eGF6rIGSynacogr)KUwE_R4E5Xzi*Qir@b-jy55-JPC8c~( zo!W8y9OGZ&`xmc8;=4-U9=h{vCqfCNzYirONmGbRQlR`WWlgnY+1wCXbMz&NT~9*| z6@FrzP!LX&{no2!Ln_3|I==_4`@}V?4a;YZKTdw;vT<+K+z=uWbW(&bXEaWJ^W8Td z-3&1bY^Z*oM<=M}LVt>_j+p=2Iu7pZmbXrhQ_k)ysE9yXKygFNw$5hwDn(M>H+e1&9BM5!|81vd%r%vEm zqxY3?F@fb6O#5UunwgAHR9jp_W2zZ}NGp2%mTW@(hz7$^+a`A?mb8|_G*GNMJ) zjqegXQio=i@AINre&%ofexAr95aop5C+0MZ0m-l=MeO8m3epm7U%vZB8+I+C*iNFM z#T3l`gknX;D$-`2XT^Cg*vrv=RH+P;_dfF++cP?B_msQI4j+lt&rX2)3GaJx%W*Nn zkML%D{z5tpHH=dksQ*gzc|}gzW;lwAbxoR07VNgS*-c3d&8J|;@3t^ zVUz*J*&r7DFRuFVDCJDK8V9NN5hvpgGjwx+5n)qa;YCKe8TKtdnh{I7NU9BCN!0dq zczrBk8pE{{@vJa9ywR@mq*J=v+PG;?fwqlJVhijG!3VmIKs>9T6r7MJpC)m!Tc#>g zMtVsU>wbwFJEfwZ{vB|ZlttNe83)$iz`~#8UJ^r)lJ@HA&G#}W&ZH*;k{=TavpjWE z7hdyLZPf*X%Gm}i`Y{OGeeu^~nB8=`{r#TUrM-`;1cBvEd#d!kPqIgYySYhN-*1;L z^byj%Yi}Gx)Wnkosi337BKs}+5H5dth1JA{Ir-JKN$7zC)*}hqeoD(WfaUDPT>0`- z(6sa0AoIqASwF`>hP}^|)a_j2s^PQn*qVC{Q}htR z5-)duBFXT_V56-+UohKXlq~^6uf!6sA#ttk1o~*QEy_Y-S$gAvq47J9Vtk$5oA$Ct zYhYJ@8{hsC^98${!#Ho?4y5MCa7iGnfz}b9jE~h%EAAv~Qxu)_rAV;^cygV~5r_~?l=B`zObj7S=H=~$W zPtI_m%g$`kL_fVUk9J@>EiBH zOO&jtn~&`hIFMS5S`g8w94R4H40mdNUH4W@@XQk1sr17b{@y|JB*G9z1|CrQjd+GX z6+KyURG3;!*BQrentw{B2R&@2&`2}n(z-2&X7#r!{yg@Soy}cRD~j zj9@UBW+N|4HW4AWapy4wfUI- zZ`gSL6DUlgj*f1hSOGXG0IVH8HxK?o2|3HZ;KW{K+yPAlxtb)NV_2AwJm|E)FRs&& z=c^e7bvUsztY|+f^k7NXs$o1EUq>cR7C0$UKi6IooHWlK_#?IWDkvywnzg&ThWo^? z2O_N{5X39#?eV9l)xI(>@!vSB{DLt*oY!K1R8}_?%+0^C{d9a%N4 zoxHVT1&Lm|uDX%$QrBun5e-F`HJ^T$ zmzv)p@4ZHd_w9!%Hf9UYNvGCw2TTTbrj9pl+T9%-_-}L(tES>Or-}Z4F*{##n3~L~TuxjirGuIY#H7{%$E${?p{Q01 zi6T`n;rbK1yIB9jmQNycD~yZq&mbIsFWHo|ZAChSFPQa<(%d8mGw*V3fh|yFoxOOiWJd(qvVb!Z$b88cg->N=qO*4k~6;R==|9ihg&riu#P~s4Oap9O7f%crSr^rljeIfXDEg>wi)&v*a%7zpz<9w z*r!3q9J|390x`Zk;g$&OeN&ctp)VKRpDSV@kU2Q>jtok($Y-*x8_$2piTxun81@vt z!Vj?COa0fg2RPXMSIo26T=~0d`{oGP*eV+$!0I<(4azk&Vj3SiG=Q!6mX0p$z7I}; z9BJUFgT-K9MQQ-0@Z=^7R<{bn2Fm48endsSs`V7_@%8?Bxkqv>BDoVcj?K#dV#uUP zL1ND~?D-|VGKe3Rw_7-Idpht>H6XRLh*U7epS6byiGvJpr%d}XwfusjH9g;Z98H`x zyde%%5mhGOiL4wljCaWCk-&uE4_OOccb9c!ZaWt4B(wYl!?vyzl%7n~QepN&eFUrw zFIOl9c({``6~QD+43*_tzP{f2x41h(?b43^y6=iwyB)2os5hBE!@YUS5?N_tXd=h( z)WE286Fbd>R4M^P{!G)f;h<3Q>Fipuy+d2q-)!RyTgt;wr$(?9ox3;q+{E*ZQHhOn;lM`cjnu9 zXa48ks-v(~b*;MAI<>YZH(^NV8vjb34beE<_cwKlJoR;k6lJNSP6v}uiyRD?|0w+X@o1ONrH8a$fCxXpf? 
z?$DL0)7|X}Oc%h^zrMKWc-NS9I0Utu@>*j}b@tJ=ixQSJ={4@854wzW@E>VSL+Y{i z#0b=WpbCZS>kUCO_iQz)LoE>P5LIG-hv9E+oG}DtlIDF>$tJ1aw9^LuhLEHt?BCj& z(O4I8v1s#HUi5A>nIS-JK{v!7dJx)^Yg%XjNmlkWAq2*cv#tHgz`Y(bETc6CuO1VkN^L-L3j_x<4NqYb5rzrLC-7uOv z!5e`GZt%B782C5-fGnn*GhDF$%(qP<74Z}3xx+{$4cYKy2ikxI7B2N+2r07DN;|-T->nU&!=Cm#rZt%O_5c&1Z%nlWq3TKAW0w zQqemZw_ue--2uKQsx+niCUou?HjD`xhEjjQd3%rrBi82crq*~#uA4+>vR<_S{~5ce z-2EIl?~s z1=GVL{NxP1N3%=AOaC}j_Fv=ur&THz zyO!d9kHq|c73kpq`$+t+8Bw7MgeR5~`d7ChYyGCBWSteTB>8WAU(NPYt2Dk`@#+}= zI4SvLlyk#pBgVigEe`?NG*vl7V6m+<}%FwPV=~PvvA)=#ths==DRTDEYh4V5}Cf$z@#;< zyWfLY_5sP$gc3LLl2x+Ii)#b2nhNXJ{R~vk`s5U7Nyu^3yFg&D%Txwj6QezMX`V(x z=C`{76*mNb!qHHs)#GgGZ_7|vkt9izl_&PBrsu@}L`X{95-2jf99K)0=*N)VxBX2q z((vkpP2RneSIiIUEnGb?VqbMb=Zia+rF~+iqslydE34cSLJ&BJW^3knX@M;t*b=EA zNvGzv41Ld_T+WT#XjDB840vovUU^FtN_)G}7v)1lPetgpEK9YS^OWFkPoE{ovj^=@ zO9N$S=G$1ecndT_=5ehth2Lmd1II-PuT~C9`XVePw$y8J#dpZ?Tss<6wtVglm(Ok7 z3?^oi@pPio6l&!z8JY(pJvG=*pI?GIOu}e^EB6QYk$#FJQ%^AIK$I4epJ+9t?KjqA+bkj&PQ*|vLttme+`9G=L% ziadyMw_7-M)hS(3E$QGNCu|o23|%O+VN7;Qggp?PB3K-iSeBa2b}V4_wY`G1Jsfz4 z9|SdB^;|I8E8gWqHKx!vj_@SMY^hLEIbSMCuE?WKq=c2mJK z8LoG-pnY!uhqFv&L?yEuxo{dpMTsmCn)95xanqBrNPTgXP((H$9N${Ow~Is-FBg%h z53;|Y5$MUN)9W2HBe2TD`ct^LHI<(xWrw}$qSoei?}s)&w$;&!14w6B6>Yr6Y8b)S z0r71`WmAvJJ`1h&poLftLUS6Ir zC$bG9!Im_4Zjse)#K=oJM9mHW1{%l8sz$1o?ltdKlLTxWWPB>Vk22czVt|1%^wnN@*!l)}?EgtvhC>vlHm^t+ogpgHI1_$1ox9e;>0!+b(tBrmXRB`PY1vp-R**8N7 zGP|QqI$m(Rdu#=(?!(N}G9QhQ%o!aXE=aN{&wtGP8|_qh+7a_j_sU5|J^)vxq;# zjvzLn%_QPHZZIWu1&mRAj;Sa_97p_lLq_{~j!M9N^1yp3U_SxRqK&JnR%6VI#^E12 z>CdOVI^_9aPK2eZ4h&^{pQs}xsijXgFYRIxJ~N7&BB9jUR1fm!(xl)mvy|3e6-B3j zJn#ajL;bFTYJ2+Q)tDjx=3IklO@Q+FFM}6UJr6km7hj7th9n_&JR7fnqC!hTZoM~T zBeaVFp%)0cbPhejX<8pf5HyRUj2>aXnXBqDJe73~J%P(2C?-RT{c3NjE`)om! zl$uewSgWkE66$Kb34+QZZvRn`fob~Cl9=cRk@Es}KQm=?E~CE%spXaMO6YmrMl%9Q zlA3Q$3|L1QJ4?->UjT&CBd!~ru{Ih^in&JXO=|<6J!&qp zRe*OZ*cj5bHYlz!!~iEKcuE|;U4vN1rk$xq6>bUWD*u(V@8sG^7>kVuo(QL@Ki;yL zWC!FT(q{E8#on>%1iAS0HMZDJg{Z{^!De(vSIq&;1$+b)oRMwA3nc3mdTSG#3uYO_ z>+x;7p4I;uHz?ZB>dA-BKl+t-3IB!jBRgdvAbW!aJ(Q{aT>+iz?91`C-xbe)IBoND z9_Xth{6?(y3rddwY$GD65IT#f3<(0o#`di{sh2gm{dw*#-Vnc3r=4==&PU^hCv$qd zjw;>i&?L*Wq#TxG$mFIUf>eK+170KG;~+o&1;Tom9}}mKo23KwdEM6UonXgc z!6N(@k8q@HPw{O8O!lAyi{rZv|DpgfU{py+j(X_cwpKqcalcqKIr0kM^%Br3SdeD> zHSKV94Yxw;pjzDHo!Q?8^0bb%L|wC;4U^9I#pd5O&eexX+Im{ z?jKnCcsE|H?{uGMqVie_C~w7GX)kYGWAg%-?8|N_1#W-|4F)3YTDC+QSq1s!DnOML3@d`mG%o2YbYd#jww|jD$gotpa)kntakp#K;+yo-_ZF9qrNZw<%#C zuPE@#3RocLgPyiBZ+R_-FJ_$xP!RzWm|aN)S+{$LY9vvN+IW~Kf3TsEIvP+B9Mtm! 
zpfNNxObWQpLoaO&cJh5>%slZnHl_Q~(-Tfh!DMz(dTWld@LG1VRF`9`DYKhyNv z2pU|UZ$#_yUx_B_|MxUq^glT}O5Xt(Vm4Mr02><%C)@v;vPb@pT$*yzJ4aPc_FZ3z z3}PLoMBIM>q_9U2rl^sGhk1VUJ89=*?7|v`{!Z{6bqFMq(mYiA?%KbsI~JwuqVA9$H5vDE+VocjX+G^%bieqx->s;XWlKcuv(s%y%D5Xbc9+ zc(_2nYS1&^yL*ey664&4`IoOeDIig}y-E~_GS?m;D!xv5-xwz+G`5l6V+}CpeJDi^ z%4ed$qowm88=iYG+(`ld5Uh&>Dgs4uPHSJ^TngXP_V6fPyl~>2bhi20QB%lSd#yYn zO05?KT1z@?^-bqO8Cg`;ft>ilejsw@2%RR7;`$Vs;FmO(Yr3Fp`pHGr@P2hC%QcA|X&N2Dn zYf`MqXdHi%cGR@%y7Rg7?d3?an){s$zA{!H;Ie5exE#c~@NhQUFG8V=SQh%UxUeiV zd7#UcYqD=lk-}sEwlpu&H^T_V0{#G?lZMxL7ih_&{(g)MWBnCZxtXg znr#}>U^6!jA%e}@Gj49LWG@*&t0V>Cxc3?oO7LSG%~)Y5}f7vqUUnQ;STjdDU}P9IF9d9<$;=QaXc zL1^X7>fa^jHBu_}9}J~#-oz3Oq^JmGR#?GO7b9a(=R@fw@}Q{{@`Wy1vIQ#Bw?>@X z-_RGG@wt|%u`XUc%W{J z>iSeiz8C3H7@St3mOr_mU+&bL#Uif;+Xw-aZdNYUpdf>Rvu0i0t6k*}vwU`XNO2he z%miH|1tQ8~ZK!zmL&wa3E;l?!!XzgV#%PMVU!0xrDsNNZUWKlbiOjzH-1Uoxm8E#r`#2Sz;-o&qcqB zC-O_R{QGuynW14@)7&@yw1U}uP(1cov)twxeLus0s|7ayrtT8c#`&2~Fiu2=R;1_4bCaD=*E@cYI>7YSnt)nQc zohw5CsK%m?8Ack)qNx`W0_v$5S}nO|(V|RZKBD+btO?JXe|~^Qqur%@eO~<8-L^9d z=GA3-V14ng9L29~XJ>a5k~xT2152zLhM*@zlp2P5Eu}bywkcqR;ISbas&#T#;HZSf z2m69qTV(V@EkY(1Dk3`}j)JMo%ZVJ*5eB zYOjIisi+igK0#yW*gBGj?@I{~mUOvRFQR^pJbEbzFxTubnrw(Muk%}jI+vXmJ;{Q6 zrSobKD>T%}jV4Ub?L1+MGOD~0Ir%-`iTnWZN^~YPrcP5y3VMAzQ+&en^VzKEb$K!Q z<7Dbg&DNXuow*eD5yMr+#08nF!;%4vGrJI++5HdCFcGLfMW!KS*Oi@=7hFwDG!h2< zPunUEAF+HncQkbfFj&pbzp|MU*~60Z(|Ik%Tn{BXMN!hZOosNIseT?R;A`W?=d?5X zK(FB=9mZusYahp|K-wyb={rOpdn=@;4YI2W0EcbMKyo~-#^?h`BA9~o285%oY zfifCh5Lk$SY@|2A@a!T2V+{^!psQkx4?x0HSV`(w9{l75QxMk!)U52Lbhn{8ol?S) zCKo*7R(z!uk<6*qO=wh!Pul{(qq6g6xW;X68GI_CXp`XwO zxuSgPRAtM8K7}5E#-GM!*ydOOG_{A{)hkCII<|2=ma*71ci_-}VPARm3crFQjLYV! z9zbz82$|l01mv`$WahE2$=fAGWkd^X2kY(J7iz}WGS z@%MyBEO=A?HB9=^?nX`@nh;7;laAjs+fbo!|K^mE!tOB>$2a_O0y-*uaIn8k^6Y zSbuv;5~##*4Y~+y7Z5O*3w4qgI5V^17u*ZeupVGH^nM&$qmAk|anf*>r zWc5CV;-JY-Z@Uq1Irpb^O`L_7AGiqd*YpGUShb==os$uN3yYvb`wm6d=?T*it&pDk zo`vhw)RZX|91^^Wa_ti2zBFyWy4cJu#g)_S6~jT}CC{DJ_kKpT`$oAL%b^!2M;JgT zM3ZNbUB?}kP(*YYvXDIH8^7LUxz5oE%kMhF!rnPqv!GiY0o}NR$OD=ITDo9r%4E>E0Y^R(rS^~XjWyVI6 zMOR5rPXhTp*G*M&X#NTL`Hu*R+u*QNoiOKg4CtNPrjgH>c?Hi4MUG#I917fx**+pJfOo!zFM&*da&G_x)L(`k&TPI*t3e^{crd zX<4I$5nBQ8Ax_lmNRa~E*zS-R0sxkz`|>7q_?*e%7bxqNm3_eRG#1ae3gtV9!fQpY z+!^a38o4ZGy9!J5sylDxZTx$JmG!wg7;>&5H1)>f4dXj;B+@6tMlL=)cLl={jLMxY zbbf1ax3S4>bwB9-$;SN2?+GULu;UA-35;VY*^9Blx)Jwyb$=U!D>HhB&=jSsd^6yw zL)?a|>GxU!W}ocTC(?-%z3!IUhw^uzc`Vz_g>-tv)(XA#JK^)ZnC|l1`@CdX1@|!| z_9gQ)7uOf?cR@KDp97*>6X|;t@Y`k_N@)aH7gY27)COv^P3ya9I{4z~vUjLR9~z1Z z5=G{mVtKH*&$*t0@}-i_v|3B$AHHYale7>E+jP`ClqG%L{u;*ff_h@)al?RuL7tOO z->;I}>%WI{;vbLP3VIQ^iA$4wl6@0sDj|~112Y4OFjMs`13!$JGkp%b&E8QzJw_L5 zOnw9joc0^;O%OpF$Qp)W1HI!$4BaXX84`%@#^dk^hFp^pQ@rx4g(8Xjy#!X%+X5Jd@fs3amGT`}mhq#L97R>OwT5-m|h#yT_-v@(k$q7P*9X~T*3)LTdzP!*B} z+SldbVWrrwQo9wX*%FyK+sRXTa@O?WM^FGWOE?S`R(0P{<6p#f?0NJvnBia?k^fX2 zNQs7K-?EijgHJY}&zsr;qJ<*PCZUd*x|dD=IQPUK_nn)@X4KWtqoJNHkT?ZWL_hF? zS8lp2(q>;RXR|F;1O}EE#}gCrY~#n^O`_I&?&z5~7N;zL0)3Tup`%)oHMK-^r$NT% zbFg|o?b9w(q@)6w5V%si<$!U<#}s#x@0aX-hP>zwS#9*75VXA4K*%gUc>+yzupTDBOKH8WR4V0pM(HrfbQ&eJ79>HdCvE=F z|J>s;;iDLB^3(9}?biKbxf1$lI!*Z%*0&8UUq}wMyPs_hclyQQi4;NUY+x2qy|0J; zhn8;5)4ED1oHwg+VZF|80<4MrL97tGGXc5Sw$wAI#|2*cvQ=jB5+{AjMiDHmhUC*a zlmiZ`LAuAn_}hftXh;`Kq0zblDk8?O-`tnilIh|;3lZp@F_osJUV9`*R29M?7H{Fy z`nfVEIDIWXmU&YW;NjU8)EJpXhxe5t+scf|VXM!^bBlwNh)~7|3?fWwo_~ZFk(22% zTMesYw+LNx3J-_|DM~`v93yXe=jPD{q;li;5PD?Dyk+b? 
zo21|XpT@)$BM$%F=P9J19Vi&1#{jM3!^Y&fr&_`toi`XB1!n>sbL%U9I5<7!@?t)~ z;&H%z>bAaQ4f$wIzkjH70;<8tpUoxzKrPhn#IQfS%9l5=Iu))^XC<58D!-O z{B+o5R^Z21H0T9JQ5gNJnqh#qH^na|z92=hONIM~@_iuOi|F>jBh-?aA20}Qx~EpDGElELNn~|7WRXRFnw+Wdo`|# zBpU=Cz3z%cUJ0mx_1($X<40XEIYz(`noWeO+x#yb_pwj6)R(__%@_Cf>txOQ74wSJ z0#F3(zWWaR-jMEY$7C*3HJrohc79>MCUu26mfYN)f4M~4gD`}EX4e}A!U}QV8!S47 z6y-U-%+h`1n`*pQuKE%Av0@)+wBZr9mH}@vH@i{v(m-6QK7Ncf17x_D=)32`FOjjo zg|^VPf5c6-!FxN{25dvVh#fog=NNpXz zfB$o+0jbRkHH{!TKhE709f+jI^$3#v1Nmf80w`@7-5$1Iv_`)W^px8P-({xwb;D0y z7LKDAHgX<84?l!I*Dvi2#D@oAE^J|g$3!)x1Ua;_;<@#l1fD}lqU2_tS^6Ht$1Wl} zBESo7o^)9-Tjuz$8YQSGhfs{BQV6zW7dA?0b(Dbt=UnQs&4zHfe_sj{RJ4uS-vQpC zX;Bbsuju4%!o8?&m4UZU@~ZZjeFF6ex2ss5_60_JS_|iNc+R0GIjH1@Z z=rLT9%B|WWgOrR7IiIwr2=T;Ne?30M!@{%Qf8o`!>=s<2CBpCK_TWc(DX51>e^xh8 z&@$^b6CgOd7KXQV&Y4%}_#uN*mbanXq(2=Nj`L7H7*k(6F8s6{FOw@(DzU`4-*77{ zF+dxpv}%mFpYK?>N_2*#Y?oB*qEKB}VoQ@bzm>ptmVS_EC(#}Lxxx730trt0G)#$b zE=wVvtqOct1%*9}U{q<)2?{+0TzZzP0jgf9*)arV)*e!f`|jgT{7_9iS@e)recI#z zbzolURQ+TOzE!ymqvBY7+5NnAbWxvMLsLTwEbFqW=CPyCsmJ}P1^V30|D5E|p3BC5 z)3|qgw@ra7aXb-wsa|l^in~1_fm{7bS9jhVRkYVO#U{qMp z)Wce+|DJ}4<2gp8r0_xfZpMo#{Hl2MfjLcZdRB9(B(A(f;+4s*FxV{1F|4d`*sRNd zp4#@sEY|?^FIJ;tmH{@keZ$P(sLh5IdOk@k^0uB^BWr@pk6mHy$qf&~rI>P*a;h0C{%oA*i!VjWn&D~O#MxN&f@1Po# zKN+ zrGrkSjcr?^R#nGl<#Q722^wbYcgW@{+6CBS<1@%dPA8HC!~a`jTz<`g_l5N1M@9wn9GOAZ>nqNgq!yOCbZ@1z`U_N`Z>}+1HIZxk*5RDc&rd5{3qjRh8QmT$VyS;jK z;AF+r6XnnCp=wQYoG|rT2@8&IvKq*IB_WvS%nt%e{MCFm`&W*#LXc|HrD?nVBo=(8*=Aq?u$sDA_sC_RPDUiQ+wnIJET8vx$&fxkW~kP9qXKt zozR)@xGC!P)CTkjeWvXW5&@2?)qt)jiYWWBU?AUtzAN}{JE1I)dfz~7$;}~BmQF`k zpn11qmObXwRB8&rnEG*#4Xax3XBkKlw(;tb?Np^i+H8m(Wyz9k{~ogba@laiEk;2! zV*QV^6g6(QG%vX5Um#^sT&_e`B1pBW5yVth~xUs#0}nv?~C#l?W+9Lsb_5)!71rirGvY zTIJ$OPOY516Y|_014sNv+Z8cc5t_V=i>lWV=vNu#!58y9Zl&GsMEW#pPYPYGHQ|;vFvd*9eM==$_=vc7xnyz0~ zY}r??$<`wAO?JQk@?RGvkWVJlq2dk9vB(yV^vm{=NVI8dhsX<)O(#nr9YD?I?(VmQ z^r7VfUBn<~p3()8yOBjm$#KWx!5hRW)5Jl7wY@ky9lNM^jaT##8QGVsYeaVywmpv>X|Xj7gWE1Ezai&wVLt3p)k4w~yrskT-!PR!kiyQlaxl(( zXhF%Q9x}1TMt3~u@|#wWm-Vq?ZerK={8@~&@9r5JW}r#45#rWii};t`{5#&3$W)|@ zbAf2yDNe0q}NEUvq_Quq3cTjcw z@H_;$hu&xllCI9CFDLuScEMg|x{S7GdV8<&Mq=ezDnRZAyX-8gv97YTm0bg=d)(>N z+B2FcqvI9>jGtnK%eO%y zoBPkJTk%y`8TLf4)IXPBn`U|9>O~WL2C~C$z~9|0m*YH<-vg2CD^SX#&)B4ngOSG$ zV^wmy_iQk>dfN@Pv(ckfy&#ak@MLC7&Q6Ro#!ezM*VEh`+b3Jt%m(^T&p&WJ2Oqvj zs-4nq0TW6cv~(YI$n0UkfwN}kg3_fp?(ijSV#tR9L0}l2qjc7W?i*q01=St0eZ=4h zyGQbEw`9OEH>NMuIe)hVwYHsGERWOD;JxEiO7cQv%pFCeR+IyhwQ|y@&^24k+|8fD zLiOWFNJ2&vu2&`Jv96_z-Cd5RLgmeY3*4rDOQo?Jm`;I_(+ejsPM03!ly!*Cu}Cco zrQSrEDHNyzT(D5s1rZq!8#?f6@v6dB7a-aWs(Qk>N?UGAo{gytlh$%_IhyL7h?DLXDGx zgxGEBQoCAWo-$LRvM=F5MTle`M})t3vVv;2j0HZY&G z22^iGhV@uaJh(XyyY%} zd4iH_UfdV#T=3n}(Lj^|n;O4|$;xhu*8T3hR1mc_A}fK}jfZ7LX~*n5+`8N2q#rI$ z@<_2VANlYF$vIH$ zl<)+*tIWW78IIINA7Rr7i{<;#^yzxoLNkXL)eSs=%|P>$YQIh+ea_3k z_s7r4%j7%&*NHSl?R4k%1>Z=M9o#zxY!n8sL5>BO-ZP;T3Gut>iLS@U%IBrX6BA3k z)&@q}V8a{X<5B}K5s(c(LQ=%v1ocr`t$EqqY0EqVjr65usa=0bkf|O#ky{j3)WBR(((L^wmyHRzoWuL2~WTC=`yZ zn%VX`L=|Ok0v7?s>IHg?yArBcync5rG#^+u)>a%qjES%dRZoIyA8gQ;StH z1Ao7{<&}6U=5}4v<)1T7t!J_CL%U}CKNs-0xWoTTeqj{5{?Be$L0_tk>M9o8 zo371}S#30rKZFM{`H_(L`EM9DGp+Mifk&IP|C2Zu_)Ghr4Qtpmkm1osCf@%Z$%t+7 zYH$Cr)Ro@3-QDeQJ8m+x6%;?YYT;k6Z0E-?kr>x33`H%*ueBD7Zx~3&HtWn0?2Wt} zTG}*|v?{$ajzt}xPzV%lL1t-URi8*Zn)YljXNGDb>;!905Td|mpa@mHjIH%VIiGx- zd@MqhpYFu4_?y5N4xiHn3vX&|e6r~Xt> zZG`aGq|yTNjv;9E+Txuoa@A(9V7g?1_T5FzRI;!=NP1Kqou1z5?%X~Wwb{trRfd>i z8&y^H)8YnKyA_Fyx>}RNmQIczT?w2J4SNvI{5J&}Wto|8FR(W;Qw#b1G<1%#tmYzQ zQ2mZA-PAdi%RQOhkHy9Ea#TPSw?WxwL@H@cbkZwIq0B!@ns}niALidmn&W?!Vd4Gj zO7FiuV4*6Mr^2xlFSvM;Cp_#r8UaqIzHJQg_z^rEJw&OMm_8NGAY2)rKvki|o1bH~ 
z$2IbfVeY2L(^*rMRU1lM5Y_sgrDS`Z??nR2lX;zyR=c%UyGb*%TC-Dil?SihkjrQy~TMv6;BMs7P8il`H7DmpVm@rJ;b)hW)BL)GjS154b*xq-NXq2cwE z^;VP7ua2pxvCmxrnqUYQMH%a%nHmwmI33nJM(>4LznvY*k&C0{8f*%?zggpDgkuz&JBx{9mfb@wegEl2v!=}Sq2Gaty0<)UrOT0{MZtZ~j5y&w zXlYa_jY)I_+VA-^#mEox#+G>UgvM!Ac8zI<%JRXM_73Q!#i3O|)lOP*qBeJG#BST0 zqohi)O!|$|2SeJQo(w6w7%*92S})XfnhrH_Z8qe!G5>CglP=nI7JAOW?(Z29;pXJ9 zR9`KzQ=WEhy*)WH>$;7Cdz|>*i>=##0bB)oU0OR>>N<21e4rMCHDemNi2LD>Nc$;& zQRFthpWniC1J6@Zh~iJCoLOxN`oCKD5Q4r%ynwgUKPlIEd#?QViIqovY|czyK8>6B zSP%{2-<;%;1`#0mG^B(8KbtXF;Nf>K#Di72UWE4gQ%(_26Koiad)q$xRL~?pN71ZZ zujaaCx~jXjygw;rI!WB=xrOJO6HJ!!w}7eiivtCg5K|F6$EXa)=xUC za^JXSX98W`7g-tm@uo|BKj39Dl;sg5ta;4qjo^pCh~{-HdLl6qI9Ix6f$+qiZ$}s= zNguKrU;u+T@ko(Vr1>)Q%h$?UKXCY>3se%&;h2osl2D zE4A9bd7_|^njDd)6cI*FupHpE3){4NQ*$k*cOWZ_?CZ>Z4_fl@n(mMnYK62Q1d@+I zr&O))G4hMihgBqRIAJkLdk(p(D~X{-oBUA+If@B}j& zsHbeJ3RzTq96lB7d($h$xTeZ^gP0c{t!Y0c)aQE;$FY2!mACg!GDEMKXFOPI^)nHZ z`aSPJpvV0|bbrzhWWkuPURlDeN%VT8tndV8?d)eN*i4I@u zVKl^6{?}A?P)Fsy?3oi#clf}L18t;TjNI2>eI&(ezDK7RyqFxcv%>?oxUlonv(px) z$vnPzRH`y5A(x!yOIfL0bmgeMQB$H5wenx~!ujQK*nUBW;@Em&6Xv2%s(~H5WcU2R z;%Nw<$tI)a`Ve!>x+qegJnQsN2N7HaKzrFqM>`6R*gvh%O*-%THt zrB$Nk;lE;z{s{r^PPm5qz(&lM{sO*g+W{sK+m3M_z=4=&CC>T`{X}1Vg2PEfSj2x_ zmT*(x;ov%3F?qoEeeM>dUn$a*?SIGyO8m806J1W1o+4HRhc2`9$s6hM#qAm zChQ87b~GEw{ADfs+5}FJ8+|bIlIv(jT$Ap#hSHoXdd9#w<#cA<1Rkq^*EEkknUd4& zoIWIY)sAswy6fSERVm&!SO~#iN$OgOX*{9@_BWFyJTvC%S++ilSfCrO(?u=Dc?CXZ zzCG&0yVR{Z`|ZF0eEApWEo#s9osV>F{uK{QA@BES#&;#KsScf>y zvs?vIbI>VrT<*!;XmQS=bhq%46-aambZ(8KU-wOO2=en~D}MCToB_u;Yz{)1ySrPZ z@=$}EvjTdzTWU7c0ZI6L8=yP+YRD_eMMos}b5vY^S*~VZysrkq<`cK3>>v%uy7jgq z0ilW9KjVDHLv0b<1K_`1IkbTOINs0=m-22c%M~l=^S}%hbli-3?BnNq?b`hx^HX2J zIe6ECljRL0uBWb`%{EA=%!i^4sMcj+U_TaTZRb+~GOk z^ZW!nky0n*Wb*r+Q|9H@ml@Z5gU&W`(z4-j!OzC1wOke`TRAYGZVl$PmQ16{3196( zO*?`--I}Qf(2HIwb2&1FB^!faPA2=sLg(@6P4mN)>Dc3i(B0;@O-y2;lM4akD>@^v z=u>*|!s&9zem70g7zfw9FXl1bpJW(C#5w#uy5!V?Q(U35A~$dR%LDVnq@}kQm13{} zd53q3N(s$Eu{R}k2esbftfjfOITCL;jWa$}(mmm}d(&7JZ6d3%IABCapFFYjdEjdK z&4Edqf$G^MNAtL=uCDRs&Fu@FXRgX{*0<(@c3|PNHa>L%zvxWS={L8%qw`STm+=Rd zA}FLspESSIpE_^41~#5yI2bJ=9`oc;GIL!JuW&7YetZ?0H}$$%8rW@*J37L-~Rsx!)8($nI4 zZhcZ2^=Y+p4YPl%j!nFJA|*M^gc(0o$i3nlphe+~-_m}jVkRN{spFs(o0ajW@f3K{ zDV!#BwL322CET$}Y}^0ixYj2w>&Xh12|R8&yEw|wLDvF!lZ#dOTHM9pK6@Nm-@9Lnng4ZHBgBSrr7KI8YCC9DX5Kg|`HsiwJHg2(7#nS;A{b3tVO?Z% za{m5b3rFV6EpX;=;n#wltDv1LE*|g5pQ+OY&*6qCJZc5oDS6Z6JD#6F)bWxZSF@q% z+1WV;m!lRB!n^PC>RgQCI#D1br_o^#iPk>;K2hB~0^<~)?p}LG%kigm@moD#q3PE+ zA^Qca)(xnqw6x>XFhV6ku9r$E>bWNrVH9fum0?4s?Rn2LG{Vm_+QJHse6xa%nzQ?k zKug4PW~#Gtb;#5+9!QBgyB@q=sk9=$S{4T>wjFICStOM?__fr+Kei1 z3j~xPqW;W@YkiUM;HngG!;>@AITg}vAE`M2Pj9Irl4w1fo4w<|Bu!%rh%a(Ai^Zhi zs92>v5;@Y(Zi#RI*ua*h`d_7;byQSa*v9E{2x$<-_=5Z<7{%)}4XExANcz@rK69T0x3%H<@frW>RA8^swA+^a(FxK| zFl3LD*ImHN=XDUkrRhp6RY5$rQ{bRgSO*(vEHYV)3Mo6Jy3puiLmU&g82p{qr0F?ohmbz)f2r{X2|T2 z$4fdQ=>0BeKbiVM!e-lIIs8wVTuC_m7}y4A_%ikI;Wm5$9j(^Y z(cD%U%k)X>_>9~t8;pGzL6L-fmQO@K; zo&vQzMlgY95;1BSkngY)e{`n0!NfVgf}2mB3t}D9@*N;FQ{HZ3Pb%BK6;5#-O|WI( zb6h@qTLU~AbVW#_6?c!?Dj65Now7*pU{h!1+eCV^KCuPAGs28~3k@ueL5+u|Z-7}t z9|lskE`4B7W8wMs@xJa{#bsCGDFoRSNSnmNYB&U7 zVGKWe%+kFB6kb)e;TyHfqtU6~fRg)f|>=5(N36)0+C z`hv65J<$B}WUc!wFAb^QtY31yNleq4dzmG`1wHTj=c*=hay9iD071Hc?oYoUk|M*_ zU1GihAMBsM@5rUJ(qS?9ZYJ6@{bNqJ`2Mr+5#hKf?doa?F|+^IR!8lq9)wS3tF_9n zW_?hm)G(M+MYb?V9YoX^_mu5h-LP^TL^!Q9Z7|@sO(rg_4+@=PdI)WL(B7`!K^ND- z-uIuVDCVEdH_C@c71YGYT^_Scf_dhB8Z2Xy6vGtBSlYud9vggOqv^L~F{BraSE_t} zIkP+Hp2&nH^-MNEs}^`oMLy11`PQW$T|K(`Bu*(f@)mv1-qY(_YG&J2M2<7k;;RK~ zL{Fqj9yCz8(S{}@c)S!65aF<=&eLI{hAMErCx&>i7OeDN>okvegO87OaG{Jmi<|}D 
zaT@b|0X{d@OIJ7zvT>r+eTzgLq~|Dpu)Z&db-P4z*`M$UL51lf>FLlq6rfG)%doyp z)3kk_YIM!03eQ8Vu_2fg{+osaEJPtJ-s36R+5_AEG12`NG)IQ#TF9c@$99%0iye+ zUzZ57=m2)$D(5Nx!n)=5Au&O0BBgwxIBaeI(mro$#&UGCr<;C{UjJVAbVi%|+WP(a zL$U@TYCxJ=1{Z~}rnW;7UVb7+ZnzgmrogDxhjLGo>c~MiJAWs&&;AGg@%U?Y^0JhL ze(x6Z74JG6FlOFK(T}SXQfhr}RIFl@QXKnIcXYF)5|V~e-}suHILKT-k|<*~Ij|VF zC;t@=uj=hot~*!C68G8hTA%8SzOfETOXQ|3FSaIEjvBJp(A)7SWUi5!Eu#yWgY+;n zlm<$+UDou*V+246_o#V4kMdto8hF%%Lki#zPh}KYXmMf?hrN0;>Mv%`@{0Qn`Ujp) z=lZe+13>^Q!9zT);H<(#bIeRWz%#*}sgUX9P|9($kexOyKIOc`dLux}c$7It4u|Rl z6SSkY*V~g_B-hMPo_ak>>z@AVQ(_N)VY2kB3IZ0G(iDUYw+2d7W^~(Jq}KY=JnWS( z#rzEa&0uNhJ>QE8iiyz;n2H|SV#Og+wEZv=f2%1ELX!SX-(d3tEj$5$1}70Mp<&eI zCkfbByL7af=qQE@5vDVxx1}FSGt_a1DoE3SDI+G)mBAna)KBG4p8Epxl9QZ4BfdAN zFnF|Y(umr;gRgG6NLQ$?ZWgllEeeq~z^ZS7L?<(~O&$5|y)Al^iMKy}&W+eMm1W z7EMU)u^ke(A1#XCV>CZ71}P}0x)4wtHO8#JRG3MA-6g=`ZM!FcICCZ{IEw8Dm2&LQ z1|r)BUG^0GzI6f946RrBlfB1Vs)~8toZf~7)+G;pv&XiUO(%5bm)pl=p>nV^o*;&T z;}@oZSibzto$arQgfkp|z4Z($P>dTXE{4O=vY0!)kDO* zGF8a4wq#VaFpLfK!iELy@?-SeRrdz%F*}hjKcA*y@mj~VD3!it9lhRhX}5YOaR9$} z3mS%$2Be7{l(+MVx3 z(4?h;P!jnRmX9J9sYN#7i=iyj_5q7n#X(!cdqI2lnr8T$IfOW<_v`eB!d9xY1P=2q&WtOXY=D9QYteP)De?S4}FK6#6Ma z=E*V+#s8>L;8aVroK^6iKo=MH{4yEZ_>N-N z`(|;aOATba1^asjxlILk<4}f~`39dBFlxj>Dw(hMYKPO3EEt1@S`1lxFNM+J@uB7T zZ8WKjz7HF1-5&2=l=fqF-*@>n5J}jIxdDwpT?oKM3s8Nr`x8JnN-kCE?~aM1H!hAE z%%w(3kHfGwMnMmNj(SU(w42OrC-euI>Dsjk&jz3ts}WHqmMpzQ3vZrsXrZ|}+MHA7 z068obeXZTsO*6RS@o3x80E4ok``rV^Y3hr&C1;|ZZ0|*EKO`$lECUYG2gVFtUTw)R z4Um<0ZzlON`zTdvVdL#KFoMFQX*a5wM0Czp%wTtfK4Sjs)P**RW&?lP$(<}q%r68Z zS53Y!d@&~ne9O)A^tNrXHhXBkj~$8j%pT1%%mypa9AW5E&s9)rjF4@O3ytH{0z6riz|@< zB~UPh*wRFg2^7EbQrHf0y?E~dHlkOxof_a?M{LqQ^C!i2dawHTPYUE=X@2(3<=OOxs8qn_(y>pU>u^}3y&df{JarR0@VJn0f+U%UiF=$Wyq zQvnVHESil@d|8&R<%}uidGh7@u^(%?$#|&J$pvFC-n8&A>utA=n3#)yMkz+qnG3wd zP7xCnF|$9Dif@N~L)Vde3hW8W!UY0BgT2v(wzp;tlLmyk2%N|0jfG$%<;A&IVrOI< z!L)o>j>;dFaqA3pL}b-Je(bB@VJ4%!JeX@3x!i{yIeIso^=n?fDX`3bU=eG7sTc%g%ye8$v8P@yKE^XD=NYxTb zbf!Mk=h|otpqjFaA-vs5YOF-*GwWPc7VbaOW&stlANnCN8iftFMMrUdYNJ_Bnn5Vt zxfz@Ah|+4&P;reZxp;MmEI7C|FOv8NKUm8njF7Wb6Gi7DeODLl&G~}G4be&*Hi0Qw z5}77vL0P+7-B%UL@3n1&JPxW^d@vVwp?u#gVcJqY9#@-3X{ok#UfW3<1fb%FT`|)V~ggq z(3AUoUS-;7)^hCjdT0Kf{i}h)mBg4qhtHHBti=~h^n^OTH5U*XMgDLIR@sre`AaB$ zg)IGBET_4??m@cx&c~bA80O7B8CHR7(LX7%HThkeC*@vi{-pL%e)yXp!B2InafbDF zjPXf1mko3h59{lT6EEbxKO1Z5GF71)WwowO6kY|6tjSVSWdQ}NsK2x{>i|MKZK8%Q zfu&_0D;CO-Jg0#YmyfctyJ!mRJp)e#@O0mYdp|8x;G1%OZQ3Q847YWTyy|%^cpA;m zze0(5p{tMu^lDkpe?HynyO?a1$_LJl2L&mpeKu%8YvgRNr=%2z${%WThHG=vrWY@4 zsA`OP#O&)TetZ>s%h!=+CE15lOOls&nvC~$Qz0Ph7tHiP;O$i|eDwpT{cp>+)0-|; zY$|bB+Gbel>5aRN3>c0x)4U=|X+z+{ zn*_p*EQoquRL+=+p;=lm`d71&1NqBz&_ph)MXu(Nv6&XE7(RsS)^MGj5Q?Fwude-(sq zjJ>aOq!7!EN>@(fK7EE#;i_BGvli`5U;r!YA{JRodLBc6-`n8K+Fjgwb%sX;j=qHQ z7&Tr!)!{HXoO<2BQrV9Sw?JRaLXV8HrsNevvnf>Y-6|{T!pYLl7jp$-nEE z#X!4G4L#K0qG_4Z;Cj6=;b|Be$hi4JvMH!-voxqx^@8cXp`B??eFBz2lLD8RRaRGh zn7kUfy!YV~p(R|p7iC1Rdgt$_24i0cd-S8HpG|`@my70g^y`gu%#Tf_L21-k?sRRZHK&at(*ED0P8iw{7?R$9~OF$Ko;Iu5)ur5<->x!m93Eb zFYpIx60s=Wxxw=`$aS-O&dCO_9?b1yKiPCQmSQb>T)963`*U+Ydj5kI(B(B?HNP8r z*bfSBpSu)w(Z3j7HQoRjUG(+d=IaE~tv}y14zHHs|0UcN52fT8V_<@2ep_ee{QgZG zmgp8iv4V{k;~8@I%M3<#B;2R>Ef(Gg_cQM7%}0s*^)SK6!Ym+~P^58*wnwV1BW@eG z4sZLqsUvBbFsr#8u7S1r4teQ;t)Y@jnn_m5jS$CsW1um!p&PqAcc8!zyiXHVta9QC zY~wCwCF0U%xiQPD_INKtTb;A|Zf29(mu9NI;E zc-e>*1%(LSXB`g}kd`#}O;veb<(sk~RWL|f3ljxCnEZDdNSTDV6#Td({6l&y4IjKF z^}lIUq*ZUqgTPumD)RrCN{M^jhY>E~1pn|KOZ5((%F)G|*ZQ|r4zIbrEiV%42hJV8 z3xS)=!X1+=olbdGJ=yZil?oXLct8FM{(6ikLL3E%=q#O6(H$p~gQu6T8N!plf!96| z&Q3=`L~>U0zZh;z(pGR2^S^{#PrPxTRHD1RQOON&f)Siaf`GLj#UOk&(|@0?zm;Sx 
ztsGt8=29-MZs5CSf1l1jNFtNt5rFNZxJPvkNu~2}7*9468TWm>nN9TP&^!;J{-h)_ z7WsHH9|F%I`Pb!>KAS3jQWKfGivTVkMJLO-HUGM_a4UQ_%RgL6WZvrW+Z4ujZn;y@ zz9$=oO!7qVTaQAA^BhX&ZxS*|5dj803M=k&2%QrXda`-Q#IoZL6E(g+tN!6CA!CP* zCpWtCujIea)ENl0liwVfj)Nc<9mV%+e@=d`haoZ*`B7+PNjEbXBkv=B+Pi^~L#EO$D$ZqTiD8f<5$eyb54-(=3 zh)6i8i|jp(@OnRrY5B8t|LFXFQVQ895n*P16cEKTrT*~yLH6Z4e*bZ5otpRDri&+A zfNbK1D5@O=sm`fN=WzWyse!za5n%^+6dHPGX#8DyIK>?9qyX}2XvBWVqbP%%D)7$= z=#$WulZlZR<{m#gU7lwqK4WS1Ne$#_P{b17qe$~UOXCl>5b|6WVh;5vVnR<%d+Lnp z$uEmML38}U4vaW8>shm6CzB(Wei3s#NAWE3)a2)z@i{4jTn;;aQS)O@l{rUM`J@K& l00vQ5JBs~;vo!vr%%-k{2_Fq1Mn4QF81S)AQ99zk{{c4yR+0b! literal 0 HcmV?d00001 diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..a80b22ce --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 00000000..1aa94a42 --- /dev/null +++ b/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. 
+# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..25da30db --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 00000000..ca9eb466 --- /dev/null +++ b/settings.gradle @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +rootProject.name = 'apache-spark-kubernetes-operator' + +include 'spark-operator-api' +include 'spark-operator' +include 'spark-submission-worker' +include 'spark-operator-tests' diff --git a/spark-operator-api/build.gradle b/spark-operator-api/build.gradle new file mode 100644 index 00000000..8ce4b25b --- /dev/null +++ b/spark-operator-api/build.gradle @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +dependencies { + // fabric8 + implementation("io.fabric8:kubernetes-client:$fabric8Version") { + exclude group: 'com.squareup.okhttp3' + } + compileOnly("io.fabric8:crd-generator-apt:$fabric8Version") + annotationProcessor("io.fabric8:crd-generator-apt:$fabric8Version") + + // utils + implementation("org.apache.commons:commons-lang3:$commonsLang3Version") + implementation("commons-io:commons-io:$commonsIOVersion") + implementation("org.projectlombok:lombok:$lombokVersion") + annotationProcessor("org.projectlombok:lombok:$lombokVersion") + + // logging + implementation("org.apache.logging.log4j:log4j-slf4j-impl:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-core:$log4jVersion") + + testImplementation platform("org.junit:junit-bom:$junitVersion") + testImplementation 'org.junit.jupiter:junit-jupiter' +} + +test { + useJUnitPlatform() +} + +task finalizeGeneratedCRD(type: Exec, dependsOn: jar) { + println "Updating PrinterColumns for generated CRD" + commandLine 'sh', './src/main/resources/printer-columns.sh' +} + +task copyGeneratedCRD(type: Copy, dependsOn: finalizeGeneratedCRD) { + from "build/classes/java/main/META-INF/fabric8/sparkapplications.org.apache.spark-v1.yml" + into "../build-tools/helm/spark-kubernetes-operator/crds" +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java new file mode 100644 index 00000000..2df46de7 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import org.apache.spark.kubernetes.operator.spec.BaseSpec;
+import org.apache.spark.kubernetes.operator.status.BaseAttemptSummary;
+import org.apache.spark.kubernetes.operator.status.BaseState;
+import org.apache.spark.kubernetes.operator.status.BaseStatus;
+
+public class BaseResource<STATE extends BaseState<?>,
+        ATTEMPT_SUMMARY extends BaseAttemptSummary,
+        SPEC extends BaseSpec, STATUS extends BaseStatus<STATE, ATTEMPT_SUMMARY>>
+        extends CustomResource<SPEC, STATUS> implements Namespaced {
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java
new file mode 100644
index 00000000..a5f256ca
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+public class Constants {
+    public static final String API_GROUP = "org.apache.spark";
+    public static final String API_VERSION = "v1alpha1";
+    public static final String LABEL_SPARK_APPLICATION_NAME = "spark.operator/spark-app-name";
+    public static final String LABEL_SPARK_OPERATOR_NAME = "spark.operator/name";
+    public static final String LABEL_RESOURCE_NAME = "app.kubernetes.io/name";
+    public static final String LABEL_SPARK_ROLE_NAME = "spark-role";
+    public static final String LABEL_SPARK_ROLE_DRIVER_VALUE = "driver";
+    public static final String LABEL_SPARK_ROLE_EXECUTOR_VALUE = "executor";
+    public static final String SPARK_CONF_SENTINEL_DUMMY_FIELD = "sentinel.dummy.number";
+
+    public static final String SENTINEL_LABEL = "spark.operator/sentinel";
+
+    // Default state messages
+    public static final String DriverRequestedMessage =
+            "Requested driver from resource scheduler. ";
+    public static final String DriverCompletedMessage =
+            "Spark application completed successfully. ";
+    public static final String DriverTerminatedBeforeInitializationMessage =
+            "Driver container is terminated without SparkContext / SparkSession initialization. ";
+    public static final String DriverFailedInitContainersMessage =
+            "Driver has failed init container(s). Refer to the last observed status for details. ";
+    public static final String DriverFailedMessage =
+            "Driver has one or more failed critical container(s), refer to the last observed " +
+                    "status for details. ";
+    public static final String DriverSucceededMessage =
+            "Driver has critical container(s) exited with 0. ";
"; + public static final String DriverRestartedMessage = + "Driver has one or more critical container(s) restarted unexpectedly, refer last " + + "observed status for details. "; + public static final String AppStopRequestReceivedMessage = + "Received request to shutdown Spark application. "; + public static final String AppCancelledMessage = + "Spark application has been shutdown as requested. "; + public static final String DriverUnexpectedRemovedMessage = + "Driver removed. This could caused by 'exit' called in driver process with non-zero " + + "code, involuntary disruptions or unintentional destroy behavior, check " + + "Kubernetes events for more details. "; + public static final String DriverLaunchTimeoutMessage = + "The driver has not responded to the initial health check request within the " + + "allotted start-up time. This can be configured by setting " + + ".spec.applicationTolerations.applicationTimeoutConfig "; + public static final String DriverRunning = "Driver has started running. "; + public static final String DriverReady = "Driver has reached ready state. "; + public static final String SubmittedStateMessage = + "Spark application has been created on Kubernetes Cluster. "; + public static final String UnknownStateMessage = "Cannot process application status. "; + public static final String ExceedMaxRetryAttemptMessage = + "The maximum number of restart attempts (%d) has been exceeded. "; + public static final String ScheduleFailureMessage = + "Failed to request driver from scheduler backend. "; + public static final String RunningHealthyMessage = "Application is running healthy. "; + public static final String InitializedWithBelowThresholdExecutorsMessage = + "The application is running with less than minimal number of requested initial " + + "executors. "; + public static final String RunningWithBelowThresholdExecutorsMessage = + "The Spark application is running with less than minimal number of requested " + + "executors. "; + public static final String ExecutorLaunchTimeoutMessage = + "The Spark application failed to get enough executors in the given time threshold. "; +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java new file mode 100644 index 00000000..07b2b5a3 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonDeserialize()
+@Group(Constants.API_GROUP)
+@Version(Constants.API_VERSION)
+@ShortNames({"sparkapp"})
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class SparkApplication extends
+        BaseResource<ApplicationState, ApplicationAttemptSummary,
+                ApplicationSpec, ApplicationStatus> {
+    @Override
+    public ApplicationStatus initStatus() {
+        return new ApplicationStatus();
+    }
+
+    @Override
+    public ApplicationSpec initSpec() {
+        return new ApplicationSpec();
+    }
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplicationList.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplicationList.java
new file mode 100644
index 00000000..a2435b46
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplicationList.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import io.fabric8.kubernetes.api.model.DefaultKubernetesResourceList;
+import lombok.NoArgsConstructor;
+
+@NoArgsConstructor
+public class SparkApplicationList extends DefaultKubernetesResourceList<SparkApplication> {
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java
new file mode 100644
index 00000000..dc4b34c8
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.decorators;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+
+public interface ResourceDecorator<T extends HasMetadata> {
+    T decorate(T resource);
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/diff/Diffable.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/diff/Diffable.java
new file mode 100644
index 00000000..5361554f
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/diff/Diffable.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.diff;
+
+public interface Diffable {
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java
new file mode 100644
index 00000000..9d07d4b7
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import io.fabric8.generator.annotation.Required;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@Builder
+@EqualsAndHashCode(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationSpec extends BaseSpec {
+    protected String mainClass;
+    @Required
+    protected RuntimeVersions runtimeVersions;
+    protected String jars;
+    protected String pyFiles;
+    protected String sparkRFiles;
+    protected String files;
+    @Builder.Default
+    protected DeploymentMode deploymentMode = DeploymentMode.CLUSTER_MODE;
+    protected String proxyUser;
+    @Builder.Default
+    protected List<String> driverArgs = new ArrayList<>();
+    @Builder.Default
+    protected ApplicationTolerations applicationTolerations = new ApplicationTolerations();
+    protected BaseApplicationTemplateSpec driverSpec;
+    protected BaseApplicationTemplateSpec executorSpec;
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java
new file mode 100644
index 00000000..cdd1a2ba
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@Builder
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationTimeoutConfig {
+    @Builder.Default
+    protected Long driverStartTimeoutMillis = 300 * 1000L;
+    @Builder.Default
+    protected Long sparkSessionStartTimeoutMillis = 300 * 1000L;
+    @Builder.Default
+    protected Long executorStartTimeoutMillis = 300 * 1000L;
+    @Builder.Default
+    protected Long forceTerminationGracePeriodMillis = 300 * 1000L;
+    @Builder.Default
+    protected Long terminationRequeuePeriodMillis = 2 * 1000L;
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java
new file mode 100644
index 00000000..b9254c1b
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@Builder
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationTolerations {
+    @Builder.Default
+    protected RestartConfig restartConfig = new RestartConfig();
+    @Builder.Default
+    protected ApplicationTimeoutConfig applicationTimeoutConfig = new ApplicationTimeoutConfig();
+    /**
+     * Determines the toleration behavior for executor / worker instances.
+     */
+    @Builder.Default
+    protected InstanceConfig instanceConfig = new InstanceConfig();
+    /**
+     * If disabled, the operator will not attempt to delete resources after the app terminates.
+     * While keeping resources can be helpful during development, deletion should stay enabled
+     * for production use cases.
+     * Caution: to avoid resource conflicts among multiple attempts, deletion can be disabled
+     * only if the restart policy is set to NEVER.
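+     *
+     * For example, a snippet like the one below (an illustrative sketch; field names as
+     * defined in this spec, values hypothetical) keeps all resources of a single,
+     * never-restarted attempt around for inspection:
+     * applicationTolerations:
+     *   restartConfig:
+     *     restartPolicy: NEVER
+     *   deleteOnTermination: false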
+ */ + @Builder.Default + protected Boolean deleteOnTermination = true; +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java new file mode 100644 index 00000000..894cf7ad --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.spec; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import io.fabric8.kubernetes.api.model.PodTemplateSpec; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +@Builder +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties(ignoreUnknown = true) +public class BaseApplicationTemplateSpec { + protected PodTemplateSpec podTemplateSpec; +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java new file mode 100644 index 00000000..57a992c8 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.Data;
+import org.apache.spark.kubernetes.operator.diff.Diffable;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@Data
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BaseSpec implements Diffable {
+    protected Map<String, String> sparkConf = new HashMap<>();
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java
new file mode 100644
index 00000000..d13cddfe
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+public enum DeploymentMode {
+    CLUSTER_MODE,
+    CLIENT_MODE
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java
new file mode 100644
index 00000000..229c920f
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * Configures tolerations of executor instances for the application.
+ * Used when the target cluster lacks batch / gang scheduling.
+ * This is different from the SparkConf setting spark.executor.instances.
+ *
+ * For example, with the spec below:
+ * spec:
+ *   applicationTolerations:
+ *     instanceConfig:
+ *       minExecutors: 3
+ *       initExecutors: 5
+ *       maxExecutors: 10
+ *   sparkConf:
+ *     spark.executor.instances: "10"
+ *
+ * Spark would try to bring up 10 executors as defined in SparkConf. In addition, from the
+ * SparkApp perspective,
+ *   + If the Spark app acquires fewer than 5 executors in the given time window
+ *     (.spec.applicationTolerations.applicationTimeoutConfig.executorStartTimeoutMillis) after
+ *     being submitted, it would be shut down proactively in order to avoid resource deadlock.
+ *   + The Spark app would be marked as 'RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS' if it loses
+ *     executors after successfully starting up.
+ *   + The Spark app would be marked as 'RUNNING_HEALTHY' if it has at least the minimal
+ *     number of executors after successfully starting up.
+ */
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@Builder
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class InstanceConfig {
+    @Builder.Default
+    protected long initExecutors = 0L;
+    @Builder.Default
+    protected long minExecutors = 0L;
+    @Builder.Default
+    protected long maxExecutors = 0L;
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java
new file mode 100644
index 00000000..26394ca5
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+public enum JDKVersion {
+    Java11,
+    Java17
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java
new file mode 100644
index 00000000..c25b1b1e
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.spec; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +@Builder +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties(ignoreUnknown = true) +public class RestartConfig { + @Builder.Default + protected RestartPolicy restartPolicy = RestartPolicy.NEVER; + @Builder.Default + protected Long maxRestartAttempts = 3L; + @Builder.Default + protected Long restartBackoffMillis = 30000L; +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java new file mode 100644 index 00000000..856c8fe5 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.spec; + +import org.apache.spark.kubernetes.operator.status.BaseStateSummary; + +public enum RestartPolicy { + ALWAYS, + NEVER, + ON_FAILURE, + ON_INFRASTRUCTURE_FAILURE; + + public static boolean attemptRestartOnState(final RestartPolicy policy, + final BaseStateSummary stateSummary) { + switch (policy) { + case NEVER: + return false; + case ALWAYS: + return true; + case ON_FAILURE: + return stateSummary.isFailure(); + case ON_INFRASTRUCTURE_FAILURE: + return stateSummary.isInfrastructureFailure(); + } + return false; + } +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java new file mode 100644 index 00000000..8dd7b38d --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.spec; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import io.fabric8.generator.annotation.Required; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +@Builder +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties(ignoreUnknown = true) +public class RuntimeVersions { + @Required + protected SparkVersion sparkVersion; + protected ScalaVersion scalaVersion; + protected JDKVersion jdkVersion; +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java new file mode 100644 index 00000000..d786d691 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.spec; + +public enum ScalaVersion { + v2_12, + v2_13 +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java new file mode 100644 index 00000000..df21d398 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+/**
+ * Spark versions supported by the Spark Operator with official Spark images.
+ */
+public enum SparkVersion {
+    v3_5_1,
+    v3_5_0,
+    v3_4_2,
+    v3_4_1,
+    v3_4_0,
+    v3_3_3,
+    v3_3_2,
+    v3_3_1,
+    v3_3_0,
+    v3_2_0
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java
new file mode 100644
index 00000000..a3013b36
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+
+import java.util.Map;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@EqualsAndHashCode(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationAttemptSummary extends BaseAttemptSummary {
+    // The state transition history for a given attempt.
+    // This is used when state history trimming is enabled.
+    protected Map<Long, ApplicationState> stateTransitionHistory;
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java
new file mode 100644
index 00000000..e8fbb7d7
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import io.fabric8.kubernetes.api.model.PodStatus;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.Setter;
+import lombok.ToString;
+
+import java.io.Serializable;
+import java.time.Instant;
+
+import static org.apache.spark.kubernetes.operator.Constants.SubmittedStateMessage;
+
+@ToString(callSuper = true)
+@EqualsAndHashCode(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationState extends BaseState<ApplicationStateSummary> implements Serializable {
+
+    @Getter
+    @Setter
+    PodStatus lastObservedDriverStatus;
+
+    public ApplicationState() {
+        super(ApplicationStateSummary.SUBMITTED, Instant.now().toString(), SubmittedStateMessage);
+    }
+
+    public ApplicationState(ApplicationStateSummary currentStateSummary, String message) {
+        super(currentStateSummary, Instant.now().toString(), message);
+    }
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java
new file mode 100644
index 00000000..88a30e08
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import java.util.Set;
+
+public enum ApplicationStateSummary implements BaseStateSummary {
+    /**
+     * Spark application is submitted to the cluster but not yet scheduled.
+     */
+    SUBMITTED,
+
+    /**
+     * Spark application will be restarted with the same configuration.
+     */
+    SCHEDULED_TO_RESTART,
+
+    /**
+     * A request has been made to start the driver pod in the cluster.
+     */
+    DRIVER_REQUESTED,
+
+    /**
+     * Driver pod has reached the running state.
+     */
+    DRIVER_STARTED,
+
+    /**
+     * Spark session is initialized.
+     */
+    DRIVER_READY,
+
+    /**
+     * Fewer than the minimal number of required executor pods became ready during startup.
+     */
+    INITIALIZED_BELOW_THRESHOLD_EXECUTORS,
+
+    /**
+     * All required executor pods have started.
+     */
+    RUNNING_HEALTHY,
+
+    /**
+     * The application has lost a fraction of its executors for external reasons.
+     */
+    RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS,
+
+    /**
+     * The request timed out for the driver.
+     */
+    DRIVER_LAUNCH_TIMED_OUT,
+
+    /**
+     * The request timed out for the executors.
+     */
+    EXECUTORS_LAUNCH_TIMED_OUT,
+
+    /**
+     * Timed out waiting for the context to be initialized.
+     */
+    SPARK_SESSION_INITIALIZATION_TIMED_OUT,
+
+    /**
+     * The application completed successfully, or System.exit was called explicitly with a
+     * zero exit code.
+     */
+    SUCCEEDED,
+
+    /**
+     * The application has failed: the JVM exited abnormally, or System.exit was called
+     * explicitly with a non-zero exit code.
+     */
+    FAILED,
+
+    /**
+     * The job has failed because of a scheduler-side issue, e.g. the driver was scheduled on
+     * a node with insufficient resources.
+     */
+    SCHEDULING_FAILURE,
+
+    /**
+     * The driver pod failed with the 'Evicted' reason.
+     */
+    DRIVER_EVICTED,
+
+    /**
+     * All resources (pods, services, etc.) have been cleaned up.
+     */
+    RESOURCE_RELEASED,
+
+    /**
+     * If configured, the operator may mark the app as terminated without releasing resources.
+     * While this can be helpful during development, it should not be enabled for production
+     * use cases.
+     */
+    TERMINATED_WITHOUT_RELEASE_RESOURCES;
+
+    public boolean isInitializing() {
+        return SUBMITTED.equals(this) || SCHEDULED_TO_RESTART.equals(this);
+    }
+
+    public boolean isStarting() {
+        return SCHEDULED_TO_RESTART.ordinal() < this.ordinal()
+                && RUNNING_HEALTHY.ordinal() > this.ordinal();
+    }
+
+    public boolean isTerminated() {
+        return RESOURCE_RELEASED.equals(this)
+                || TERMINATED_WITHOUT_RELEASE_RESOURCES.equals(this);
+    }
+
+    public boolean isStopping() {
+        return RUNNING_HEALTHY.ordinal() < this.ordinal() && !isTerminated();
+    }
+
+    public static final Set<ApplicationStateSummary> infrastructureFailures =
+            Set.of(DRIVER_LAUNCH_TIMED_OUT,
+                    EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE);
+
+    public static final Set<ApplicationStateSummary> failures = Set.of(DRIVER_LAUNCH_TIMED_OUT,
+            EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE, FAILED,
+            SPARK_SESSION_INITIALIZATION_TIMED_OUT);
+
+    @Override
+    public boolean isFailure() {
+        return failures.contains(this);
+    }
+
+    @Override
+    public boolean isInfrastructureFailure() {
+        return infrastructureFailures.contains(this);
+    }
+
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java
new file mode 100644
index 00000000..bc944d53
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.EqualsAndHashCode;
+import lombok.ToString;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.spark.kubernetes.operator.spec.RestartConfig;
+import org.apache.spark.kubernetes.operator.spec.RestartPolicy;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.spark.kubernetes.operator.Constants.ExceedMaxRetryAttemptMessage;
+
+@EqualsAndHashCode(callSuper = true)
+@ToString(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ApplicationStatus
+        extends BaseStatus<ApplicationState, ApplicationAttemptSummary> {
+
+    public ApplicationStatus() {
+        super(new ApplicationState(), new ApplicationAttemptSummary());
+    }
+
+    public ApplicationStatus(ApplicationState currentState,
+                             Map<Long, ApplicationState> stateTransitionHistory,
+                             ApplicationAttemptSummary previousAttemptSummary,
+                             ApplicationAttemptSummary currentAttemptSummary) {
+        super(currentState, stateTransitionHistory, previousAttemptSummary, currentAttemptSummary);
+    }
+
+    /**
+     * Creates a new ApplicationStatus with the given state set as current and the state
+     * transition history updated accordingly.
+     */
+    public ApplicationStatus appendNewState(ApplicationState state) {
+        return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state),
+                previousAttemptSummary, currentAttemptSummary);
+    }
+
+    /**
+     * Creates the ApplicationStatus to be applied upon termination of the current attempt,
+     * with respect to the current state and restart config.
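+     *
+     * As an illustration (values hypothetical), an attempt that exhausts
+     * maxRestartAttempts = 3 terminates with a current state like:
+     * currentState:
+     *   currentStateSummary: RESOURCE_RELEASED
+     *   message: "The maximum number of restart attempts (3) has been exceeded. "
+     * while an eligible failure below that limit transitions to SCHEDULED_TO_RESTART with
+     * the next attempt id.
+     *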
+ * @return updated ApplicationStatus
+ */
+ public ApplicationStatus terminateOrRestart(final RestartConfig restartConfig,
+                                             String stateMessageOverride,
+                                             boolean trimStateTransitionHistory) {
+     if (!currentState.currentStateSummary.isStopping()) {
+         // the application is not in a stopping state - refuse to terminate or restart
+         throw new RuntimeException(
+             "Spark application cannot be directly terminated unless in stopping "
+                 + "state, current state is: " + currentState);
+     }
+
+     if (!RestartPolicy.attemptRestartOnState(restartConfig.getRestartPolicy(),
+         currentState.getCurrentStateSummary())) {
+         // no restart configured
+         ApplicationState state = new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED,
+             stateMessageOverride);
+         return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state),
+             previousAttemptSummary, currentAttemptSummary);
+     }
+
+     if (currentAttemptSummary.getAttemptInfo().getId() >=
+         restartConfig.getMaxRestartAttempts()) {
+         String stateMessage = String.format(ExceedMaxRetryAttemptMessage,
+             restartConfig.getMaxRestartAttempts());
+         if (StringUtils.isNotEmpty(stateMessageOverride)) {
+             stateMessage += stateMessageOverride;
+         }
+         // max number of restart attempts reached
+         ApplicationState state =
+             new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, stateMessage);
+         // still use previous & current attempt summary - they are to be updated only upon
+         // new restart
+         return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state),
+             previousAttemptSummary, currentAttemptSummary);
+     }
+
+     ApplicationAttemptSummary nextAttemptSummary = new ApplicationAttemptSummary();
+     nextAttemptSummary.setAttemptInfo(
+         currentAttemptSummary.getAttemptInfo().createNextAttemptInfo());
+     ApplicationState state = new ApplicationState(ApplicationStateSummary.SCHEDULED_TO_RESTART,
+         stateMessageOverride);
+
+     if (trimStateTransitionHistory) {
+         currentAttemptSummary.setStateTransitionHistory(stateTransitionHistory);
+         return new ApplicationStatus(state,
+             Collections.singletonMap(getCurrentStateId() + 1, state), currentAttemptSummary,
+             nextAttemptSummary);
+     } else {
+         return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state),
+             currentAttemptSummary, nextAttemptSummary);
+     }
+ }
+
+ private Map<Long, ApplicationState> createUpdatedHistoryWithNewState(ApplicationState state) {
+     TreeMap<Long, ApplicationState> updatedHistory = new TreeMap<>(stateTransitionHistory);
+     updatedHistory.put(updatedHistory.lastKey() + 1L, state);
+     return updatedHistory;
+ }
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java
new file mode 100644
index 00000000..eed944eb
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.status; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +@Builder +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties(ignoreUnknown = true) +public class AttemptInfo { + @Builder.Default + protected final Long id = 0L; + + public AttemptInfo createNextAttemptInfo() { + return new AttemptInfo(id + 1L); + } +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java new file mode 100644 index 00000000..0d2d5412 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.status; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import lombok.Data; + +@Data +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties(ignoreUnknown = true) +public class BaseAttemptSummary { + protected AttemptInfo attemptInfo = new AttemptInfo(); +} diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java new file mode 100644 index 00000000..92a5a5c1 --- /dev/null +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+
+import java.io.Serializable;
+
+@Data
+@AllArgsConstructor
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BaseState<T extends BaseStateSummary> implements Serializable {
+    protected T currentStateSummary;
+    protected String lastTransitionTime;
+    protected String message;
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java
new file mode 100644
index 00000000..6e1af4bb
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+public interface BaseStateSummary {
+    boolean isFailure();
+
+    boolean isInfrastructureFailure();
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java
new file mode 100644
index 00000000..0144aa1d
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.ToString;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+@ToString
+@EqualsAndHashCode
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BaseStatus<T extends BaseStateSummary, STATE extends BaseState<T>,
+    AS extends BaseAttemptSummary> {
+    @Getter
+    STATE currentState;
+    @Getter
+    Map<Long, STATE> stateTransitionHistory;
+    @Getter
+    AS previousAttemptSummary;
+    @Getter
+    AS currentAttemptSummary;
+
+    public BaseStatus(STATE initState, AS currentAttemptSummary) {
+        this.currentState = initState;
+        this.stateTransitionHistory = new TreeMap<>();
+        this.stateTransitionHistory.put(0L, initState);
+        this.previousAttemptSummary = null;
+        this.currentAttemptSummary = currentAttemptSummary;
+    }
+
+    public BaseStatus(STATE currentState,
+                      Map<Long, STATE> stateTransitionHistory,
+                      AS previousAttemptSummary,
+                      AS currentAttemptSummary) {
+        this.currentState = currentState;
+        this.stateTransitionHistory = new TreeMap<>(stateTransitionHistory);
+        this.previousAttemptSummary = previousAttemptSummary;
+        this.currentAttemptSummary = currentAttemptSummary;
+    }
+
+    protected long getCurrentStateId() {
+        return Collections.max(stateTransitionHistory.keySet());
+    }
+}
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java
new file mode 100644
index 00000000..d3d71381
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.utils;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.OwnerReference;
+import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder;
+import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.api.model.PodBuilder;
+import io.fabric8.kubernetes.api.model.PodTemplateSpec;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+
+public class ModelUtils {
+    public static final String DRIVER_SPARK_CONTAINER_PROP_KEY =
+        "spark.kubernetes.driver.podTemplateContainerName";
+    public static final String DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY =
+        "spark.kubernetes.driver.podTemplateFile";
+    public static final String EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY =
+        "spark.kubernetes.executor.podTemplateFile";
+    public static final ObjectMapper objectMapper = new ObjectMapper();
+
+    public static Pod defaultPod() {
+        return new PodBuilder()
+            .withNewMetadata()
+            .endMetadata()
+            .withNewSpec()
+            .endSpec()
+            .build();
+    }
+
+    public static Pod getPodFromTemplateSpec(PodTemplateSpec podTemplateSpec) {
+        if (podTemplateSpec != null) {
+            return new PodBuilder()
+                .withMetadata(podTemplateSpec.getMetadata())
+                .withSpec(podTemplateSpec.getSpec())
+                .withAdditionalProperties(podTemplateSpec.getAdditionalProperties())
+                .build();
+        } else {
+            return defaultPod();
+        }
+    }
+
+    /**
+     * Return true if the given container name is the main container in the driver pod.
+     * If `spark.kubernetes.driver.podTemplateContainerName` is not set, all containers are
+     * considered main containers.
+     */
+    public static boolean isDriverMainContainer(final ApplicationSpec appSpec,
+                                                final String containerName) {
+        if (appSpec == null || appSpec.getSparkConf() == null
+            || !appSpec.getSparkConf().containsKey(DRIVER_SPARK_CONTAINER_PROP_KEY)) {
+            return true;
+        }
+        return appSpec.getSparkConf().get(DRIVER_SPARK_CONTAINER_PROP_KEY)
+            .equalsIgnoreCase(containerName);
+    }
+
+    /**
+     * Build an OwnerReference to the given resource
+     *
+     * @param owner the owner
+     * @return OwnerReference to be used for subresources
+     */
+    public static OwnerReference buildOwnerReferenceTo(HasMetadata owner) {
+        return new OwnerReferenceBuilder()
+            .withName(owner.getMetadata().getName())
+            .withApiVersion(owner.getApiVersion())
+            .withKind(owner.getKind())
+            .withUid(owner.getMetadata().getUid())
+            .withBlockOwnerDeletion(true)
+            .build();
+    }
+
+    public static <T> String asJsonString(T resource) {
+        try {
+            return objectMapper.writeValueAsString(resource);
+        } catch (JsonProcessingException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static boolean overrideDriverTemplate(ApplicationSpec applicationSpec) {
+        return applicationSpec != null && applicationSpec.getDriverSpec() != null
+            && applicationSpec.getDriverSpec().getPodTemplateSpec() != null;
+    }
+
+    public static boolean overrideExecutorTemplate(ApplicationSpec applicationSpec) {
+        return applicationSpec != null && applicationSpec.getExecutorSpec() != null
+            && applicationSpec.getExecutorSpec().getPodTemplateSpec() != null;
+    }
+}
diff --git a/spark-operator-api/src/main/resources/printer-columns.sh b/spark-operator-api/src/main/resources/printer-columns.sh
new file mode 100755
index 00000000..b64a56eb
--- /dev/null
+++ b/spark-operator-api/src/main/resources/printer-columns.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or
more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+# This is a workaround. See https://github.com/fabric8io/kubernetes-client/issues/3069
+# We use yq to add the additional printer columns
+
+script_path=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+crd_path="${script_path}/../../../build/classes/java/main/META-INF/fabric8/sparkapplications.org.apache.spark-v1.yml"
+yq -i '.spec.versions[0] += ({"additionalPrinterColumns": [{"jsonPath": ".status.currentState.currentStateSummary", "name": "Current State", "type": "string"}, {"jsonPath": ".metadata.creationTimestamp", "name": "Age", "type": "date"}]})' "$crd_path"
+
diff --git a/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java b/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java
new file mode 100644
index 00000000..89a1b3ce
--- /dev/null
+++ b/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.status;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.spark.kubernetes.operator.status.ApplicationStateSummary.SUBMITTED;
+
+class ApplicationStatusTest {
+
+    @Test
+    void testInitStatus() {
+        ApplicationStatus applicationStatus = new ApplicationStatus();
+        Assertions.assertEquals(SUBMITTED, applicationStatus.currentState.currentStateSummary);
+        Assertions.assertEquals(1, applicationStatus.stateTransitionHistory.size());
+        Assertions.assertEquals(applicationStatus.currentState,
+            applicationStatus.stateTransitionHistory.get(0L));
+    }
+
+    @Test
+    void testAppendNewState() {
+        ApplicationStatus applicationStatus = new ApplicationStatus();
+        ApplicationState newState =
+            new ApplicationState(ApplicationStateSummary.RUNNING_HEALTHY, "foo");
+        ApplicationStatus newStatus = applicationStatus.appendNewState(newState);
+        Assertions.assertEquals(2, newStatus.stateTransitionHistory.size());
+        Assertions.assertEquals(newState, newStatus.stateTransitionHistory.get(1L));
+    }
+
+}
diff --git a/spark-operator-docs/.gitignore b/spark-operator-docs/.gitignore
new file mode 100644
index 00000000..3348a208
--- /dev/null
+++ b/spark-operator-docs/.gitignore
@@ -0,0 +1,42 @@
+.gradle
+build/
+!gradle/wrapper/gradle-wrapper.jar
+!**/src/main/**/build/
+!**/src/test/**/build/
+
+### IntelliJ IDEA ###
+.idea/modules.xml
+.idea/jarRepositories.xml
+.idea/compiler.xml
+.idea/libraries/
+*.iws
+*.iml
+*.ipr
+out/
+!**/src/main/**/out/
+!**/src/test/**/out/
+
+### Eclipse ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+bin/
+!**/src/main/**/bin/
+!**/src/test/**/bin/
+
+### NetBeans ###
+/nbproject/private/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/
+
+### VS Code ###
+.vscode/
+
+### Mac OS ###
+.DS_Store
diff --git a/spark-operator-docs/architecture.md b/spark-operator-docs/architecture.md
new file mode 100644
index 00000000..a37000ad
--- /dev/null
+++ b/spark-operator-docs/architecture.md
@@ -0,0 +1,64 @@
+
+
+# Design & Architecture
+
+**Spark-Kubernetes-Operator** (Operator) acts as a control plane to manage the complete
+deployment lifecycle of Spark applications. The Operator can be installed on a Kubernetes
+cluster using Helm. In most production environments it is typically deployed in a designated
+namespace and controls Spark deployments in one or more managed namespaces. The custom resource
+definition (CRD) that describes the schema of a SparkApplication is a cluster-wide resource.
+For a CRD, the declaration must be registered before any resources of that CRD's kind(s) can be
+used, and the registration process sometimes takes a few seconds.
+
+Users can interact with the operator using kubectl or the Kubernetes API. The Operator
+continuously tracks cluster events relating to the SparkApplication custom resources. When the
+operator receives a new resource update, it will take action to adjust the Kubernetes cluster
+to the desired state as part of its reconciliation loop. The initial loop consists of the
+following high-level steps (a minimal example CR is sketched after this list):
+
+* User submits a SparkApplication custom resource (CR) using kubectl / the API
+* Operator launches the driver and observes its status
+* Operator observes driver-spawned resources (e.g. executors) until the app terminates
+* Operator releases all Spark-app-owned resources back to the cluster
+* The SparkApplication CR can be (re)applied on the cluster at any time - e.g. to issue a
+  proactive termination of an application. The Operator makes continuous adjustments until the
+  current state matches the desired state. All lifecycle management operations are realized
+  using this very simple principle in the Operator.
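+
+For reference, here is a minimal sketch of the kind of SparkApplication CR this loop
+reconciles. The field values are illustrative only, abridged from the spark-pi example in the
+[getting started guide](getting_started.md):
+
+```yaml
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: spark-pi
+spec:
+  mainClass: org.apache.spark.examples.SparkPi
+  jars: local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar
+  runtimeVersions:
+    scalaVersion: v2_12
+    sparkVersion: v3_4_1
+  sparkConf:
+    spark.executor.instances: "2"   # illustrative; any Spark conf can be set here
+```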
+
+The Operator is built with the Java Operator SDK and uses the Native Kubernetes Integration for
+launching Spark deployments and submitting jobs under the hood. The Java Operator SDK is a
+higher-level framework and related tooling to support writing Kubernetes Operators in Java.
+Both the Java Operator SDK and Spark's native Kubernetes integration use the Fabric8
+Kubernetes Client to interact with the Kubernetes API Server.
+
+## State Transition
+
+![State transition](resources/state.png)
+
+* Spark applications are expected to run from submitted to succeeded before releasing resources
+* Users may configure the app CR to time out after a given threshold of time
+* Users may configure the app CR to skip releasing resources after termination. This is
+  typically used in the dev phase: pods, configmaps, etc. would be kept for debugging. They
+  have an ownerReference to the Application CR and can therefore still be cleaned up when the
+  owning SparkApplication CR is deleted.
diff --git a/spark-operator-docs/configuration.md b/spark-operator-docs/configuration.md
new file mode 100644
index 00000000..cb6428d6
--- /dev/null
+++ b/spark-operator-docs/configuration.md
@@ -0,0 +1,99 @@
+
+
+# Configuration
+
+## Configure Operator
+
+Spark Operator supports different ways to configure its behavior:
+
+* **spark-operator.properties** provided when deploying the operator. In addition to the
+  [property file](../build-tools/helm/spark-kubernetes-operator/conf/spark-operator.properties),
+  it is also possible to override or append config properties in the helm
+  [Values files](../build-tools/helm/spark-kubernetes-operator/values.yaml).
+* **System Properties**: when provided as system properties (e.g. via -D options to the
+  operator JVM), they override the values provided in the property file.
+* **Hot property loading**: when enabled, a
+  [configmap](https://kubernetes.io/docs/concepts/configuration/configmap/) would be created
+  with the operator in the same namespace. The operator monitors updates performed on the
+  configmap. Hot property overrides take the highest precedence.
+  - An example use case: the operator uses hot properties to configure the list of
+    namespace(s) in which to operate Spark applications. The hot properties config map can be
+    updated and maintained by the user or an additional microservice to tune the operator
+    behavior without rebooting it.
+  - Please be advised that not all properties can be hot-loaded and honored at runtime.
+    Refer to the list of supported properties for more details.
+
+To enable hot properties loading, update the **helm chart values file** with
+
+```
+
+operatorConfiguration:
+  spark-operator.properties: |+
+    spark.operator.dynamic.config.enabled=true
+    # ... all other config overrides ...
+  dynamicConfig:
+    create: true
+
+```
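+
+For illustration, the dynamic config map that the operator watches could look roughly like the
+sketch below. The map name here is hypothetical; what matters is that the map lives in the
+operator namespace and carries the label expected by
+`spark.operator.dynamic.config.selector.str` (see the table below):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spark-operator-dynamic-config            # hypothetical name
+  namespace: spark-system                        # must match spark.operator.namespace
+  labels:
+    app.kubernetes.io/component: dynamic-config  # default selector, see table below
+data:
+  spark-operator.properties: |
+    # only hot-loadable properties are honored here
+    spark.operator.watched.namespaces=team-a,team-b
+```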
+
+## Supported Config Properties
+
+| Name | Type | Default Value | Allow Hot Property Override | Description |
+|------|------|---------------|-----------------------------|-------------|
+| spark.operator.name | string | spark-kubernetes-operator | false | Name of the operator. |
+| spark.operator.namespace | string | spark-system | false | Namespace that the operator is deployed within. |
+| spark.operator.watched.namespaces | string | | true | Comma-separated list of namespaces that the operator would be watching for Spark resources. If unset, the operator would watch all namespaces by default. When deployed via Helm, please note that the value should be a subset of .Values.appResources.namespaces.data. |
+| spark.operator.dynamic.config.enabled | boolean | false | false | When enabled, the operator would use a config map as the source of truth for config property overrides. The config map needs to be created in spark.operator.namespace, and labeled with the operator name. |
+| spark.operator.dynamic.config.selector.str | string | `app.kubernetes.io/component=dynamic-config` | false | The selector string applied to the dynamic config map. |
+| spark.operator.terminate.on.informer.failure | boolean | false | false | Enable to indicate that informer errors should stop operator startup. If disabled, operator startup will ignore recoverable errors, caused for example by RBAC issues, and will retry periodically. |
+| spark.operator.termination.timeout.seconds | integer | 30 | false | Grace period for operator shutdown before reconciliation threads are killed. |
+| spark.operator.reconciler.parallelism | integer | 30 | false | Thread pool size for Spark Operator reconcilers. Use -1 for an unbounded pool. |
+| spark.operator.rate.limiter.refresh.period.seconds | integer | 15 | false | Operator rate limiter refresh period (in seconds) for each resource. |
+| spark.operator.rate.limiter.limit | integer | 5 | false | Max number of reconcile loops triggered within the rate limiter refresh period for each resource. Setting the limit <= 0 disables the limiter. |
+| spark.operator.retry.initial.internal.seconds | integer | 5 | false | Initial interval (in seconds) of retries on unhandled controller errors. |
+| spark.operator.retry.internal.multiplier | double | 1.5 | false | Interval multiplier of retries on unhandled controller errors. |
+| spark.operator.retry.max.interval.seconds | integer | -1 | false | Max interval (in seconds) of retries on unhandled controller errors. Set to -1 for unlimited. |
+| spark.operator.retry.max.attempts | integer | 15 | false | Max attempts of retries on unhandled controller errors. |
+| spark.operator.driver.create.max.attempts | integer | 3 | true | Maximal number of retry attempts for requesting the driver for a Spark application. |
+| spark.operator.max.retry.attempts.on.kube.failure | long | 3 | true | Maximal number of retry attempts for requests to the k8s server upon response 429 and 5xx. |
+| spark.operator.retry.attempt.after.seconds | long | 1 | true | Default time (in seconds) to wait until the next request. This is used if the server does not set Retry-After in the response. |
+| spark.operator.max.retry.attempt.after.seconds | long | 15 | true | Maximal time (in seconds) to wait until the next request. |
+| spark.operator.status.patch.max.retry | long | 3 | true | Maximal number of retry attempts for requests to the k8s server for resource status updates. |
+| spark.operator.status.patch.failure.backoff.seconds | long | 3 | true | Default time (in seconds) to wait until the next request to patch a resource status update. |
+| spark.operator.app.reconcile.interval.seconds | long | 120 | true | Interval (in seconds) to reconcile when the application is starting up. Note that reconcile is always expected to be triggered per update; this interval controls the reconcile behavior when the operator still needs to reconcile even when there is no update, e.g. for timeout checks. |
+| spark.operator.foreground.request.timeout.seconds | long | 120 | true | Timeout (in seconds) for requests made to the API server. This applies only to foreground requests. |
+| spark.operator.trim.attempt.state.transition.history | boolean | true | true | When enabled, the operator would trim the state transition history when a new attempt starts, keeping the previous attempt summary only. |
+| spark.operator.josdk.metrics.enabled | boolean | true | true | When enabled, the JOSDK metrics will be added to the metrics source and configured for the operator. |
+| spark.operator.kubernetes.client.metrics.enabled | boolean | true | true | Enable KubernetesClient metrics for measuring the HTTP traffic to the Kubernetes API Server. Since the metrics are collected via OkHttp interceptors, this can be disabled when opting in to customized interceptors. |
+| spark.operator.kubernetes.client.metrics.group.by.response.code.group.enable | boolean | true | true | When enabled, additional metrics grouped by HTTP response code group (1xx, 2xx, 3xx, 4xx, 5xx) received from the API server will be added. Users can disable it when their monitoring system can combine lower-level kubernetes.client.http.response.<3-digit-response-code> metrics. |
+| spark.operator.probe.port | integer | 18080 | false | The port used for health/readiness check probe status. |
+| spark.operator.sentinel.executor.pool.size | integer | 3 | false | Size of the executor service in Sentinel Managers to check the health of sentinel resources. |
+| spark.operator.health.sentinel.resource.reconciliation.delay.seconds | integer | 60 | true | Allowed max time (seconds) between spec update and reconciliation for sentinel resources. |
+| spark.operator.leader.election.enabled | boolean | false | false | Enable leader election for the operator to allow running standby instances. |
+| spark.operator.leader.election.lease.name | string | spark-operator-lease | false | Leader election lease name, must be unique for leases in the same namespace. |
+| spark.operator.leader.election.lease.duration.seconds | long | 1200 | false | Leader election lease duration. |
+| spark.operator.leader.election.renew.deadline.seconds | long | 600 | false | Leader election renew deadline. |
+| spark.operator.leader.election.retry.period.seconds | long | 180 | false | Leader election retry period. |
+| spark.operator.metrics.port | integer | 19090 | false | The port used for exporting metrics. |
+
+## Config Metrics Publishing Behavior
+
+Spark Operator uses the same source & sink interface as Apache Spark. This means you can
+use existing Spark metrics sinks for both applications and the operator.
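+
+As a sketch, a values override that wires operator metrics into one of Spark's stock sinks
+could look like the following. The assumption here is that stock Spark sinks other than
+MetricsServlet/PrometheusServlet (for example `org.apache.spark.metrics.sink.Slf4jSink`) can be
+plugged in; the `operator.sink` instance prefix mirrors the Prometheus example in
+[metrics_logging.md](metrics_logging.md):
+
+```yaml
+operatorConfiguration:
+  metrics.properties: |+
+    # assumption: Slf4jSink is wired the same way as the PrometheusPullModelSink example
+    spark.metrics.conf.operator.sink.slf4j.class=org.apache.spark.metrics.sink.Slf4jSink
+```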
diff --git a/spark-operator-docs/developer_guide.md b/spark-operator-docs/developer_guide.md
new file mode 100644
index 00000000..35bc0fcd
--- /dev/null
+++ b/spark-operator-docs/developer_guide.md
@@ -0,0 +1,84 @@
+
+
+# Developer Guide
+
+## Build Operator Locally
+
+To build the operator locally, use
+
+```shell
+./gradlew clean build
+```
+
+If you are working on API (CRD) changes, remember to update the CRD yaml in the chart as well
+
+```shell
+# This requires yq installed locally to add the additional printer columns;
+# it could be removed after fixing https://github.com/fabric8io/kubernetes-client/issues/3069
+./gradlew :spark-operator-api:copyGeneratedCRD
+```
+
+## Build Operator Image Locally
+
+ ```bash
+ # Build a local container image which can be used for minikube, etc.
+ # For testing in a remote k8s cluster, please also do `docker push` to make it available
+ # to the cluster / nodes
+ docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+ ```
+
+## Deploy Operator
+
+### Install the Spark Operator
+
+ ```bash
+ helm install spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ build-tools/helm/spark-kubernetes-operator/
+ ```
+
+### Upgrade the operator to a new version
+
+ ```bash
+ # update the CRD as applicable
+ kubectl replace -f /path/to/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
+
+ # upgrade the deployment
+ helm upgrade spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ --set image.tag=<new-version> \
+ build-tools/helm/spark-kubernetes-operator/
+ ```
+
+## Run Tests
+
+In addition to unit tests, we are actively working on an e2e test framework for the
+operator. This depends on the CI integration for the operator.
+
+For now, in order to manually run the e2e tests:
+
+* Build the operator image and install the built image in the k8s cluster
+* Run AppSubmitToSucceedTest
+
+```shell
+java -cp /path/to/spark-operator-test.jar \
+ -Dspark.operator.test.app.yaml.files.dir=/path/to/e2e-tests/ \
+ org.apache.spark.kubernetes.operator.AppSubmitToSucceedTest
+```
diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md
new file mode 100644
index 00000000..60696155
--- /dev/null
+++ b/spark-operator-docs/getting_started.md
@@ -0,0 +1,190 @@
+
+
+## Getting Started
+
+This doc provides a quick introduction to creating and managing Spark applications with the
+Operator.
+
+To follow along with this guide, first clone this repository and have a
+[Minikube](https://minikube.sigs.k8s.io/docs/) cluster ready for a quick start of running examples
+locally. Make sure to update the kube config as well - this example would deploy the Spark
+Operator and run Spark application(s) in the current context and namespace.
+
+It is possible to try the operator on a remote k8s cluster (EKS / GKE, etc.). To do so, make
+sure you publish the built operator image to a docker registry that's accessible to the
+cluster.
+
+### Compatibility
+
+- JDK11 or later
+- The Operator uses fabric8, which is assumed to be compatible with the available k8s versions.
+- Spark versions 3.2 and above
+
+### Start minikube
+
+Start minikube and make it able to access locally-built images
+
+```shell
+minikube start
+eval $(minikube docker-env)
+```
+
+### Build Spark Operator Image Locally
+
+ ```bash
+ # Build a local container image which can be used for minikube, etc.
+ # For testing in a remote k8s cluster, please also do `docker push` to make it available
+ # to the cluster / nodes
+ docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+ ```
+### Install the Spark Operator
+
+ ```bash
+ helm install spark-kubernetes-operator -f build-tools/helm/spark-kubernetes-operator/values.yaml build-tools/helm/spark-kubernetes-operator/
+ ```
+### Verify the Installation
+
+Check if the pods are up and running:
+ ```shell
+ $ kubectl get pods
+ ```
+
+This should show the operator pod, e.g.
+
+```
+NAME                                        READY   STATUS    RESTARTS   AGE
+spark-kubernetes-operator-995d88bdf-nwr7r   1/1     Running   0          16s
+```
+
+You may also find the installed CRD with
+
+ ```shell
+ $ kubectl get crd sparkapplications.org.apache.spark
+ ```
+
+
+### Start Spark Application
+
+Start Spark-pi with
+
+ ```bash
+ kubectl create -f spark-operator/src/main/resources/spark-pi.yaml
+ ```
+
+### Monitor Spark Application State Transition
+
+ ```bash
+ kubectl get sparkapp spark-pi -o yaml
+ ```
+
+It should show the Spark application spec as well as the state transition history, for example
+
+```
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  creationTimestamp: "2024-04-02T22:24:47Z"
+  finalizers:
+  - sparkapplications.org.apache.spark/finalizer
+  generation: 2
+  name: spark-pi
+  namespace: default
+  resourceVersion: "963"
+  uid: 356dedb1-0c09-4515-9233-165d28ae6d27
+spec:
+  applicationTolerations:
+    applicationTimeoutConfig:
+      driverStartTimeoutMillis: 300000
+      executorStartTimeoutMillis: 300000
+      forceTerminationGracePeriodMillis: 300000
+      sparkSessionStartTimeoutMillis: 300000
+      terminationRequeuePeriodMillis: 2000
+    deleteOnTermination: false
+    instanceConfig:
+      initExecutors: 0
+      maxExecutors: 0
+      minExecutors: 0
+    restartConfig:
+      maxRestartAttempts: 3
+      restartBackoffMillis: 30000
+      restartPolicy: NEVER
+  deploymentMode: CLUSTER_MODE
+  driverArgs: []
+  jars: local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar
+  mainClass: org.apache.spark.examples.SparkPi
+  runtimeVersions:
+    scalaVersion: v2_12
+    sparkVersion: v3_4_1
+  sparkConf:
+    spark.executor.instances: "5"
+    spark.kubernetes.authenticate.driver.serviceAccountName: spark
+    spark.kubernetes.container.image: spark:3.4.1-scala2.12-java11-python3-r-ubuntu
+    spark.kubernetes.namespace: spark-test
+status:
+  currentAttemptSummary:
+    attemptInfo:
+      id: 0
+  currentState:
+    currentStateSummary: RUNNING_HEALTHY
+    lastTransitionTime: "2024-04-02T22:24:52.342061Z"
+    message: 'Application is running healthy. '
+  stateTransitionHistory:
+    "0":
+      currentStateSummary: SUBMITTED
+      lastTransitionTime: "2024-04-02T22:24:47.592355Z"
+      message: 'Spark application has been created on Kubernetes Cluster. '
+    "1":
+      currentStateSummary: DRIVER_REQUESTED
+      lastTransitionTime: "2024-04-02T22:24:50.268363Z"
+      message: 'Requested driver from resource scheduler. '
+    "2":
+      currentStateSummary: DRIVER_STARTED
+      lastTransitionTime: "2024-04-02T22:24:52.238794Z"
+      message: 'Driver has started running. '
+    "3":
+      currentStateSummary: DRIVER_READY
+      lastTransitionTime: "2024-04-02T22:24:52.239101Z"
+      message: 'Driver has reached ready state. '
+    "4":
+      currentStateSummary: RUNNING_HEALTHY
+      lastTransitionTime: "2024-04-02T22:24:52.342061Z"
+      message: 'Application is running healthy. '
+```
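+
+The `restartConfig` block above controls whether, and how often, a terminated attempt is
+restarted. As a sketch, a job that should be retried a few times could set something like the
+following - note that `ALWAYS` is an assumed policy value here; `NEVER` is the only value shown
+in this guide:
+
+```yaml
+spec:
+  applicationTolerations:
+    restartConfig:
+      restartPolicy: ALWAYS       # assumption: policy values other than NEVER exist
+      maxRestartAttempts: 3       # attempts beyond this release resources instead
+      restartBackoffMillis: 30000
+```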
+
+Delete application Spark-pi and its secondary resources with
+
+ ```bash
+ kubectl delete -f spark-operator/src/main/resources/spark-pi.yaml
+ ```
+
+
+#### Uninstallation
+
+To remove the installed resources from your cluster, use:
+
+```bash
+helm uninstall spark-kubernetes-operator
+```
+
+### More examples
+
+More PySpark / SparkR examples can be found under [e2e-tests](../e2e-tests).
+
+Read more about how to understand, write and build your SparkApplication [here](spark_application.md).
diff --git a/spark-operator-docs/metrics_logging.md b/spark-operator-docs/metrics_logging.md
new file mode 100644
index 00000000..5faff025
--- /dev/null
+++ b/spark-operator-docs/metrics_logging.md
@@ -0,0 +1,110 @@
+
+
+# Metrics
+
+The Spark Operator,
+following [Apache Spark](https://spark.apache.org/docs/latest/monitoring.html#metrics),
+has a configurable metrics system based on
+the [Dropwizard Metrics Library](https://metrics.dropwizard.io/4.2.0/). Note that the Spark
+Operator does not have a Spark UI; MetricsServlet and PrometheusServlet from the
+org.apache.spark.metrics.sink package are not supported. If you are
+interested in Prometheus metrics exporting, please take a look at the
+`Forward Metrics to Prometheus` section below.
+
+## JVM Metrics
+
+Spark Operator collects JVM metrics
+via [Codahale JVM Metrics](https://javadoc.io/doc/com.codahale.metrics/metrics-jvm/latest/index.html)
+
+- BufferPoolMetricSet
+- FileDescriptorRatioGauge
+- GarbageCollectorMetricSet
+- MemoryUsageGaugeSet
+- ThreadStatesGaugeSet
+
+## Kubernetes Client Metrics
+
+| Metrics Name | Type | Description |
+|--------------|------|-------------|
+| kubernetes.client.http.request | Meter | Tracking the rates of HTTP requests sent to the Kubernetes API Server |
+| kubernetes.client.http.response | Meter | Tracking the rates of HTTP responses from the Kubernetes API Server |
+| kubernetes.client.http.response.failed | Meter | Tracking the rates of HTTP requests which have no response from the Kubernetes API Server |
+| kubernetes.client.http.response.latency.nanos | Histogram | Measures the statistical distribution of HTTP response latency from the Kubernetes API Server |
+| kubernetes.client.http.response.<response-code> | Meter | Tracking the rates of HTTP responses based on the response code from the Kubernetes API Server |
+| kubernetes.client.http.request.<method> | Meter | Tracking the rates of HTTP requests based on the type of method sent to the Kubernetes API Server |
+| kubernetes.client.http.response.1xx | Meter | Tracking the rates of HTTP Code 1xx responses (informational) received from the Kubernetes API Server per response code. |
+| kubernetes.client.http.response.2xx | Meter | Tracking the rates of HTTP Code 2xx responses (success) received from the Kubernetes API Server per response code. |
+| kubernetes.client.http.response.3xx | Meter | Tracking the rates of HTTP Code 3xx responses (redirection) received from the Kubernetes API Server per response code. |
+| kubernetes.client.http.response.4xx | Meter | Tracking the rates of HTTP Code 4xx responses (client error) received from the Kubernetes API Server per response code. |
+| kubernetes.client.http.response.5xx | Meter | Tracking the rates of HTTP Code 5xx responses (server error) received from the Kubernetes API Server per response code. |
+| kubernetes.client.<resource>.<method> | Meter | Tracking the rates of HTTP requests for a combination of one Kubernetes resource and one HTTP method |
+| kubernetes.client.<namespace>.<resource>.<method> | Meter | Tracking the rates of HTTP requests for a combination of one namespace-scoped Kubernetes resource and one HTTP method |
+
+## Forward Metrics to Prometheus
+
+In this section, we will show you how to forward Spark Operator metrics
+to [Prometheus](https://prometheus.io).
+
+* Modify the metrics properties section of the
+  build-tools/helm/spark-kubernetes-operator/values.yaml file:
+
+```properties
+metrics.properties: |+
+  spark.metrics.conf.operator.sink.mosaic.class=org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink
+```
+
+* Install the Spark Operator
+
+```bash
+helm install spark-kubernetes-operator -f build-tools/helm/spark-kubernetes-operator/values.yaml build-tools/helm/spark-kubernetes-operator/
+```
+
+* Install Prometheus via Helm Chart
+
+```bash
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install prometheus prometheus-community/prometheus
+```
+
+* Find and Annotate Spark Operator Pods
+
+```bash
+kubectl get pods -l app.kubernetes.io/name=spark-kubernetes-operator
+NAME                                         READY   STATUS    RESTARTS   AGE
+spark-kubernetes-operator-598cb5d569-bvvd2   1/1     Running   0          24m
+
+kubectl annotate pods spark-kubernetes-operator-598cb5d569-bvvd2 prometheus.io/scrape=true
+kubectl annotate pods spark-kubernetes-operator-598cb5d569-bvvd2 prometheus.io/path=/prometheus
+kubectl annotate pods spark-kubernetes-operator-598cb5d569-bvvd2 prometheus.io/port=19090
+```
+
+* Check Metrics via Prometheus UI
+
+```bash
+kubectl get pods | grep "prometheus-server"
+prometheus-server-654bc74fc9-8hgkb   2/2   Running   0   59m
+
+kubectl port-forward --address 0.0.0.0 pod/prometheus-server-654bc74fc9-8hgkb 8080:9090
+```
+
+Open your browser at `localhost:8080`. Click on the Status -> Targets tab; you should be able
+to find the target as below.
+![Prometheus targets](resources/prometheus.png)
diff --git a/spark-operator-docs/operations.md b/spark-operator-docs/operations.md
new file mode 100644
index 00000000..c205fb76
--- /dev/null
+++ b/spark-operator-docs/operations.md
@@ -0,0 +1,122 @@
+
+
+## Manage Your Spark Operator
+
+The operator installation is managed by a helm chart. To install, run:
+
+```
+helm install spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ build-tools/helm/spark-kubernetes-operator/
+```
+
+Alternatively, to install the operator (and also the helm chart) to a specific namespace:
+
+```
+helm install spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ build-tools/helm/spark-kubernetes-operator/ \
+ --namespace spark-system --create-namespace
+```
+
+Note that in this case you will need to update the namespace in the examples accordingly.
+
+### Spark Application Namespaces
+
+By default, Spark applications are created in the same namespace as the operator deployment.
+You may also configure the chart deployment to add the necessary RBAC resources for
+applications, enabling them to run in additional namespaces.
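+
+As a sketch (key names taken from the parameter table below), enabling two dedicated app
+namespaces could look like this in a values override:
+
+```yaml
+appResources:
+  namespaces:
+    create: true
+    watchGivenNamespacesOnly: true   # operator only watches the namespaces listed below
+    data:
+      - spark-team-a                 # illustrative namespace names
+      - spark-team-b
+```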
+
+## Overriding configuration parameters during Helm install
+
+Helm provides different ways to override the default installation parameters (contained
+in `values.yaml`) for the Helm chart.
+
+To override single parameters you can use `--set`, for example:
+
+```
+helm install --set image.repository=<registry>/spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ build-tools/helm/spark-kubernetes-operator/
+```
+
+You can also provide multiple custom values files by using the `-f` flag; the last one takes
+higher precedence:
+
+```
+helm install spark-kubernetes-operator \
+ -f build-tools/helm/spark-kubernetes-operator/values.yaml \
+ -f my_values.yaml \
+ build-tools/helm/spark-kubernetes-operator/
+```
+
+The configurable parameters of the Helm chart and their default values are detailed in the
+following table:
+
+| Parameters | Description |
+|------------|-------------|
+| image.repository | The image repository of spark-kubernetes-operator. |
+| image.pullPolicy | The image pull policy of spark-kubernetes-operator. |
+| image.tag | The image tag of spark-kubernetes-operator. |
+| image.digest | The image digest of spark-kubernetes-operator. If set, it takes precedence and the image tag will be ignored. |
+| imagePullSecrets | The image pull secrets of spark-kubernetes-operator. |
+| operatorDeployment.replica | Operator replica count. Must be 1 unless leader election is configured. |
+| operatorDeployment.strategy.type | Operator pod upgrade strategy. Must be Recreate unless leader election is configured. |
+| operatorDeployment.operatorPod.annotations | Custom annotations to be added to the operator pod. |
+| operatorDeployment.operatorPod.labels | Custom labels to be added to the operator pod. |
+| operatorDeployment.operatorPod.nodeSelector | Custom nodeSelector to be added to the operator pod. |
+| operatorDeployment.operatorPod.topologySpreadConstraints | Custom topologySpreadConstraints to be added to the operator pod. |
+| operatorDeployment.operatorPod.dnsConfig | DNS configuration to be used by the operator pod. |
+| operatorDeployment.operatorPod.volumes | Additional volumes to be added to the operator pod. |
+| operatorDeployment.operatorPod.priorityClassName | Priority class name to be used for the operator pod. |
+| operatorDeployment.operatorPod.securityContext | Security context overrides for the operator pod. |
+| operatorDeployment.operatorContainer.jvmArgs | JVM arg override for the operator container. |
+| operatorDeployment.operatorContainer.env | Custom env to be added to the operator container. |
+| operatorDeployment.operatorContainer.envFrom | Custom envFrom to be added to the operator container, e.g. for the downward API. |
+| operatorDeployment.operatorContainer.probes | Probe config for the operator container. |
+| operatorDeployment.operatorContainer.securityContext | Security context overrides for the operator container. |
+| operatorDeployment.operatorContainer.resources | Resources for the operator container. |
+| operatorDeployment.additionalContainers | Additional containers to be added to the operator pod, e.g. a sidecar. |
+| operatorRbac.serviceAccount.create | Whether to create a service account for the operator to use. |
+| operatorRbac.clusterRole.create | Whether to create a ClusterRole for the operator to use. If disabled, a Role would be created in the operator & app namespaces. |
+| operatorRbac.clusterRoleBinding.create | Whether to create a ClusterRoleBinding for the operator to use. If disabled, a RoleBinding would be created in the operator & app namespaces. |
+| operatorRbac.clusterRole.configManagement.roleName | Role name for operator configuration management (hot property loading and leader election). |
+| appResources.namespaces.create | Whether to create dedicated namespaces for Spark apps. |
+| appResources.namespaces.watchGivenNamespacesOnly | When enabled, the operator would by default only watch the namespace(s) provided in the data field. |
+| appResources.namespaces.data | List of namespaces to create for apps. |
+| appResources.clusterRole.create | If enabled, a ClusterRole would be created for Spark app service accounts to use. |
+| appResources.role.create | If enabled, a Role would be created in each app namespace for Spark apps. |
+| appResources.serviceAccounts.data | List of service accounts to create for apps. |
+| appResources.labels | Labels to be applied to all app resources. |
+| appResources.annotations | Annotations to be applied to all app resources. |
+| appResources.sparkApplicationSentinel.create | If enabled, sentinel resources will be created for the operator to watch and reconcile, for health probe purposes. |
+| appResources.sparkApplicationSentinel.sentinelNamespaces | A list of namespaces where sentinel resources will be created. Note that these namespaces have to be a subset of the watched namespaces (appResources.namespaces.data). |
+| operatorConfiguration.append | If set to true, the conf files & properties below would be appended to the default conf. Otherwise, they override the default properties. |
+| operatorConfiguration.log4j2.properties | The default log4j2 configuration. |
+| operatorConfiguration.spark-operator.properties | The default operator configuration. |
+| operatorConfiguration.metrics.properties | The default operator metrics (sink) configuration. |
+| operatorConfiguration.dynamicConfig.create | If set to true, a config map would be created & watched by the operator as the source of truth for hot property loading. |
+
+For more information check the [Helm documentation](https://helm.sh/docs/helm/helm_install/).
+
+__Notice__: The pod resources should be sized for your workload in different environments to
+achieve the desired K8s pod QoS. See
+also [Pod Quality of Service Classes](https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes).
+
diff --git a/spark-operator-docs/operator_probes.md b/spark-operator-docs/operator_probes.md
new file mode 100644
index 00000000..7aeecd19
--- /dev/null
+++ b/spark-operator-docs/operator_probes.md
@@ -0,0 +1,82 @@
+
+
+# Operator Probes
+
+In the Kubernetes world, the kubelet uses readiness probes to know when a container is ready to
+start accepting traffic, and it uses liveness probes to know when to restart a container. For
+the Spark Operator, we provide the defaults below. You can override the values in
+values.yaml if you use the Helm chart to deploy the Spark Operator.
+
+```
+ports:
+- containerPort: 18080
+  name: probe-port
+livenessProbe:
+  httpGet:
+    port: probe-port
+    path: /healthz
+  initialDelaySeconds: 30
+  periodSeconds: 10
+readinessProbe:
+  httpGet:
+    port: probe-port
+    path: /startup
+  failureThreshold: 30
+  periodSeconds: 10
+```
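+
+When deploying with the Helm chart, these defaults could be tuned via
+`operatorDeployment.operatorContainer.probes` in a values override (the key comes from the
+chart's parameter table in [operations.md](operations.md); the exact nesting below is an
+assumption):
+
+```yaml
+operatorDeployment:
+  operatorContainer:
+    probes:
+      livenessProbe:
+        httpGet:
+          port: probe-port
+          path: /healthz
+        initialDelaySeconds: 60   # e.g. give the operator more startup headroom
+        periodSeconds: 20
+```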
+
+## Operator Readiness Probe
+
+A readiness probe helps to determine whether the current instance can serve traffic.
+Therefore, the Spark Operator's readiness probe has to make sure that the operator has started,
+and it also needs to verify the existence of the required RBAC access.
+
+## Operator Health (Liveness) Probe
+
+A built-in health endpoint serves as the information source for Operator liveness. The
+Java Operator SDK provides [runtimeInfo](https://javaoperatorsdk.io/docs/features#runtime-info)
+to check the actual health of event sources. The Spark Operator's healthProbe will check:
+
+* operator runtimeInfo health state
+* Sentinel resources health state
+
+### Operator Sentinel Resource
+
+Learning
+from the [Apache Flink Operator](https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/docs/operations/health/#canary-resources),
+a dummy Spark application resource in any watched namespace helps the Spark Operator health
+probe monitor reconciliation health.
+
+Here is a Spark sentinel resource example with the label `"spark.operator/sentinel": "true"`;
+it will not result in the creation of any other Kubernetes resources. Controlled by the
+property `health.sentinel.resource.reconciliation.delay.seconds`, the timeout to
+reconcile the sentinel resources is 60 seconds by default. If the operator cannot reconcile
+these resources within the limited time, the operator health probe will return HTTP code 500
+when the kubelet sends an HTTP GET to the liveness endpoint, and the
+kubelet will then kill the Spark Operator container and restart it.
+
+```yaml
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: spark-sentinel-resources
+  labels:
+    "spark.operator/sentinel": "true"
+```
diff --git a/spark-operator-docs/resources/prometheus.png b/spark-operator-docs/resources/prometheus.png
new file mode 100644
index 0000000000000000000000000000000000000000..5507d577eb4a6da1cf222e60c8a256cc2a6d1208
GIT binary patch
literal 184821
[binary PNG payload omitted]
z+ah7v-`p(q@s4>tG-DosM%lYA_eU~CX}l%`iGZ|H2`KxTIbxuQ7!~)NEM2f<{nbA~ zMg)O#z{Z(K=ytC)eQ?GmHwS1edYghhGg*U96|wj=WxYIu2Pw!+-iAX^?FQko`LBdNiLsgi%XX8jzaKHch(cC3?>@xr_!E5A>8*B z%zv832@k2=k-r`TYPhQoNGK*{h@$+^$9->#^nnZ6*Aj zf-ihYltnFKe@?IR!AalqNI;%U*`J~xKF)YNUE&Q|Aek(C#Q`5Vea+T`sxpUht5naJ zdzo#mN?zy!%w<%k`UoG}CyO0XINIPs}$bwQrfhbe#!p>hwAnIbrn1 ziSn8`s*~T;QHM0KB{2sjs49aQQmx*INLvp>P=uP|=Jz=-BLy9 zxsRWdHx31p69bqa>1rPQ=9x1wjv~<>WsY~*AxC}je&|Dse^?+o0QSk1MCyLWluJbK zUbPP=&fltq9~)N0-9QXjV2}>B{;d@p(IC~ugq@P`bDwxah`# zdh}G`0NL=G{ZfjP^wt0`;NMP7xKXZB0=RZRKG!SxezI=%{uhSlN2}pVR?gWn+3|yg z)4P--4I(VnD{pjsFB74(fsDQX_97Q&_w>PpbK{4orypV*3}1*8XMxD$9dbm&eCSlQ zt#)~HDbn#AB({;Y5_kswbLm-91GV_8_FfxcJk0fqgO?&?!!#_ldz%8S0urVcK-3&% zz7$;TXSHBdYU(woGvRT!Imo*baCIqmplYu|!M7x2{!hN~H=re=i+utd zDhkMAe2dsfNhx*+?syCY^cxOi8A|gx@sWPbCE76!vIhr0ptVi0OZ;kW`ef-@F-4NT z_nLh(x)HvemJ-;PTxr!9i^_EBRbrqJiVw$c#;&Bu@4rGKYTP6oXu&U6t|u(W5DyKQ zx$rR<+KEjn$8%!IFIH3?h!NX3Sy{$b8M02Y4&JmEbZL)Q# zoGd?D+?jIzIt-02NI5jNyM{m+C3I2Dp)~}}3oV}iJ zVwa;=wQNYH^{UJRbOC%@z&ulT=ej%_#XNATo}rUG9ekLrkGGlC+>W2lH)ZKh^rLCg z0ACfHFua+zZr(6N4}K#+wTj!bmbD7DX;Z3cKIDa2bT))l6=CXACeOySJSd2WMv1{! zc%fln0zsU^Qus&{{e;}UAU$mnXk`ls0(hI5%LKy2Z8IzoyRTn=OUy@kp_s+&sJhK< zmRUSLB|txx#Q?}y7rGj@lm$AkCZ6E{w>q45`y}1HNRB?_=V#rpU&nYijP6NV+hiDb z_kvsfjuX@wmu>~fV5o49L3`t}O;aL}6|zypd)YCd6g;%mZav=|lS;R=_nM2I8VQo3 zZt&_I+P_4eAkWl|1Okt>AdaI)#4mBt1&OxzCTGK}#Y5Ank6Qf%_x0bn%i*B;DrDVR z2N_)bHmMAEwYQUA3~CJU=i06UN8ZfnApdBKsu49{8}5uBy`!%T)rND)`x{>O-#aPw z%w?J34P60`r*3zxu1{~gH@oLSFE4wki!epp&CO)$V1_Oja`ZqbeK=J; z4&A|IUaL8=ba$#%nKV6>9voT(9?b(#V4ao^^zFpqe1LKz`HLHq`!>El!2#W{`he%nzFnE)Ne;EgOt!xqINB%Pi;_ermSPjj*lTSDDj)32R*@ zkp(&}AqmdU+7y(@m(H(RH2N+I7clocybr2gO4}sB0OcITgN(qkLN*GpnK6td!}Zr$ z$C!55CEae7%+cI(Czy?Z0>_&*Zk!R_K+6_O( z?yOJxMaFDpw4Z4t9tO07kexNJw8hBSswcM4vfVX^bvD%T_fifWl@&PN$o zt9ec7#(Q<{5hU$Zhqobn+H0!bGq$W#U&Z-LYK6N-aNjnJNAdIK<5TO^q?MIk9PnEmaaI@y8?35mQ1mMG5)R#_(P+AzJtB ze$qd|iNLOSzHA`ai^e!9_>c0$w#dEZA|L3)~lfq~XP$v82z$YX|p&*NQA7G*!@7 zKNdmumYgLpV-3xm-=<^4PH?D^EO(h^*}<9!+Zf-3oGgMng??vmtzj(TGw34t1p)tZGMJ{J;tHvWnKGRYKTOaMHvjChRT0BWXfEQ2L32q%JSrt7PIe#0Km10nZZ7*y8m|}{#Z1zag^=DEUKLy-WgoWV(@3!u3iz?{0=WpD9Jp8+F7g^I zo=`vwRzmpui(z(=iJCAbh?t~HcVQIF~i_sp7_y0<7;EAnO$lfJ8vl0Ygjs=*_B@;H_W))ZFoQvnOMwR2X3g4+>Ycg>GrJN{P#G3A|S@OdKjHtjDyf5Hs~ zVY|LkMP@oy2Hf)7>oBc*U?dwvTiTv@?r>Xhl@z(s@X%ne+`JMvS5B&8KF01F_hVo2 zxn#Beht6*gpNm4*?{zG`<5wq$(?@i(nK#q9!fHXRe2kG@e)@KA2_^E%i6N0U7ZUOO7~wxv#J++7fO&?sZg@AvVZ z|5Tp(*RCSqX!LRV=pgT6Be6>erd;OyX>Y%lCri-CA7^wxm}Nx?-YzHeYi$LvAB{NY ze)cw{cRBU?sTQOv%};j1Qgs>5%mMS!f%b>U)~xi_g{F5bM9o{T0-E(T#ZivTz{D)6DUpi=R2{2N$`qEjPGxy6-+}lf(Y9de3e|~CDh)MXkhB!5f=n_}w z+~Jj*5HX3c7<++Dhh{UZkqd1uB(p$fgjOK3jMI>;Hp>wOBrkwevGgh` za2=c2CL0^^c|^3=%{38-+jP9iwsA+5n_K;oXS_n`TN)Bug?pjeRm;Z}v1XSy6-##-M%XQUsmFlsqB&qGkO*l19r@q(I5ZcX4pCrDqR7 zlvGMh`(-h7-L**rhz0s*Q*M)jsU_mjSbn>Z`PUf4(WSC}Wp*%=YHIO@s71QvnwMO> ztpls+zW8-_hSPpAirY&|CNIN@f*N!$T z93ZTP;HXlKq?_XVnK}>N1&i}<=k{vjWmqNxV6bXY;!F^m(i_1S*!mH&U}roc#M&F0 zJsr-t2Gax&i4W8qOVs@pxdCIzu4&u^m$*v0>-|q+!MeQR+vQqh_L)lfk}RHGQxnJ_ zP#-)mEx1n{ScG*DO2RyA?LKW+m%s;bMtK48vpSDegE0dIk)#aYj2Bkl(d8aj)7JYg zqpwDytaC*sSb(C_m#l;=aB0WGNs;SS0P=Ek0dmc<_$+G?e{fM4dv^rqlKV3QcpRDt zRVMYNXjnqThtpwyOmT?6Ya#{vPE9Nib`!{(%?R?<^Wsp72NgkvEQfkWH(xtItRvA^ zW5pc@&ZCNMF7G9jeDXcu3f0|RVuPPu`2eB}scHcU)ksa0?@W1bjU-krkh1$Kf%kw? 
zno7Gy2;ObQau3K-MdnSzXMxfc<4AAEvR%kdy^gO7V{>=bX0GS>^h)VGAvz)GuQPnV zYqtLpPLXR{eg<~x8K!zRTS^?hJ9=XD?)A~~vsz{Fea$7@%(vIYz4HeTo_lN(*qeAs zsl!jB*X%wLYTK*@C{He& zqeSmNY(Lq+V1(k{&ekR4X_x^5*QH6f%x}s0u3V`Z%uEf;kVi(gMe{2 zX+K%W5UKWfEp(;`Y?i#W#Av5{gjpgGfuFLx8Zul9_@O~_t_f9|fWRM9NEKzJ$ze~~ z4U5;QY;}*^2XK58Kyd}yzCAKi6#I&G0o_vOUUsW8zue5Unj_^*3LAu{c1K%j>NJQs zF;J$)4BGc0YB7o*i3HK_SsZx;-ZWsSaeQW8hp}~e9uWW&%=&J@-e?AMIU~cB?z|rtZ z?$GI|2PNugpt9==U&PEaCb?nF_ch6}c3Xik2 zM+;T^NOKns*_&2CXdFwJyJpD$TY_i(5qs_$7#&2=2f?;9gZMMk* zFo;w)xO5Hu*05d+-f+C_NX>D0Sx)_wYpmo=?;eM@&1Svul)GZ$+S zLtVmrQYu9RNlWiOfAVR$J&^VM6VXHOcfs}V6b@X&?mwxQPDhI3bP+hfB^S4CI!=8z z^x~7F@;|6MJ%wV94eHlbl?iSxoAq$n!7Q`v6zuK>fn}|i>o!}x1CW*6GB-^;E1fcq z(e997iaQb{W5=8f2eNe`2+H8qy}VOvV};fmH3vZus-U$aU!k93V36y!)1cscb?Wth z)bjr!8=o3!jCukwedEA(<^PAWw~mXl>)M6|K@n6+q+z5R5ov)@q)WO%rKFMWP!LqQ zQ&7648w{jF=|-euKo}UL-#wxX*Zn=u{aoKa{D70$=iX}_Ywcsj-Vh)TWDFKzoVc?Y zJJ^}E1$jm!XKDlX$Ul;{1(uVD7`?!~?E$$yCR(m-@f_>LQ##O~NO$?>HK(R(!Da#=&$y1|0KZvEis=l%75NBKt8uuq3eB;7hPpcI_EyVeLgm;+01 z#@1$xAhTY3-O5KJE^K;9_H3Rf70p;bZ#Wx#GS3uJuMf5b$E0+vNmftEWV}XaeNY;Y zdu^Nxf>9^$8R2MC3vad;eN%|J23v1SMX(+8HwVvLUd>t?(%Yzo*McZd(Zk|LFu@l- z>2yP<9?bwt*W(qT&u=)XalR?;WM-6E=|hC;k0Z*lr&;&a3k_AB|swq$l9ONN|XDb&ZP5Ibi6~w}|c-V_P;w zqxMipVF|~(rs$6CVK_;W;*IYzvg~8}b)hRZlvk&!pV~THfO(4w@bX4XfB!lRKVJ#L zTB{U;BDa3#uC09-zs}l#H^o(ug-;UFFWoCDwY!n7RX0WUeDd`&Kl7&1-LW)h*kxo9 z#!TZf=s~$1C3Jq*WTh;i-e81l$)$5NN;ossM6kK&*Te(Rl6(*+zf%6?vwNgVKCI5n zefS6y_ST+EVEp2Y<=T=*WV_d`tZ@f9Dk`y@#3qQ@@|35d4uN{t^5DeC^+Ev_(I-MF zht31kFFr|fbc!f%}aG}oX` zk%L=mVVApz>~bja1cY0!Nc9M$77G=Z?0vVC@rzFHefTfC8WtOcZwyX)kJ?bi`$g)< zPi}SHOpMubgOT)lbj$d^?{D%jmd}eqI2|`id`iX*3A(UW^SHrvn`rbCcTBSD18%j| zwm5J9h`MYUOqQN_GQ(8yhijJ^@(5c4|dJ!;#i&} z5^+(ib64?&z=a-%dEFFIERD{3+TcNvIKW}XyHZO{b`|%|^-rS5+_%W;6h96+l(vx- zuM*|md9i1BJW0IbHbraLqKLg!MAUP>Z)rWg%$Q@JGx#%1aWS}*J@(=+xXE6Mv=D<1 zpH{)oG)YWY=zV z;kQ3>?IMusQQ7!&iQNB@cJaRf32V5yr^R~dw%-Sb=M`6!MlkhXex6zF6=jXr6AaI% zu+jf@6yPplYFfziZGDaO>VC4wQ}xBN(Abvvw~C|gYv{e@)31BmKu~!*RW#63y_4v)!8QMp&oCyCGcgc2bk7ch7ljn{LgW8b#eDZ|NE52KqtGv++|6OY* z;(I^{&O{{KWbs2@k=8HtL?~HM({vAA5fSiy-+Kp{n8xKusWR0VUfYHBaaxjDtZh{{ z{M_-rqNrI~!0wk9qI)ey=k_!##!G2^X0}T9;-?)OmK4fEN6-tbxcW_Cp#of%r=TB8 ziJd;=ulDvoh|B*>ZW8W+V?N9^()5?gz^z5kD+=EBYZm-!V;8%zt+Q0J2^d)KXZL%G z!J8dsgKMCRxzd4edZdTRPj+fb58=hysMIqv>%4P0;IL;Ahn9Ya1T@*Od%%CLz5jG; zWA~(q0_;d-lG=cA(NOboi*~S?#aCx>H|ZKRkRe9`=7|jx9=#h zY|ph!^%G5&X<6-+cPd`5sJk`EOehs;CaNUj>Z+N`>UZz0<#z6r%NKFMbvS8w|Xa^bPu5~b&c4V|Eu&v>UE?wK2W{z2Tsu+IzpDOJQrK4xF}nS zT}C|m=?Y_0Eg9RtQal!$6hI^92#oCH-DG0lgHzUVc0_KClSC`M&cztie2dor|G1W0 zQI7v_%pVQCixU{yfoC@6?+K)uVg5PO_54OxiSTX9%q^4U*8CnPZ!O7RtqRFqs*aAQ z7jFI6Bw*?bux_3v)oUlyxXJAcT>T|vU$TUR#IrklovZWiFc;jrPb@5OK#Ef6&O-c# zzyJS?_y5=leKDDVJB8HhrTs<$bOy^~SMvOHoyA2lMN^$c%A2xoGh1?V38#nXIz#le z+8O_qgtZL-WVZJ;aZZL7!?}Ff-*&P3a5@ut3Ngv4wjS@^aq2@iS|@GLoJvA=bz@3u z*a(LB2@=lW?o`@A)#U$}?f;^NI98y^>WB*J6L*;U4mt(J$~tg?c+4z_J#x8EQct6L z-UDx)6bp33)7I_tB)=V-UA*#@Huv9nE$BlpOkl-BQDFecA1AQA&<=k4>I{((ctvcu7wq6guev1u#rXf+dLd!fm;c51|I`sw4QO1g zc;sKT`~O2J7`?&&i!J;Yg=WhDdt~7y$Nw*X^FKcFf!1*SD@*wYgw(+8Od4NI{#<$Z zk4OJqym(T47yc=zf2{p~ew4}rd_eB7<^BI3FFIK)JE2tLip|c7{697NFBTl<`@w!t z*YScyY!OSCKJu={|Gu#Qk8s(rz@#^Hb2M{s|^Y6%S0!u}?wa>v}DFVCp(MpI8la?@owJ z?O%dcmJKc7XwZq0EXBf$yZ>R=kbr_31&HiOpOv42A6a5PpcB|qX+e`Gn&f#D606{< ze{Zw|7u}t|)W2*4w(kqes%obfJ3f)~)zbq;w)o_D{B#t2Fro0z%*3$-q> zfcfF@hsuH5lHrUGle$yt&s-kxgx**C(CS9mq)gg9ol`9@(>%y~y5R*=!Gy2FG`nbS2B3G-+6P)`sNs-gsr8M7p+EyX_V0o9{g#^;gqtiE`0HrQI7}nzV^Ga(*%{D+qYB_L zjEk6F|9xF5J*K(i#zX`^nYhn~bfCM)&0nawK=m+m1Add!>rp=N)@RFWnO6BvN6SFB z5=vNPGO6m`I^b?I)ymw(|4bP~q+UGR?L7k3P- 
z&ifx+jTp2ClR43592#a}v43ag1GV_0xj`~wZLz;ldvFmch@rRUfZL^h#7q(;t9!2c z_kw6BSdMN$rBS8%HD3WDTdH^!YoR>l5BvoL-6p6BW69wm(T2W6s9BE!`2!B5<~Aeh z#eAT7IQ-saXRf^V0n4enYSW~z#E9NS|NTy{?qT@@3yUO<{x?I`C^IT2IvKYdMx24t zdFpQnhe<>N2;Tz`#)eRzQOa{9gi~1}KA;l<2tTBpOz#2^E}KzJLgUkI;g4xk|X z8xef^9pLz+L0F>i~p5JBMSReyA)J3E@>$d77IoNC;oHc2>ItApDPg!I$6% z|03d?uQF0|u`w&)GKvDu?+4%ZJoEiewGs>vQMa*oKN#zFq7PiEbbI(ye$msKoBMis zD8}EfeFC+i8q=sh7Zigl((Ta8YkpY^kX)gGK9oUQ2xWemk8+2p^XP@2EfjL0=r{@h zX&)V3Rd|Q_t=g?s%utHE!uTMsksX|7E3{BM-QH5aVIWQ82}~ISe^KTZE==W0-GO`a|7Tg0HZjl7j4FBqA(y*}vdJ zzG48Ilm9B6EA3f??f;X6oDh&-Lt$gILxmXSYOz6ss#*l z&J#&kO!3M5xl%f|V03A}W9GkF>2G|LTcP80)NuH>}sg-?&fJ9?ZkPB@2`$!-e5* zy2oADNS&bP&@k}+ej!HF+qsUBUi&@5?;JW7#OL=HA3dnX!kCxbJU$e{h4Dm`Angq^ z{pnMv!=t0%1 z_Glr(_P=}1Hx-Qi?D1n-)O0Y)u)pFCwYPo5lafLk4fo4O#IfEUGV4?$C6z!;8^VI= zFrkANMio6En|ZnvjCayt+CmBK_x16hkXz_w)ZtI(+5WBxOlcT%F<^`*PVyWYCwk3j zxM53ZM+>!0q`_6n2wKSyspcS3dd0On%2l>Em^CVbj9SA!$|8m@qNzZK9I+WsR{#d0 ztMDmw$3gEU+!uVeg?hnVEU>hTPg!`u()bBe4+BfFGx$-Dxo$v3?=a-Pl%v5LK+my~ ztnk_@mPwY7=qCH3=Hrbq9*HeNUu76`RNᮆyjY$trM*7 zDaLOByx||ZOfP;an4e6kd2raq*6NFM=v-{jef-bSl7skvRgLjj-N(zNQ=O7j5@L=~ zS3$ebogDf0Gj;Tc{XC{#%xjKzj+vH_@C2URi}kqVG^rSM`dN!NPznX#Ii09Sqqt5=gNrplea56}5#UrVJ1in^DH7s!lO1Zz#b@!6<*eD_8y zk6|;8ar2g$^3sb}H`Y6@zGVHrEhw+cB@^-+gnzzRqt~plV@Wyp(O)@z<2kfMpTh1+ zSXd{G5A+$wMeNNC(=&Bh?cUX|H;gg9duC4_3qF|13 z1BjfmDQ6f14daKL4}!LvCF;i?GXg7U@;i6SH~LU@BVsg~sV&b#}QFUbS3}JhCtabaZQU{X&(>nJ}PM z)RbPbr>;ys^Vw=@2w@UF{XG;9y+&~AZqh4xfRb)tUqABiF<(VJmcs(J`_h~y66=1s zPpnhATY=783$?dh)Jie_=XB%O^C7n_-n#dQlvwU!TYNiM9nFZ`S{(cmZo*C>>?9Y% zp~pd;zx&`S)g7ir3PCynj_w;7GI|yVtjrlQF;V0!>b8`RJb%oZ&Q?x^Xa_{#a8yAr zAl4aMLa8HhhYGom3SudRoMkF3M~uFH@UrvV-E6QL=&Z0xX^knMZdi z-}^){d6jPzRYh@XsJD|Ik7vSCyck|z=X{!f*in>uLP<^2+8HVuRM5sbh#% zQ1~n-tFoqbeMZH&`hxmU3IA0iw$_gP~8~MPK!M?O6KeV5x?I7JOQ}a-gBiW0xbk zjj#n1V`QH&`aKfC}9Hml&|!2OYT-A@?;w*HiwW#=i=7{T=iDfclQiAeBah>U@w0u zo_AZIEk(4^MyS$fYp;26npHi2)5@?*6KyE~mpbM`aAMmHLpsDmMxK&g?#Sju+V?v9 zRha2Rg?cKymcw+6if_tz?GM==KpNRR4ByOlCAJjn2Vb446K5T%bxxrb&cWoNIY_@` z*rhZSIm&RouU%V`K z_l#$RK0kM8dnJ$p-4}!SXK%lVTR~-igvaitKV7Z)woY%gMQ9j8 ziCm+=Ewa;PNqox78f~c|+KXEqy2st45WYdkxL+Xe`gAq!TZq!+1bVNENV~VrZL8kl z%ge`{;_33WA)7}REmox)c7sScuT=tDX!u2=Rc%>J`(Ub4C8*n_TEVa^+lU+mR5XlF zOeY@XraD0YYIhC9Mbk@O|MfNX8rZ{Mor6g-X%xw!gjY3@$Z9BB3FR%3Ijb#iNF|n_OQF|XqYfp^b zlZmcVw=UEl;}UeMaiDpA;cA$UO#yrCY$r20uXV%*Aw!#n!8#&YqibBbHbyRaGl-MD z+Gp#NKaMm^n}>cXBes3-3c2x2W|!UCxWH6{xkT~~Y=Cw4)jRR7E$2y3gHh*I%kHt` zzDj3Bv%&nFS>E=8HiV+_*RV&MU3~7ViLH9hypc+ZB5P8w=P-dau*JsKp^yNq!R3!N zyxE)1Mp?sCI;1s7(p|#@)^H721Hr4Vv(~@_tbvvx;2z{I(i%*A-b9KV?sY_DbUas9 z@f{>$RSlE4Lf#6KCzp+3)1uh-+-!5IE_Ia!{^BRKuWrT_DnVV7p7<;i&%U(J0qynd z>s+w_9(@^#jG?4sWmQ?#Hq;jLi^YbwtPk!3ht8&xS$4a&K8Mz7E_a|eLq}0k`{fUf zj?IkM9%j;*>BNmSacnMzg+^^jBbBhA_|n;8Mv1+cf_UHadoqcGxbd&`+_xf#Sk<-i zI(rl-N2S;GyTfO9*2Wksr@rZiPzf)OA>O?%>4*tlp)+g_YADluvtgIqZ0Fi0bh-xt zEcr=7hw3rWmZo#4LtdR?3r`ZccRjs-PYLW@fc}L--VUSu?m4tz^sUc4kL{Q+2vk2< z2)%pO>%IeHl#MOGk}lkG%rM%M<&2{Z)e#Cw;n#ZO>h!AoiPxlNbwu4>dq>Fa@}C`V z#d_Zw5(`+6>($w1kQ+TbZkVqCRz^8i87e|fCN;pl^NN$A``{QdKu}`2k%*Y^fZNY^ z#om1$up2WF#IwBD13FjLtP7_#v#ztK<&uPD67bqgl7+4De)9Mr!4(mfuyR}Cl@u?E zk^)7epKP+BP~)fAC<-@x{p-MPh!v(?=s9$KKx68%i*e5&D2FhW`i-C{0=O-FJy>f| zBUQ{cId3cDm}qp5^JQ1?c)f+`lF5OJ$_g*cxGTVdaxBDc^J(=;w@FCZ z!|UVdy`ob`;YM8vJR=j%B(7eM`&CLj_Yp>LD^aPNXiM^mw_}P8B}Sa#U95RR>(fElukVE;Ep}=;M#n!>(pHj4N&Iy&bYcxkbw5&{u^ ztk(PzrSivW$Y4diL$LuL`l0q5EWlX^asO2D&<}ZF>nO5Y4LFA$1FaejkIYhNi(%I> z9?UE1*r(1fR#@22w=*=3=drJ;_Lw?fVEnmRzQNJlq}Q()P#a-M?VC6N+Xkz{9HiVP za&|k!news4GYqX$-p9$aywK6(JUj?#7%7z7#+0$8h`bWiK7=^J~9XcW^P 
z5+Ef8_Di$Co*eu@Lf;2t;qr;R9q;I6DKm<+w@u6*6cy5THH4|!Zg=;Hgm=bsw-M4x zc5KC(jyi{D%9C{rbEQ$34Sq5)oZSW_vV9l+_&|s=*1#+N8i#7DE~PniFDi$#wa<`I zGk{mrCeJ=Eh=lk4=6u(Ttf<$4Gqc9q2pg>~48kz4HhKYL_=|Fr@AZbJhL6Vmq&HXX zdxRo7VmVpi3w0dc2O9xV92(z>Ia>f56nm{qD^ z7Q3#j1)m&4(s04v0{;>Fg|_cEJ{O3?xfS1xo1|7~Yt~p+bIG_FwCq`ql(d)7)1~UlA>8>T7iKtVRe7ve8Tc=GvoqDwSxT=b0TJo$swPW`*}P zOh4PTPq&QkHBrt+Tf!9?S060Yvk^^lSRd7@AwAAtS}AgtU8#W0eeiv*9Er$ipJmiH!{+wNDX#_oH`;NCPuFrl*cE@Y8kK`25-q&0@L7Dd$$S3T zd%fNQ4pO6KD@;PpYnvH4@$@)TVG;C2j_F^P?`02}<;O&N{F5gA$=yu4{a8fd>L~XH0eA|02J~ONt1BPtZ7#Ut+2vmV-?aL`aZM!LL?si zeY3B5adN7)*suk?Lgf;-!5ap?Db+8P*294#wS@5 z)pHeBa&_OvHn0P7;*?9|NK0`Wt!zw?julDfy60xbY=hC{ef$}>*PS7>B1~D_m4o+^G~3u!ANllWl{>i1I?&_#~{!|rnB0Z7>vG3)=r0kG(!csvhn+8gKeN4e?a_;9UPg-e{_UnsTX)J^RB3 zSDHjSpp*N&I6M%~Az}lWG=?sZeMX+NZpWi>*Oujn=)E{ZY)qoCsr6yb9Dh?r#ju*n zfv5TU#dhTmu*tsgnBIGc)e)n$RgH}M8$Xz>$hcQP#Kfh-e+xzBK&}X%@FZet$_*k! zVZJFB88PL@qySL(IHQf?t~;&1JY|4BJ{rJV>dy`1qG7NhlSKtO+7j$xkzLNx=h zmrL&+y4_k^`ep}A5yP@^y@fhMC3daEVu%=R<$$oCX_~MlG#VtZgw*}X!U~<<@Ok@Q zCc7oqoh-D#ALIF`^}Fxp@C+?X`fhO87V%CB0+` zI6zO{E#r2np>E0yiEVRf>n!yo5l?NCZa4Y)#$ss0A)?mD3g=oYCo=V-FI>|$ph1kI z>8(mR2xV-CN=|Y40^=y&uf5E;`p4ud-$_pgbya5N6a7)8*~h$(c1T7`*#Q z<=iR513>=D@H_r=uu}Wviy}rpeiI{oPbOlJXk~})hnHQkKt097iUk=p+crC03<620 zq|iHw>r=({Ss>5Tfh8wO!>E`Xfs8uWbgStM8e}uT zR2mROhqHg&-fqoI5DGh73`nR9fmeJE^~gmbvg>LP#65)_FO~kiMV?@Ln$UC_wl=z z2J$}-vA=rR-Z-;pJXbLLaK42gtfs_su)qc~dWc?1#&LL+!lAe(D&YXu3%DXO$-DEe zM+7CCOY}fC$q$}f1KBk>;3+sjlI2u3G@KxrzB!+S@9RRCO8HGA4I_P}bW4^f*t$`uobW`fAmM$B~n$30g{tR+%$ML=w-Bf<_?E1 zf^Qklvxc1KH6MnO@tH458QyR}EA0E)HGKYbOdu2VKRSLQ3-83WzsJG?@XX&)%bpX9=e_USVp-av|o z(tsuE0!S|4gI!bNr4>p>cwbq;u35w=DF(sEd?HCuJZMo(w1|BBJ_l;I{~-(N6dnVN8FwM<0F%A< zlI>rxd~hVoB^f)*Bz!;-09l2ZaF7-|1tjzX(xR+{=t==og@8@-lJd7}gr}>8LunWPd zJ-eoG5Q(V+5AZl6vlUg(g5E)v^Rh<7PMR4ItTA6uZtz`(O5KC%B= z&K%UVv3?;%;(5^s0FXce>~W8B6P}?~dp{gFYo^@GCC{Ki*k41Mo_wL#tf*EX%>Ts9 zaYk^D2u=_f#}fD%_2H&ahv6b(%Q5wd3t#UYm{`XQut`&({1+`|~UgN|WZuX{TjEjX;i?h5;au%Ut1X0aOtX^U!Cc795Mq&7t5rBEKx^|PlIyYqY&n)XXbm>2#CiA1-Gr;_%0R|_300_k$fRUy=dl{6I z2txpj^fg>?;@WheqE(pp8xn)MBzPJ4R_bz}l+Vj&n0nBtEa|lSZ{CR>0 zM*@Zm_bwU$j1hQGSlckL^S_S8R~6?gt|HHki%`fk(ybg31YSB<*!R22bvlL*v>)e0 zI?H(pmr+VhFGng>JN7x87U^i8h|<_}lb%^%Ds?vx@WnYul@>tuCGYvH+$LyGI^$-2 zf1Lo)?ZALF3h#7UkZ9zjb2=~7f`KoCt*Gf?Mz}F*(s9Hf0)?wdq2Wo{gUa&lBJ^8$ zen9ScQkKt9vp$E;{70$c=Deqy4W5(((iz@&PCvk+10`=snn6Kh)CMMH0aR+HcRq>Q zcbGVp^5vnRoF9R0GQ&M~6^YXcfOI?ONxJ>#klJvM?Q*}_i1)+BnLE{Xa}h}bcKp5X z9H&=DN_*ehYq%ai=dlB1oY_;4+0~>MgIP8EVVOz1#~%+0eTJ-|Gm?;h0k9JDK|Jq3 zxxaJ1tw1}WbpodMA(Oy)zRQwujY)FQ1JkkkeAc1hVd*W)A~}Y{upn8F|<)6m3RJS@_EsM;l2skWi z2)Hgs&DPLm&2}g2v_~GyrZH#sXKPUJ)~;~-Pz(blkeG#( z#8oNT*Zxnsb+lb<6;9>l>y7c_Ej5*_qbsQN?f#mv+5`z)yV+L!wm7b|*|ua!Y+??& zx%QZFP&;T+e%i{4j&!$E)6>2!D6e24gUHJ=WEY_O>LU4woX?y|?4sxo zsn`AYZeR42w4bC=6HPk9Mjk+@_NIL^UNuA&PR=b%A+Kf~RpYoK71J4i$Y;E3fcA!- z(UTceeFnb$xFJDfMl^V4@AiRaqwBA;AM`~2L|Yn)E`YE=TRU!pxT-h(DJ^HM`2^$O z`Q9OqDs}Bjv-O)x-wIwkzAIQ%v_irF)nmVMYCEB}g}l)YIF~e^@{NTtFhUvLv;g`M+$K3YoxLgFq%967v-f+%W6Yr;+&9nh8dhaEvokj+lhWuxrkacr57yGwK zcU)zb`>VwzE>p4rbH>)uZ#X`>n4_2VX6BcM-nOV#{o@?kV9dYRhF)@kd8|(!2H&=E zO`#BSVl!$b8^~)bBhCyY<7pGgFcxKC@3G>0Nad?Z@Uzzo^tpIkAq=CcBhopQ^h-KC z+(X;s?SWEf1FaH=>Y1=e#t=S94rjUx)iPzpvLUb%^ zbk&!M=<7g;pYomU3rh=c;^{ytQH!be%WTElGFh_?HUaeqAe7Iw#7J+Exe}@Pcf!EE z(9nM0p8O>0jFCe0Bf#ZU2}inu!qSF&mk#8X>Zm>ItmgmBY@5^ zf9qTV>XzSsbN5UN9<&f4jl^COS|bbH`+x>+);~HpQb+3`Dg@cT%{d&YP=`;OM64M) zp_3tf6_($0smQrH!<#>OqhqKXntr7C<9^aXofLjJr(q!dT%z(Kg>HR<+bm(I^kki@ z$BKwPt#8@0E0e6^CkI<0nGme19S90xpUyotK;&M zZu^ui%g9C^+80;Cn}f;rCbygpey!g!-y(?@I=<3UVg3k|tCZ`t2Yeg|@B$go^k7mW 
zCC2I7&vz2VL0%wH1sq7696tdqWfTMi0$6MT5WiBrb$%!n} z8y@U$mG&6Vvz8jQbnDF%<6s6ER1mW|Mfc}weLk@3E{^Q@lqsKR{4FA*`3b>3=qB-s zq!GuKeVJDjqrOvd^ZbwJm!uRqyjw!3W3`GJI1D^C{IelxY|U8LV@VgPwqr)^bdq(R zlVv>KS+g-ZBF8amDbzE841m-iXspsTdDw_O-UtsdQGWjrEw=aN>TTV5PM5yowfRKr z2p#jGtql33gCHeDTEpDm4i?kyBwhoY>c;~tyBjSjo!eguYh+%L7nNCzWtQtE@D0?*>%*-oZM9mMk$*Q9EYMe*}0$p;TfRO2>=3YA-$DT(2Lso zX*MN~VEcuKdBW}u(PPVes+>U^^0__mT|_9dh^oEEW$srJoi`aVQS03Cr%9$2WNR|2 zzE!d*y{pO|LLbUkqJ4KT2DoSe}+lgCi6J+sDxgAUGlSL|y{5M)7pXWTRP z8oOI{(P}3IRA}@^Lv1nA-4FLQw!?J4?ttQu)g(@?5`kK6#H0(RUyW&jZcfDbw^gUM zBmtFIYViq(8`X8YDZh#uk0L6kTooIBZOST%l7UN>ism?JkC%_?Kt^!h(+F8{2Ze$PW0UC6vNlbKHq43Q|%i%KVhP_RTQqH_L#6(PCQ*|jCJz|BN27|veCL3OR>`ypv zw&xFZmf;S1Fzq7_7eQ7ln!M{+n8%v*O2Di0=uNMDJX<#k*i zVf^kIB&ZQ;tvAjao7NK2)|=5GKDYb*<8vf=kf0V0h+=Up*eFVmV$o8z&er_6T^pVL z#$PSZt#mivx_}Gvp=O)>K4wQO_cOl~j~_~sRLb^KQT^2e_V*}zrkz%Y%`WVC8;`fF zirPXIQbepoXzTK&@MmCdjv3trm9757`io4sI5~t^&O-0jvLwkwN8`BcV=N@5;`VDm zTg0Gl)|%-+1xxBg!scunt)8Ic`|Fe`io(JQ{q+?*#@$I;7q(Um*=3Z6EUJ#x>ndbF z-jPKRb;k)fE|=wNAI9W+tyt}DWTnh5qR` zku#@z3kaOme|tW-EVL%mXX{S`5k&*#Xk~PwO^p(jOLhV2;*93*`X}~si#AZBqH{;A zqU8+`8 zx#AI@;L;Z&kB|1(du=|X`h2^&+VTLt)SDH#zh-4NeiK1Vq-mTosaqwrhwyHCLx)RB zi+S10$cMAQF528~amc{4!`!4Li&{3IThDl+=1Pcz#SPF~a%;J*rIT|yzbvzBEutrw z6ZR~=zcs^o%2D!WV#!}t*i`Kk+r9IrxkkvyI$u{>-*e7bGQo2?e&J0lbr60;K65hu zuz+PN1pa!bA7MCo{4n^N0j2ew&chMGx|IcxJ!aq(IZirQ_tw}O%tDMKi&Byou5js( z90sh%9dS!21vd=o-SSw3aG#s<7Vf|T+3#3{m^0MP{OBm!UHR^maIJH6p|IP_y}Q9< z>0apv2c?l29=mqdd0l1w6}ZxKu-%0uSdrUe0s12+*d9$=T{h8jK!0R&=(g~dqLZOI zN@@2Cpm(-D6BS~Mcus$b?OZ2T{b_F3=UJeg&=b@jIy#WPb{-UI;>I}|9Cw3#qXxZy zHP{GL$3Lc*5qS1^pu%21g`S92cfDq8$Y4E&*dSp|)A|2yO}n63q9`qlQ@hn$x4CGj zILa5z`s#YL^W>h*_jmo1eDscM{UQM>A6JFlx1)Ke3ms>tyl;TCtnp^kQ?$E@M`Ohl zbx^v|aCmd;HS_eJ868Kp*9#~_Y`s}Y1%IV1WG%Z|j;ydETTMRsW49;lB-^mQ=Vj8W zsl$)*Y}dZ+o&R<hX&aTCZsONM~b|0~5|R!ZI$j-e4+HD`Ij`MPDtwE1$qWNx}5l zTS%`b<1myw#F%p^r#+z1$DwE_B5`9-eq|ORw8rao|NYcmo=4*(a7hgHcOim^1RGXd z6tM+48JpiGL5U@WYX}aKk6A|tlk+9gQvdoq7CTnv_22@T?;K(Y6mac^gm+$%Z9uA} zG2QqYR}}1{Z19Bcd{CTP;R0saql%|?yiRwwKmsnf7zC(6b#r=1w=3B36SJE2R zE$n$&jLO8``DUgB`-LvWH3yTg4SPJ5(L4PYROsDr{lx%CN4P9`aB9_bx}?so&g6tY zGw?KiA%Lxst(mjbk`3JfEmD}C?I6T#v+t23;L%P%n`(UQh_hD+nhoq#MYdxKW*K;}$1F-s0iD1o|x4ry#P~aqE}nQ zQ2Y2uk<9fp>NTF9eXuG-*{I`kyv@EeJj$*^>{0ALicCf}pWL!q zwvQ0?_L_3C?)hXskb7e#Sm~Q$ipb2E0?5erI*#*ICkvJLO!qJT?9wTVsBaGZ^-<0o znk`M(oM0n*)x^C*S|6ee3nT;o1jZA$wZdwQlC-4JjEewmsodQE*yf8x?X}KQBmXR~ zgIjw!S(G-Zs!P)Hxn0WM+{;(aBczAy&GxxI^Q_Mas<~b=g}N1JGaJZSH*c>DMT6F; zoK*n^cW6zapp!+Z#ej^!R7gs zcy_6nFk+Pc%ND4^soV*)dJftE5)BweBW9;7DDT(b;_6?T4xkE?3<~cpV2NJw(cZFiHQmNmN-J|0-a&TN)CN;)jT$goCUb$V zZTcMip3yr8VV75@hDM_lF&=A!c=s(UM6YG4@sl&o-qj4{!g>Um*1^1objneq#-kN; z>hVnzTqzA$kJUCAC@Xuex7uaOk&0uk96YM4o?e-#Jx`^cS1F#o?*OW0e-bag!`A`> zysE}P9$P{OEgxCw!e5Kn0EQnD6ew z)T<#U<~Y-~u?kO`E}hARJz*ndW0j?48e<%_2Dl`mB$NiGis{^YgUg$hNnu5^R)DjFCH&z+7xJWXy`xnueP{p zKtt!K;wJy{M{k*#A+y`=+-C8H_-ENZQzT=BX%P^~ZLjIDc9l)kHipkMQpbAkdqqro z9^<^yU>#s(9w_==u;hKbpEBDSe&G1fs^f2-x~bApcT-F%$2^Dhh zIcF5H((_GuM5Cy#}Ar5-=}n23`x<#Sg$|B!LF zq~jIIl&~%%=pzYts~nK#g&u;Gl5)X3`#Y~;o0R^sF~JHO#(`Y5XmCs>5L-J1l+|ZQ z!`v=ZnjMn0MYG0MTPXJ|_qQVhPvamyzUbQxI7&Uk-J}bYMwj2DuQY(>DtGs*I}IBY zg{8?&NgCTz`C2M%R#94qU!6gJE!xg4=ZO(HkdAZt&_L(MfY;_Un-*FbpEkX}VEgTv zt6lO&{Uf^by`Oh0f0XNC&uiIzX6QnH^D&Rw8pL1CE?4!%jt=1IAYIwMEq@sts0iCI zZ29%eQ8*Ssc{7L`Nrov;$nY|SG%!6qQ}&@x4Q&q6AD3(W76Hr0%Jcby=k$ zkrt(q9hL3WglCe68%)TccIugrL#&U3H(ljiiE&5kGuKkjyoeez*B=}B^MQDSFIa{T z2FI2`W4^%oNH>R-$6mWx6*{%n^>>DA3d&!v!x5SCvrAb?jUns(8MR0*KgByJ_Z~Dm zgBE<8$vBIKhHsq>pjv~ed z2Z0Y|3{Duw|7q^9EuO_vn(%Cw;3F(^D>v_m5p;B|@u%nrXDdlLdp1nqk7ww!7U{E^ 
zrSxy87ipR+KBoGcHou8k=hRT?y%&Gy=hx46pGbW9;+R2ztlYR}WhYrW))gRcz3(>~ zs#W#m<%Z8@qsCD#{;OK2H73w4=S0z<|D0i-=_BI)@wwgy-391EHhEL6Ui@u|jG=6| zsnSOzDU`|YaM_jZib?D@FQZumAbdR^@AG!j;^81OJ$h(MHAhDi&l;lZu_X*@p7q>#+kM2B@)r?F8|KMi zyGiecZ42lQQPmrLd8Nkkx*jYn>k{T9Pbf@SL} zhl}au8}rutx4(yfoIgj3C?P(eu)Uf&SeO_y@UQj010ZNG+7$wBu(xcub8!#s`f zeMKuFhlriY{SV=~6D0nRL5gaAk#H!wev)M4Fv#$*pSh> zb-p9!04~!K)o}koi)}5lT3S3`_WEU(<1vzfcMjFqXs$Yy^TLwfRhT}iE+5GKDQ$sM zTscb~dEauWEGHNiQyu6Fo;6Df@5YhFt#MKpdUXyzB@FT!!wKmVKI~_LKPrwUX|K+b z^St|pO6cS0z`^PhjJO>x^sd7!18-rr=v-Atx+z;R@%Y1KIY}}ca2)bvb8vgF&wO}T zi|LtXn71$Hn6fAuOd%|hS-(|6Tm62c`8m&eqhaQf%hZ1AHmsvuM75aL{XEqHll#~ z@1}J}A=GRF6Rj~`7({Sq*{E#S$qDn7D)Olp(iJuQsLOh=g6!85B(Yf+c3(@ffwwo4 zzq*K6x8SJ7B*|ScPaO1q64dV=ZbQw<%{&2k^w_?Am3?z*;R>Zqit-eZXT#x)&w^hr z8)$3C+y0EPzcrdq`pz=04(EOilmJc)Z|vC3{S2gt_3ZWls7 zPZq6B?cc;O%Ztb`QT69>*MNCj@l1O!O;>@p!c-X!jn{Rk;_#?$sw4G-Kag zw|nc!Pf!NCWY`}(*Sts$@@l23Ibp__c+HD!9<)^s}8K$j;#+`pMLxCf$vK? z1LP1tjZ(~j})8#} z2Y$z8+i9V5Z(_PDWHBFmRKr#5Rgz)PbtYHQmX`haCUy0^cIO)LX<@r+qDSgQz8r>i zd(}<_3CdLuZtR;Qsa zX34Y}J@yh0s@x(AYrJX;&?Y~S74m8Q7Sso0%KNScwMzW#G1DAB1M>= z5hl$@NE|J=DaSflbiFfTTkNOQ9|-z^+6J?*te4T(2goG&%GY^QyPiRvarZ{ck-c!k z{Tt7Z_Nml&?9EzU-sS`qGqvlVNW-W)%V$mQ?3ZhF9%ohKFBKYA@(RUy)XoLpJ`GQR zW#0X;Qt$6Qa!JbhlGJ){b=vyto8C7q{vT^^8CB);hK-T}(hY*7lqlU@(nxoTNJ)1$ z5+WfWAT1zBcQ;5$C?Vb5(hcuy<0t;tS?4_;j-OcWy`Onz=AOH+Yq&x95&o@t+DeOu zfKn(o>fvR!^Q^o6gyY(lWFx2peCTd{UAb#zAa zmQIN}MF`$@TVeBR2sMyukzZRei;QPmBSw^Tk9g_5v?>!(xDTozCY*PQ7zN-))-P={RJ5}L6+CSKLnoV70VwjQQF|`#(Ay+fEWX#PD&kjW7j<~1L+KQ#hhNq(scSj{AD$KU^Ro;nW1RL>|T29hH z57}z&-oae?A~%tPH7twT?%3)ASO>Qh6QK~IX_KY~;aX*8^h-L2bgimRG&>m*j( z>WZ3$;Yi@UQ2k_cScmD@q1Ow-IF~HgW?~i{dm_pGJ+3~l z>N%ST!hI0`olzKtsIC)G-DZczDYv|L{l_hjt*@ciA4Yt6R}}e- zk_6&B)xO{+lbcq~tBR?414UZQjP$qNMUe(lG!4x48?1BMrFuD9fK0$Wj>-@GU6$wP zY^**bjRG;v@)urut0VU5kF_g%^40ax5wTP|{~&E#PcKMJ9|O3dU6T1NpiMeoURikd zW@GpE6i;fK<{9d7*>B8@0jCKszOQ5@WlaIlW-K6ktxIWmh{TC z*J}BTfrAWLIFUQY{B7~s;?iXw7%ImqdQ#rb{_L{E>jMnD7?p`Dt*l3}eL#8J3#ql<%4qg*|mOH{H?EW%1 z&-xC41W0RP1|5HTVXI%9KUrv8_`V$RIOrp(De$7~taVryYn-SMWigUu{25-_Aw=y`$vfg51pO*Hdn@%`U8LQEJBBOA*HOsy3uMAy=v2v`h%T| z{SkHj*XNj~O3E#M@J8DRyF6;Z{H>2EwzdAjHWAYS4CjZLv`nwC9VhQO_V@`IpY*~D z>MHw9543Ek*w>SXpt94`9Zdsae`p-pkZ6DLCFb}sD|W5WgUKkT`@CQ3I%capAF33m zd7QQg&aQ*EUL8o4|L(muC9z#JgXSdnsdR5yJFD zRDUv$WD&_UM80?(HXecnx1e!9*_K$)Pau|AX)mo9ooYNa`R38iWve1kdW87{Ko3h-$Zyj@K_Jqq74N=X>V(+W(e`A z_rTVd+N-aF-Z7I3FoMxMMbfiI^+=Xc-)5`iRr6*Zp>B*+&~UFz-{Dpw+Pd7rKHz;z z6Sj-~@xs&lYq`|(3`T(-TjH0()38ar&fmA1QU$&B7@96w=yY--Yj}pCM3YbCtX99D z5kEzJls3cHi^J_JSwXpBUE|rw01f}k=%uA5?BGRu0c=HA9Is%k!v_9bB?MBWR3UDu z9U2cC`pL4Od)rNpflL>qJ!6A@S-3jS0fi!~`;b1hO^5IS?M;qI>!z#zDB~N&+MU(G zaBMTZA9l&PkJ`0_YIk_hj(|K>{3}mSf69~aPkB20ZJvl7vl?aMd6qRlfxIN8Zi7=j zAf*5>UJO})R4VLiL+@_B_8iH#o+w+@HgoHo4sWKaC>?2d+^haB@2nQ}-j|)}*&NMT zhq}Fqvg|jbspB;4(w(|}+CAB+o>uZ4`V~2PH8~$4!Q(np4~coBZ&SUmKTER+l7h~Utx3(0 z^uy;ImNOJ5Gbd>kd1sn^E((|XGK^eoj>FB4mEmc}sxDZq zCRlHLtHqn&S7SFQZx<2Led06lp8d6sT{l&Dd31-T!yB!t9sqoB$`oWZs~^sDPXtit zA7!BOKA53xfRS%a6PG|G6DKt=a|73zzG`hctXNK~5FM`T6>^i&T zWED$OG%&xxZc3rQJ5zu*pSUvMD%TUoxT;-g?8WP*(s;AuZvX9EFUw(lQw;tQt1Eu6 z7;mY~H!MOnqZ|N{p(e|)Ze&*PAvnsRRxNtbqt12*^}_lqQmnx0k)~2f>yi0)PJZ4O zxnV?t->UMBm3T$K8ONo6{wq#!x=0cBG^sI~hpE}fz0u5O6C6pTo*<$eypRWJZt9!5 zeQ8!r5G~x;uj?EG!9M8xDcGRc6Mdohs-I^OV731%_lRZu^3O1S`e$Cyx?f#TwWNpG zM`I#o>{i$~k(WO%gW^w|)BPmQ+GB$q2p7>qPIYvG~1^Y`C^|FQI7q6sA z&)T1N(Dzy6v+u6}Ji1Z-USMZ=oSs@9@zBA59EU%?ilMYVMUJJz>}ejOooaL3%`k9B zYi_R9;Ib~-ihng13;7{~Lr}-B(rt11qabmw`Q+L20Llx|7JmeE=_Y9Z-o|*rqgHtP ztV72eJ`bf$k@ZfG(ktbfgB`qZ(OiM$pxq`)d|ijAghCS2H^fIO&892nS>8f!w5xz{ 
zr(7l9L%G?XrHb)*`h2j$d?XS-)%)oSeRXG(P1Q8aX?G zO_;NyG(;O6E9$DR|jk!fYWlkUxw{x7bHtk3t;jo;6D z#_^qN`a4*D)vL0 z2Cs~d(6ESo$yd25)s6KR{Gc0#IgYtxM; zlXrN`W911Qy9=+DZFj_%-al1$Wq-ydx6TZot5|H6Li$b0vs{x<%L&JrWL{^@Y*xXz zM;@Zm@9Mutb%>b4%$rIKpV&aSQ5r71*u2B`0D*-L>qw@pDB;PWa=0U;C>kL!vm#md zQOwp}h}BUp=Q*QJz1S<1`{dbSqs|Cdzm zt0%LQ(~VahLW1dg=0#1pNqc zL3W?HO&SGQbn0icPXUP+2wZj|Vutstvn6DrJKpq;q)(M+;zSmrjrE_tIIinpEN=i= zu1o~`@YW7|LhB+ua)4-O<5btT87Y|62q(;#J(CCN=(mv01K^3WkU>dV=6QuQ0)m3Z zZ!D0M%rgMuI`KS zCs*hL=?|evwTR<=!}+;dPP5E7mcFQ7WPhpuvHQtR?5_9NO98Rt*~;JkO!$(*IrD}k z`!c=SJ8YV38)8a;8_d)i$^9U%t_+=dZQWSHaf~jBzC5)-@Vrwlay7-fg(qoO`3Qkt zy;hjTr00S58w9gduk!@!WOi2136)QJ1-TUfsKE>>xK|KAAFtVss^X_NvTJaVQK_6k z)Od1iu9!il6UL^Ic?x#Rz_k~4poQauxQx0l(U3_m8>!Vf2a|EF z^Y)|ZFz^3n2_QI-xabE0i8;X3THDO!k*09~G9I9-DZL3!&JXY(MOsc)@>VMPZf**C z=mWBV9hubgRyNm-b=$@l7qYz~;=PWX^mrl4}#^qlUyqAfC)^iRdAG!Ed_|;Bi!8Uv5(obvIXi zpfH~<#>CN_^q2ha{3hYE9|P zJ*WEDj(ElSFwN+s0t+A`Pu390e7FAQujMv|GY)civ$@Hh-XS zM5WRFvS_aWbwwsX!Acr*L#oqQJKl(v+721(3tXBEo9=>kryy^ZKUla(9~eE}*$9+- z4f#VurW(e9wF(OYT>E@>3}E_~`H zmn#nOidY4y{J7*5H+X%-fh4g4@Nj>sP{D4#E^9$e0HHeA_&hGM0N^g^9_I85Sc3dq zdG(2x``hPwR|?ATB?zpaNGoO?#UPOw`nNQ⁢+)}3Y%W36l%>~3T{=L(&NC(j zpAb&Su4H9BF;?|D4oXxP;`~0{wy(yeSC<*x;rCodHWt4k12ACy4=#HK943iIa1_(w zIr%xHnzF7=rQFqV-rv6$YBQqqPjP&{u)*!}0w{#e9Uvy^2t!9zhA$q){H*$WUmeAP z+zu$bn55TLRm z{G%L3LVEQ=BYBSgrk^cyQO95XPriVZ^66l!?P8JMS~<|5`{_5KoRHyN z(6F7SZh}t2!nc%m2$~H-&xOtVl`9G-hD8s&Ss%7>q50>+6^)~{0XjrcksVtjyLFeQ@`4k z9{tw273-*<_l^a0*F7&6dtNQpBqebXsWOw|ZB@E!VQ$oN4U5qI9)0wD@NHtvw`!C7 zyv8wSEBQ;6&+q?PLZ4r8xXyZGjcEzr`53k^ymuaHB{~S=fThoV`I(Q!CB8SAbJkIp z7UZ^}Qi*1n`KAg*n+>E25vr(izfF(uH{Gl6@}4+Zj2QeRbCTvIn=6i#%H@?moUb@- zOTY;Is&*&wEst|rZ@b7>m?#U0;n{%mrxxwSE#Xx}rzXeCghgG8 z^5OgPPLCo;FJe&G?sOLm>VF4LJBYwj#tkMRdL?g>M8HF%_Gs&IB8O#3Ds}f6gyjP} z;tkVmisdXVF{sUTj&eh4bE^@beH>z~bsdvb5(~VBKi@8PQ2Z)(NT2!~+TlLz-dk+C zsDrEEGi%j27zs9C1R)dw=ZPrs1!%t$b~{>IkyJ+BAa?;2IQQ3o7uu^S2U8!zzivX(5tk1kUxi*_%+(Da^>UCU0={o)XBvZ)u62jlK zSrpXga|$o6a5)2r8+V%qLhlkQ>tFI+4NIC07YjEFJouA{TtLdKJZLaGz)N7VE%hk3 zOw5p9W&8y5Es@QNcDmN-9Y9;17CAi%0y%<%0m0>OovAT^Jm`IqeI;59^J1VKXsgM3 z{cSao-y|iBbKf+TI*e#>g7hvh+%6jxjrJ@3ae$^IWRmvD!_#_?n$p3^wM4ain+ASl zP}x3!^i&hFnsh&x4?9$GI)ZXvIL-FF0Lky0G1I!8#_ZVLc=L{X!m`tO;mmrkuRtdK z{^m$THm{Q@J?LC;0E?Je=_$!zeZA`Dd935=*{^f9Q@0D_$ISwKlrSOZSWGAdc@nQR zVa1cA+cumYlG~K=2I0Cf)J7$!PN$0oO!O@~gvQBS7t4kZUhBMhrKow;)vQKqg_n^)LRBK(mHeRnp{D>evd)#d~Vy*lo zhK^E~*Et4&Rj6u`wQK>KVgmOjd%{fz%-QkeE=;BBfUAn;NLf{05d&Va0I=Q1$)Fc6 z@BlPd#d@8GC6qdh-yIcxH+&+sQDdOJIIabVAh(`87r=#?w~$atULJ7%v^0>aZsICZK?|a)kKvVC90aLW&RFlqN=mOZPs}= z21~)9LRZie%HyonE;;svIJCr5ki94)yx|)4bT#lVfDlxBp8U*HoM8))O_M&*iW{Mw z0ln#_a2_}0uKO#PM_D4Y=v~q06usB<`nVe9H_cvB)*jHB)spi#{%My5!?f#ymp}Z8 zFV9R4)K5S&6lbGsHw}c#XVVn_`a+^@9|UMi`5-UMtxP?NdHI1nV(CxLi-Hm4Pp>Aw zWpp6&C-$ZNplrG_7I5e`4>R~k!iCGIC)@Lm?qwJXz|gaddG!u-5ZMj4Z2>h7+_J!m z!y!NZs$6cz9ndP*^(~__?An5-W+C*$#D04CK!8v=(we!TLkvB!_l;Vk%Ej?egkZvn zav(Bp%v6oNcAnpLn94<(SHLACvz5X>`~Jie!t}?`v}HH2ZT%JANc6wdAYi&`e<`SQ zR5v0d-b6S{cX^=OI`VdL;Wd9vu1_Plneof19+_xvBay<@CZGYm=bKvBTa}s2=h$=E zNLjU~DA$T691x^f!6-0REgXf^`{+Wo`iK*wIwi?HKOwkf?3h0b}7rPII88zSI3 zfN&+#bRFUJ;y*(Y5oZ2V4E2qKP_>8(F!KWVQ!j)A*8^ASFV}@H0fYnBKNfFMbA?+c zOg&BJ*i1r4K{AvFV7PPguvl&@ciGRK+6nA#LNr43#Vmz8KR zyj@n6%0V2!x@*tr{wB$xU?zU^Q4~mjxDfKd#G5`6-TB?tKvoM`u#CExB{Tqxg-`&X zsfOR;fdRy-AU%5WtiTHlv(72ENjVsv>4<7`Z2*wz{)&b~{|O!NAtj}LAzA69I=jz- zChrMLDB_YH0#rtjF34NJJ1Acv2#&XzG8#aB_ZYyayX>-Oe>*Kc06r!cw(rs&t3bT= z)SJpF_tdlZG$;ULPrho;?M?dshs`4rBM?R>TTz3~=)Zg;Xs{vp&BuVECZPEjWE`9i z*%u@GzHtD`z6VhJAJXxnX`Ab+#&!8`(d3>%!w!G8q=fR}$0 
zSMP14_x}BVatNL{$U%ui9Ll#EP*eoJHqXTP`!O?wqN3wr*cJtN-*QUG`_c&kT((cZ zZAK1^A7sDiT+mOq{vxU)e~N5Kmbe+FkPs3e2)X|^IY#y`L5O7ih^fcBpiMS`%~++s zWD#T-X1~szMC}R}>E8+W{v;CDY3XKyLCn+wf;tjF{9i-?mN62rF$BqWpaD!84Z!dt zv>K}11q2l@DkAA_36J5^U$Vojeq{8Q6oYIKa?qmsIadK`N;y6_X%{?X2!B^31aR2$ z{p34O)An%^tD@~h4gy$mE*0*-Oy^KAAvC`L-|clyw9J2B{Aae4#*mv5ZeAdrz|{<~GBWf-2RrY@#RRbsouc{J==M&=|0d(=Rpq}$q#s-k(JA0wd$7A?+klK= z@F#o3pO+F^fasKZp6qtSH9VkG9{23Ek^>ry|54QoznLob3h&=yqrd>Frr$X@`-MFm zMldWOk>JV^3;ippU|xr9&kK9Hm|zsa@x*de6?vrtc2ZYC@|K~3oMJ$9->xg<*7f$5 zod12!|NXBAIYQqk;FVPE0G{G=XaAqc{kOvZ_a^|GzI7KP^4fY@Q?#SgO7$(6(ycA{ zyBYuQf032Kz-8WS$_cp{G4l2QB@Frh@&do#wuDhg=)B3%6qJ4Y6Znr`#$x@|red+p z^BFX~mtRN2a?QzEA=LBR60$1T34bBY!K^@7Y}L|n5PaD%0Ko>V!&Aaiptrh={#{!J znC}pF-kSxGowbXN{A>8qo-}WGU`G}PM*EXRDnQtERHq^IOnPaLjoS_C_ z*Lg+H=v&(F_+BVh?iMK`24aQ}giHIpsS7SMXwQ7{>z_RVg$9jeYjCf^rAXQ-q6@G@ zKYThE{(K77AX5pPq=(SsfWRuiDd0639*~&l>F>HI{~Q;vmfsi+Aa_GtTkB#*l3d|G z=KgQ~hN?f`o+OW5vJq2~7szCJ+3GDb7y{^$_-r>*-cqK?0IeSPpc5$@qSYO_Ic_~a zg!1?Eld&UHxbSao#(9ysXu=u8n;ryJ1}=O~rg*HBw6FvIsfE5wudb`BlhIC7IMg@w zUD&~)_i~oH-rY7XO>B6h*FVQEFg3B&_0!5dknp2(c^+)qX}Nuu3)i1dt=XCx6O)i3*7n&ScbV2FkGEUJ3087jhxQ~ z=BUT1=@MD<#>N3Eeg)n~@SZW@>?E(y(@GCmg(5mi<-M*pl(R2-O{#7ymc60>iP zh|fh-j*@b!lR!k&yzT?Pr1PC_HBn3 zJN52bWs|z2+MoR+3lLGl7{GfZS+xfw;*^c=Y2dBbNu_|cJ?Qg0lrK51>6ImO0LKpT zzMw$jt*?NkKNSJ9^Gxqp1#6r}-Fy}N{!WpE8qCyF%9njMjsG*;@NZGc@+QYJz)n%-K8*-=RF+c5m|41vcm9;DZ1Bb;&XkOJVLcO+W<9(Jx< zgYmB9ViuqOE19X(zqE?>RTC~p zN<|Ae311&^qu(0ule?)}|+7nE#a3Ju61!y#GFg-q# z&*YThP*p?0&j;xK{jP6Bh>uI9fpBesnr7rRGrKhd@~!uw!2cyIU8;;pMAR6#h&DHv zxaJ`TUAO1=qXcvA;zvY*cnzN{*(H*VAl7v+_mf?4@{UpnQ(JC-6=G&2e`zre4q$4= zC0)>O4Hxg@{cAZdf@nHh;RL8)ro26*EBapC5-4DU9zkqS=`h@?Ka!X(#0E{*Uc}h- zZyze>S~AezejeC8-rvJ@fZ^&oI+$-we@>YO$fhT5=FQP5<0;Y2tcxvdAxO`|WMhUI zu)J?JYQE`!LTo;Qdd_x93Wb+Pc%$8H!R${s>TgSEfz)v$Z1CCpy7F+3MQDO?sI@Aj zOw{RepaV>C4Kd*34VUdSRA&+4{MLQgDfc@+1ZW*x&&K*W2t?j>J&Ut!dq$cLeTRKRP}X3E=L8k>B_ceP3CQl8x%?WC|Ly)k!re0 zK`qnoM?Md{+aaHPI{iHUeYyT>T0a9_@r+lHXD;eUZiCLmC2v59CD+zacSf5ndcQ5A z{N5+095vAxc^+frIhLf2G*4Xh_S*7t%dwLXTwX%GXTOhFnmm~}n-9(&lwD@2$n_y! 
zR~*Bomzan>X`%kTnJ;kh&Dr9>ZpasOWEi{FB#ZaCE$ZA_P@w62yTpa~w2R56)mR(v z4$j_zx$ChMQJwlZ*kXiu@9fOSFNo}QQiR6o{+JO;0*!oY0N?8qa7Hj<5hK^;QIF${ zsBzNULm7$=X1ip_2rjJ1wO(UFVf5j+9`tZOhf(weX6{%m^7^#zIhT#SAaR2|oE}0$ z_}wQJt9u8gsiNsIv!b8avdH&LLbfklA$o5Y@pRCe&*hm9YIa!hwmdA{9rrkrAme{k zN>r4PgNL9hRUEGQa*5C&W1C+5&KFM?qood4Ll{}I1)y-TY`3^y3_yfB2{*{--=E7i zbN|*6l=^V%MS@T&$v6fl3>Epwgi7t`7x?M@dF=c5*UQho|f{I*QFsA#}-UwOspZ8EtTOtPG2>=}fEhG;ZuJ z;aiQYS~T6>a}#B4itk3^IFjC&%6iTxCmq}#uJMR)Uwcf<5>)**z zaAg!^Yrg-ej_^RtQ_}4buj|#`_se8X@9%=HtC!E-hGbUkb zsl_fu{-QLV&s3Z{zFK`M28RnkAB_^jKy^G1X&Nprf$BPN^AjU4Be`r1C6jq@3EuF`M2Z>cvG^iVV0^pk+8iy~tF61kX*~l|HvUMLo^UYR z-(}Bc1Brv$fD+@|b_}@3;2&ecoBcrMs{_smq{SWGV$lA2K7V*zU{=d z4>r{FVL8hLq*?Zx`miX|7JiBHru3Onkqx>?$SMPt^hRA|B)cI%4p^lYe!R(kQ;brh z%sZ+jON^nwmz7F2N!DSF&w3jBFi*wYOA4<<{|!M%1P;ZkFdW5>h@%PE0Bv- zh+KT!wpK2Xbvs=+=CPM5m_tX~1S z89uoP$wQ@TCIHvsT7sTTQ7_VK*{V4JOo>f%gR%4&IKg^>7VjurS-O&_SlROR(HpikJVZtBnf&$lkj_4pvx@Z@F-JvC13EGj(%hpiwGyR8jBYKm%T-J1r%l| zKJRD+2iJofay_~>(~A$bXKV%$557J(O6w3lnE=->9|ks_Cblo~tG}OH ze>rV>-)TjxpP!v>{Ufk%O_z{W2hp!40(UGzDz^?h;dS@n3bO-|Bru9 zJ%aEOhXDY?Y&R>v>VP3+Yg+=-RW3;Cz+-sGhsgMBH~9>BQBa z9VfH4E;Wd|T#y@0xcMFWZpzsVBBQUH3zw=zK5ZRuq!p(@-)Uue0DVdwi2ETie~oGq z5uem&`hqv`lYTRWs|@};zpl6u_tAd!P5`FyhgGX*OIh`6qiT}=GdH9QP`>&%%v^{% zx#?BsJNK-Q7If6VG-xzER|tDLQyu7ui%;f>Mc*!hKt0@V{Q<##{#j~kB-AxcUo*6A zVcoJl@gcKu5YkhwJ;@!}NH6S$r4tcm9#l9?aZhrypnpB@3&oK+(~Ze|fz74h-x z*oh)EU2-J~+xf>m2~}za6l@S6%^Z>r9VZwp(w#XA@a=b){f{{drchvwz^#ozI_qoE zKigXon2KiWu5&&~@f3k`q>lcg`pL<4_tXdzeqwJ?y95y4%g&2M|*os#qSK@ZQ=5tsZ#?@<3;;RR} ze8eo?$w-EuLs5aY4a^%Y6jq32L2LrI2d+)ms|Ht_N(kcg?25P4Ght1c^u%Khew0I& zPvH}u-=;b}nay4(eo(C2=z#e^I)HHl5+AYU3E_JZ zK5hkrmBq`-FU%9NOw@X&FIS+3HSZG z7TUx$_UkB48Dmi!uGAG4C}}tkzWHC|J`aC(N6j&e{?R_3+R@GGdW6J2Qc~?BOn*I;%a(#%% z%*Xvb6qg>4#f|34s&wSeK;u)!T}9he#%;ET@1hTS|2Lo?!}^tNvNaa2^(G>AC8dez zYDGRfy<^;!ERKn(Y~J6BGGpr37Kg*k;~Ko9+5$7#PcY}rxqOw5n;FgJIxuUeV{3cd z6MNsE+{q!KW!mJ9DGP#X8OKYtF_^yBTz9iRmyFQfOF77+ASTh7hM_853&LFuejyg8 zyd>J76DG1wMH`RDQT!fW`UR>c30CNk0nB_uJzHolf+^e(1zH1D$b@#UGC@SnizvQ; z&A=?4-3n=>$*eGV^A*K?F~bAWuZa!C`Q6aQY^nlcI_?%i5lhqv@>?L#QoOVZ6efx( z-9Q?QNas8vRrFwd#P}4NH@8v~OZBy+oyQxXDjUq&DmlG#$N~`8T~H-(mssvPSG;*o^r1DpUj9D3bY+TX{yrb!YX_+q zJ#b}P_j8_YaETXVZ*msm(-V;Pfz-A}u2?4Iq>m%fiEcl6TCPC~Ew%X^oIY z35mLnUaGGLEa-xq5~9mw1fSiXGz_9`>SmC54ErFJv_h99@H zOV>D=g4Y9Nu6Jm5=UQ^=dXswZK4_Iq{2I#GLri}(j0$^sF#&aA`{T5fvPTAbpEjj; z1(D*~M6D}GM8mpO>EQ!0E{|@+2hZMwzn^%^@oZ1`5X}oFu(B%U-rR`0Ptk$~xg3Sh zLc(M6fRgZ;0;hqo!WBtAI9#@?j4!0{=n-OG4aMD;Yg!bPjqRFaejq#yCm9S>!kG9SQ2x9IZQI{oka0A zide)>t6NN!(1?z-9=Z-wtK?xWV{a^mKK(?tFeV$9NPN?Q_}(kks}l_~Kh~NRV{S>mD+*L0$<#4d&&{KfrJCyv zO4y_hUz|AG1cFKDR}a)^UtQ&$pQk}{4-4u`NJygV-J7G|S}di8?Rq~2v&ZkUE91%8 zHz)*aO41~2v>i9b`XRF*eboB7a=$9gMpS=eI}UvxCXZ|m`=&Q;gDmOqEgVt zr{jyWo!9UgPUncsy`256H^j$v!)lUYN1sfFI~W8rZ+yxG2*~K((cDMF>GnBFx#wLr z9`7RvNjA&FuRpG0?{u$@$h&_JRa`^CHA=9=PDgewnru$)M$xd`ICGIkxXY6~5g*y7 ze>nzWaJdPgZ7I1_sN-t9F^G$Y?_C?K)3@~9J?^W;jY-e!gT15#c5Z=LCna$TWmNTh z2P7mnosL&~!z^)Pa%qyr*a8SW197|l`NgLOO#K=rKGHQYvuTgB{cJ0_8}AHH31l64 z!WxRy4=kD3J#>cO!cG;lN$qOtj>>?4_2g!6Ga^bq(re?;%yg(QV~Odr4`UJjKq~Ud z&a4D_muP0FEft{6D4-#}z6!=Ee7gOfX|9trNo^&*WYM?&PBfMLI}|62$tAY@8Q-V6 zb(0PGyE7&5jprNm4F*;xmJ{_O43#HhIC`bBu654K;07$==nm9vvq;X>{n*^`hMWi@ z-g`YU56uJ%bv4VZ_`^t8pH9e;WJQo%s}i+0$}LO-dbE~6ZwBcjaL?A=u6T0(Ou6U| zMn5g-V7|rZpc8uaq84zO6ghwB607%B#e=B|llH#5t{wsqB81M-B0W)a;;)h12cwZZ zst?zD@fr17lX;!9jslRdga$JUH{bNQFLt21{P=+xuaJ|pl=ncXhSj)hj+(|}3?F8O zZ$SByjb0?yydpC*2lZrk6d&Xfpg66k{WjP)C(6=jBn4lSj3icAJV!5cnb+w`5cOv2 zil(_)s8Gf2H9l*bEGCCd*BW_vY|pRh>Q6FBuU3f0Sc3W%_1-BZB>0S0)_rA*zw09` 
zYS7s~twanc*dyE5kg(c=ysi-ZXn+@e@@OUB}>>vbm4Ot9EKv{h;jD;I>xDyDdl#A_(85QVT&q{ zMWSd!vewH8L!M=4R9Rl#gGUsne&MAiQ0++j!ngJe$2L1y*_!a%^6*MXdtF5~-HUa# z?=U0wyo|^rm?I&@-EVr|5~x|!X$_|NyrzYrzPJmYHv1uMBq3_8>Y>{tJ}jc!hpxI% z=_OD2&zY9^bZ4}|t;?w!W5b&SpD;4BrGgpTa}yLT9%e6xka>Q+gI(#$of@*()#_?; zmU;D{yg+~(>QUuFO#5cQAP$FZ6;kCDU8lt8B+p?Q!$cKghTdC)lzpjp*iR~=?>)3V z=4+&e#yX+1HxrYi~bm6KWScvp1cC_RzbbuD?aDHOiPgEG4SH)YUhEd0%h2i{fncs&oaTJAj_XFY8F zAwWHc`YR%77&*o}b^kDnZ#r~e(d~heRMJSJdq>IokIf_ftw}?wy;Ug#;}K0mWv%a; znqL?^NY%`_jARyOU?+yEWrh%NZ47VRKQco)TuGF(YH7~0q{0v^N$uYK?I5zz`UaXT+~|dL+^e;3 z$*VGtC7r2WK{5}$DXi!-4Q=@6_g)jS-Z@;G!QB}7RKUG(MjbLB@rDrj;v~p}Q?w*f zVuNJ5Rs;Z+Pes9|AR$&Q*x8$51= zZE8*eiQb2yi$_~TIBiZFVA`Q5LRkI_Gh8^K?7$Ng6K`Eo!`)BS$+{Aab?IZV?0b zEjIf~scBR_qwJ06?qpcPf7TuC*>>kB&3RzQP(ApC#t~49Ll}gM_y0HB#svM~ms)&@ zc8!r4jq#4Jy0)}@EhI^7y)`tiipGRQ6vQ`tI?Kd?RkLU|`Ok=-F_o=SdV!=a04pH%$~24^L1Zn_44N>3YGpN5cLO5(F?L)!7!fSDb>OlNJY zxAiW{TC-WgB@VDpdh?&^Hx%MQdj_ImsyV6%G;@eXZCQ>#^LxH)TRoY|YyDOUiT%&#c`J_4rGkF|~(e&1}+I z-SE&!QrI&y2?eMi4|XT=qdvtHam5t3mV!A|t&Hayfwv+Iw%5;EOq!1@tKb_MOWeDkaTX=kkP31-s_%Xl1A+4~?1Yqw{vq@eZY zSDM@Deh9yAhX%hne*yaSLbL<1guBuIMreXzi9K9If0`!|#mPeL+u>K}Tr0)Q?#e z8a3Hhq@ELmq1=2Dv`>qI&tWWskuSJ&Q^}Ps|Ce+#BSe{`w(EJk#ohk;Bx``e(cS2M ztT%&Rz@0bmrQFvihl?&w49-tQ`Xx5MXW$d_9o9KGUAZ}?>CeufF*mrMpxr^ezoSB1 zh$f3dBS(Pn0PCpZn$I53?dYs!=V*GQW_z;27gBe*lkOzC-5HxD9gFo)H6NKI!hLnIv%P%LeCIKu}_ey_>p*?JvFy_+`>5x1Xr!-)cRi!wBwo5eU;8aS&byYiIfCbLjox zYYdZzlO7X@Hx1Yl(7B!_fjeuCv0-pqH;pXKSNKqQ-|A|auxx268k{L@EkX9Ws8xwi zt0Xfxu|2kCNXwV+=56-I;#$E;6ko=BoCo*3l(m9qW|VXIV4_`0_uHWmRXjWGsD-GD z=2e-t+!H01m>v5fNdw=TA3O--;Va1doQG?#kuLva_y6$qmO*i?YqvJ;?ruQ?K|+GN z6WrYs+`X~JNr2$)8X#zb1^3|YF2UWUf#%Gtz1}+S*{U#a()8-B%Cch3_%3d)6y&G{LGW|_YIe1kP{B|W+Vc8}Nixr`{PIFxa9gW- zCeB)dO(#6)(``w-wKsiaxU_-muETojyZZV5|9D6if?AElcw$bcrz-{`JDU&Wff*W~ zaN!&@G8fiW_)Av!cF4Xb45JCFWgQRAWpj3qz<Op_Ly~7KDttnNi3GwoDATMxE zvJ}7G&Vlbc8efN_;B)G=89Mk}I0+NdoxUBAR+axsCwjk08MW}{vx;oW#t3EU8d9XtZ4Q#8^hZqk&5i!)fLaek~EWdMDn^R)|Q zsW7nTev2CNJP`5B{~Ta?l@cI2@TY~-_UfpHIy$z)z!qI;O3NnMnvb%NO-1F#gAj@LHLrT0<;%q%oqwP!NaIdX3X)i?Mws zRN}@%xx%+c4R6TQ?*UUo-QopzfWIjiF?k33V#wGS0KYmZVVxkck@#>M*;W>MD-4fn zlvd(C+uz4x)#8*x5A*^@W;ZnW5nevQnMrnPh+A80P^<9@Xb!OOdb=;6KOnWkU&aW1 z3!;-dYp2Hv=-7s2%UWMuaAi>&dl0Qv=*Cg4^}Z4jR#|q{y{3-Wiwgi0q|*5sE(FBE zGszN_Y=p254tlLLpm|KVaur|##OgCVHrBzPurW?lw^v~M#&XC&C!>ikzMPq(aapI^*x|HhX3(?ErP(z`Q zLKGsd1pinWBESZT!SWye5y-5VneQUdiJ|clGJwb&Nx+V<9&m+8uLL1n>3D&eb6P;S zJ7whv-W$t-eYo6p`3xFg^A|JMsUR9O?~i#CHkix7(fV{INwcO!j6*L{FG z)p)jm2#<_i->*Wo;Ra4cN828cb>K#cp%jG$=kQxNl@K`3MVB43tSYm)vjSJlmJfsY z;yz(Rvm0ITAc2LsN_{b_Ywxq6I)@NT8Rz%#K?3YIrkMnTgMttr&UAZhsl!0yst88o zda;#sn$=wYYyn}l5e(8P1%H`>P}I%pj+bx;LC-A=;>$3_5dVEh@d;ac2h!nH0;3XI zD!WPe8-vE4<;HvQLCfVv$54X7#HdluvzDh>67TO+q#*VwpaB|)I3eK(36I~p^!>1T-{+zfXbe>f`sUt#Bn)L6=rwk zNj$vubK6s7gDCj^7^=1%6Nr|%<0j+ zKLMdk!q)1+v7QE|N;D-lAeL?=zCM}f&oM>Y^L6)iW0VXH$I!K7_|m=Ty>g0FeuLM0 zZ(+VM_B`TP-p9K6h#VUKISxi$U&E%1y@pQWOOpeW)TG-00_xbM*W3i4#Tet!vS)Co zLc0*!R2YcPRTxkqLwH|bj5>Td_;U>V5jsNNN34Cb?)4Q_xd7SsXY)hlM1Ikf%#4E( zS|IrEg)QtjW}cxwB4(kawed?iy%U};F{wSgCza$zrudsTh@mXaWr8a#7?DT)hwYr>}ngN3@u5L z`dI3?*~Mkob~*cY%CcVg50kyajLG+c#ce>!Q=c*&3lkLxiL@=q-J?8M!& zD8(EOyD+4=4a{^gixm%@^#QkPl8Ki`64Lvjo#pB0D;ZsHIh&I0bR#1k@XjiAuse4F zv#9X}g~97gLL^O(r_pZ#rNfsnk1((&ipaJytz!tSwEvE?^<_fi0M)M~?(w?1s8d#y zxv0ohC&>6{gS8BUNPSa8snGJ;x6pGwKF>Yu8}^bTf^k2hBEg*=|byb`nfk~6j;Z2LPD;obT>9cEeUd&e@yx-PSe5i*~R)LUT#$q z&Ovc8*1-VE`VB?# zn2KJ0_gFoeLevi*xwrB>3a{B-INr8|-qb$Vkuq1?PL~%_zHm!!j>rrEP^igPXIEh= zTf+RuHVDdFgM4dC{B3!s(6mVmZ)&@sb#}gLAcsKwicAdOUeu)T{RfY#vAY%cOTh$< 
z`O)(Z^6xScL3jaXf!~{y;#kVm%#}jesaoT%Zu&pj7sj9*PvTjar_z3bouYr+y)4FR z*w^5HLz`{afbG|iyYUCD?ao^N9Sq`utDyJI7+QIZK>JnxBSdHcs$f~zU>UbWWJK+i ztH;1GwRg&wve;k2mqo;s0_$4D^WmlTTi%`nMIdNAa(roFw`LU=Tlk8n+Q%WLDtAjy zNj*~`aRBBn$6)g=%Ce>Zc>4lVRS*pG@qP%U_iV)heGK7Kcx@vE|9-ui2_*VHmpqRS zt+bXtbs49gwAqunP1tJm9Mbj*z3r`o0B-brJnH3E{l)tR>1M35T*E8bfh$BEr{yYR zkZ=>@l~tY(PtZ#0;7Q!2*!)!45mDJ`Dg_#n57lMFO*Iv!?QY`Z?|Hr^@r%u~JXJ<~ zMTe?Zdwan%$r1$pgVXgejP1!opIovzk%rF?)q;wmB4v-nH49fu*9t zBiSup`P$h?I3&y+7$xE`Ku2ZJ{RQRGRqvq2#1A*#FXeoCz)jIM5S`%o9`Un)`eqHp#b$7E^aB0J^5dC-r#sK z%H3I+lqko3(5Vp%NaOib5MuRc)N&^Q)WsaH7qb`+;`N!WjEnLcnv}fUF~vprx~#qk z`Oqh^8iWTc5Y73gb}W?*aQuJR`ulhP<}eP|fdPEECR8b3vrVYCTrr>kNou=9KDEBD z^0BHmJ@^xjvR5{c)$L2B&6-ja`&gTiNn1R0)g+hO8<(Aml;RpAfvrB8s6lXK7_JtGoD)SkjI%g`B5jc71;UD`C~rs*-HZW?4#ilb6U3wC9%J?@!VLn1h%XiY_a&-ZMh0+ zO;l)UY(YS#NRjQytS^WYchppzi2BIi-%6Uk%0bfO$5&70;%#{mbkpTrnY3R9r!O|t zqk)q|$i3EIEz9>Cwad=>97f{=f(*lx!6asDPD4%u-uCE=ljtpTa*C@K#e=7tf($-l zCQEY{&DUCSRlLiH#VR&DS;ZL5w&8I1yUd})yVb3al5xs8TXrfMuMM1F5Oy-VvztDJ z(y+;_=xQSewJ{TOqz5(v$*jhb;A;_NpPmjUtwKFRdXl+dcaN!_516tp5Z+rOL5`Ea zuoB{hMx)9!jQ#wIpH^l@eLekuY4pxaXkpg~j7dn0`Uimq34L(RtskR zXBVwJqhl$F%C={VrI2`>3+70}A73&@1^LOb)sZzS&Hs&HaT5{Bm z`UX9Oqil!$CT z`!7HK4y|t2yfm@s_N5qjvtDA;=foO-9UdMAPk~gUA>=MP>v4h1=b~gL{dfTKHBa16 zm`H4>m*GzHFxm3XF{XTW8>jnX-S^P3Ar{!q3NLOzZJ?ZKNl34nDV@SlzRVN$F`c5@)sd3F; zB&-Fxfc9MDVA9`(pa0&rqXSEONILj|PBK|fSmd}m4Asd+wM#`amTWfh&jsir2oE!&ZXm=EgY>};J>i)bh22U^}Lw7-Y1ZOr+ z+Dc>p&7rXUnzTvvoj|2DM#&l28b^5<6oloUY@CkuY!Q!;Pv>O*a9>Qg#BmSP_~P;L zKw)_{Wq#VuRX2OT8H5ii`cxKww#k4MEEfnbgtrNHibhhIFr_sUek~Isvyd7ekJ7l6 zb|+?j+~kl1{W%zoE#@D)^rS|-20c}Utlet4$24H7HjhEnO#)Qf%D9JOI~Mp^;fpuF zPSP|;pIC%Ci%GZ~cLo`(|8aUu&1E^MfNO|n`VOlJ{Uy?Rf?d}!_)bpojoe*|%d+}p zo&(FyGhCa~YyPe5p5p?_A*m2;h9u7GWeu;(N1Toh1D8P3RRx=Mjh6`ctR=ITnHJY~ z^1W4+h3H-v!ZX2QS#9b~A}rDWyx1f`SM@J>ZrOSt35%Q-n=!*N$XPyRB5Y;yRwTAM zN4ef>#I;-Ov6FCGmwqM-c*^IUDL3eEKN%dVx0}z`UPYn}PT)OxSEkVe5KEhJhc!A ztx$}H?jXy&0GkEjoAE>J8fJiwsBkV~-5-7j@I73XQ`N zrYC-_E5^G&w(6Vr64*8M<7~pk&oo?AVxFqv+qEzBPt$06jjMIf%cz%3f{|J6@m_#hb61$Eh-IVC?VHt3SeF@RN;qc$gUghg4U$F?uQC&zUbB;)~)h{{|_$WxGlC|oQU zAe3P2^oRQLbYnzLRlfVf&m~ahL|NS>ms%E(2Ef&3n+?`*LOdX*CdunDFSb8X_>126 z|NP><@g(87HS&T9aSao~w(4c-!X@fL^7WX^)f)B7X=f z)*Nv>WMRm|?AE&zwA=OwE_*V`sf3S-9&Xo*)vxfqFRhhpU8!ypxkfl3V3@rL^Zl@0 zrn<3QUqSj=4ddyXRKbo0Z!{+=Vt-v#y%y!@KI{aVmopAL=ob+WRh!qI^3i>l90mSx z{|jl}8paOArI2DOb4X-RXiW2>x0Ck4WIY>5^>ILrL}a^&`Y@x%dzD{ ziRHDK`LAni1nwe>xD-imaga+;UYU70W)7GfYjE5Fkgv)z0b&1$rR6|^VimjtxjHt9 zYJdL8OTXFb-ej3?rLNCwWl`jhOjKJzm?UI12G&; z4#Ap$Yb07VD3a5=u1$r@*|Kc29uv@A1Wh)xW}i47JtK`mDv2q9v5x!KpIkvT|07nd zt)i|WZ0@6Avk4&{e0AdqI@GgUaPxpp@{_oVK|kBBAADr!B!Rhl{l?if$qNuvH=_Tl z>VFnt_rEOYDaJg?&PAQ7N3%;LmP{+7f5L6h&SUE4Ggy-dveY18K+8|kwm&!)`tedS zDxSck_T%JeDxD@9A&J}IxAU)l+8;cq$?_COPU6kwPaR@v_7)!*q;juEgbS)31$?5m z_F|aB38@-{%U)RpyX!m@wlI8Gpyj+_0I#7V2U;bl3@f&hvb; z!=K>^QTPWZ87y~Hje9cAY0cyibKwS|-T}+*_)t#-+#7Nw=pa?ug5s4KvnMX`-X7Dt zm7K_ko81Wv3TQ|36;p&}k(_nzjB-q|t4(TgtF3zY1JKI1_eTCzo~y|;!U`!Pe^F!_ zF5ByU+B2Y;wH(#!Y=|RGA@wyZu*4~7Qvyx0-Uj0P_!I`tQ%l@cuS3P77YjO6_Qf03 zZ)or>sWvQwi>45n2~cz@*8eKte01w@5T_ltR%zwL=Y3w|9vym<$P_aj)!kk#Uz@Vu z25qfVa!6TXH`qt2r_tZ^hHpXC7A}Kd`!Y!2P3ICc|Y|$4< z6wZ>!xF4R$rZa;p@-4VkXVPXFZHpXKq<7asXCoPI5OxW2Yxel+eU-AdjC4z;hzOEcIVSr8 zbQB=gD&`%$4QHA@Ha>VV9GLYRGX~-iY~}<}GyK7RNm=r80fdP2-^HolJgGq~2lYd0 zc`Gn&UX5=~DVZEv4^KY7-b1*KYhT@^%|_T@U1}jtefew{yZ9@iahK(*|6_s*T%dHs zS3dtbSm7ky%|&wf6|h;S&Hkj@7qmAZWa_ASQji0BrA;+9@VMd)-x-nyQe9JM05=%a z4wylB2FpdqZRzJR?#FNRTMV#a+V(5|&$);%a&$|)O*1`u*~ZSHXQqK|;q)-{xYWzHV0mu%wV8zb z?M#Dg;+ta^yYQJ^-*!m3L7o&*(^{Q_HS(BW{+`k$)D8Y!MrEmZR{i@!a)qyI5K}S& 
z33%#(@0%EKFN;}hjNP8HA+}kIgjomXH(bTh?nd9+${y+DN=U^3V%FPk`r9HmPgc?q zBL@^~%H#FOe@5)(^&f%b#;VuUV`ZZGLc~L+VaH_^acFH*tg_N9K+S`M%72{8v$}Hl zR?OdO&Q!YIXj8s~r|^gxgmrs|o%QgXNB0hz~>N6}t>T+f}Os4caiuDpOx>qJY7C2Y?Qh?QXOm!*fAyO^`^i}6dv=ww6#8^!kD3}XhM;06Z5VMsg`I6>h`fhA3VGyBA zOF|5fD3$uz*nIzcOsOGtaAnPlQ!TymbU8L2!n&-=z~IDDi*|qAu`{RQB16=g-=K*- z0T)&&E~Zu1Te-EQ*-uPr^os$837uI>yHx>Pw15N7{w3)PEa(;c^VdOI@U~WVYjk3K zTu1Pb;GIb$Tg|2vXOWy~sLX)Y8|SlHI-CIE>XnucxZV%_$uvf>_@J5g%L%$K1hZ73 zEQ=GnOhkuOx0s)IHa?{uKC`{Ohs(Z+x7KyQ_d@6zdi9pq+Nb6@@br|2mzUdQi{NpjlrX6R_}y*#%t0dxrgG(}DcsaxhV=+Vkr41!FV zk;4_YJHU-KtFlF1`n}#;yIGIAdo$tPd?HPaYPA0Yu*7C;U;n~OkkNB#9e!!A?51y#>5xcs&FJ%0smP3f z!-9$et=!$+jkt^N6tyZJ;O9=%sgQ-{pX$tZBwicTJBcYqtov}ON}lPz1d3KSUWl-H zNEBApB9Z-)f&VwX~D^bcdx$?j+( z`R-^sOZG6>j|p(bTgx?9P^UtIgK34P;u6F93jw*!$o)Ct#ReQH{J}>mE zWMK^JR3sEAq`yTRgVxM(tW7@SR_oIM*{Z*Bglyx~G0Hrxs9m2lvz1W!8uK0716_0q zw&_X*rd4NF?aFYMKTPudb@Q&ecSca#l4is|+5e5j_`UtNXF=-}par||(qm5L~su4A?YVWZy#R8EUlx$MnS*ZwpCb3!yo-LOx2 zWJ%!rdvZh-FnQvsZ`9xL-N?iR0LkrUHVu)6{{VhjQT($u-{) z5!Uv|D!Po!f09aeJ9U!Ly@_~o$>^Q)g&?$ChpOz@adE&nLRnKaj|dm%FU!YUo@f3^ z@@{*Cf!wmb1hzg$%+;BjThW<)h-lrq)6Gws)m!3M9noj2CH!`p%%3pf@q&G+-~n0C zam<=Ru49X&`rYnj6ca-HmD4?ug1ERWTQBJL;gu7CitY>;+-eN1K?U3G{*Xw0>y2Bc zxH_j`Rofl-J*Hnvym)rBDCy%7V^U_BJT4Pt_Z(^sJV@%uAzy}C-*?O&j6=+dIVlrJ5ANPSQZ_Ti#d`do>1Co zU7to0Z@wcP!gX%UnD;*~lV9b+t>_J3r6yyi+IfW#4qPmLhZWdFicgl#6K7>pfGx=)xP zCT1{1}li$HvTVW9fbZvS2Ih#pjKDX)M z5IRiGS}pg1B+aJvm$PibzL%e+_O7-237wirzNkLqIpJ9Ya#AW14+Iy<&|jxX*v`-K z0hIBdQ(D_|#ns+^84k5M3dzx1vM@>v6y$1%KX_xFcdjHK61q&E#(4D}?C$s-f!Xk+ z7)6H#{0W>etkgePcg&(#n@+s}8@V%hNQ@S^mYm$+3KMoLT9I_sK!|J*$Lnck8}Fq? zu$3Z(ec0`pgX(#1$$INs3nV1wT?;&AcJlTxzW2|rnv0oJ(9lIo-OyLv&ANW$>h^0z z3d5~rkAmA=AbcR3auo#Yi~*AD`)Z%(V3N-gA!LlaU$MfRal)1YdT)PWBg;(CgfUQ> zi|K0#YM=|$J0xwL*Nsm!&U>!4#`>{KN`6f&oAm-a@yh6;JTbQBPno5Q*P1X8}m8=N|mRBWX< zfZ)lrmS!i~q))U@%!KiF2rhY^oVCx)uWOi#?N*IFjgBpz1!a+sLleBN*%}}v+*E%s z;2rF_G2-UWK@R4jB-V5Y2TPZsyO6v(;S+4;3U2Xc9uD$F$&M@xPrB6ggeIj}7 z4yFo=$Bg%7?OWOD5O7AaL}Z8lyV7hE(2Hcp%czw1V8Q=6>KpKr}PvJP8adj(`jF(Ddp4-{*EC< z1eTGd%w<38HThshQ}U__WbrtVEz|+5US8|E*oR+97Xv7YtX&r0EuHKS!(mo2GVhZD3LUcfwoA^RxK4lj(vSXYc{oM^h=~An2ZRMDgcS@qr>+uIIT*-%4J+#*dx2l(V_$Uph2cyly;ykuuTOV4W2N`9N!KL#&+C8@VdHeIPZ_ra@SLf{Z z>Lp61sz5-O2X457_R{_wRo8i#Qh!0ra59bmk_f)N5Q9m789p4FW8sX-XSAA=JUwcu z>_&FWAepdjSLDA@fb5UD=G9lX10670Tpznu)LP4T{)gBats zdG?P1Uf2Q9XIcnTym)~4$6vAY6;J@RtRe0bk$tk!rgszahc3AWuSg@I$gPY zopCjq4{##&H~Lg4$Q)@N(i#402yCqe)BFz;l4m2@ku)in8YIHVi8%VK4fHPBe@Lk* zq^9Vt$HNNr&FGU%&dagG@NhTOS><#n6SM~t-G&#b>BkEXE%f6v^c5Mq9nQ$7#RLDk z2fsp$zYNxGH4X}f&|hTn=^Ux0-Re+;7>B8?$Qopc4rWL*`?W{Y^OzO z$ibScEQ~>S5$n?G_jQSsHK1-8OEa6$UxufmI@9GLt7EHLiGY5Q@WG=@c*qbHmw@BN=d3Ch5ArUUBJ%G8)2kwU> z{;{g&d!5>ZS7gUupK&FAsm(rB^*LuHqEOb$j98UsjR5heJ!6ji+R1zM_V^lYG+!WDtif@MI z8`PX2(aj6tGw@FPWO;VtB*MehvaxG3RAG}Ci0?FljMHc4dD;vfKdG__$_jMAW9{;N z={}yQGjag6H8HYapZ1Ot?D}38!8EeJjIP zI~gV3p#Z2fAHn+!yCeRXX*k2Ug@tKBrdmgde4z%WQ%v%3uG|;PIS(6MAV-J_NIbKu zn->^edP{~Jhf=tRv`;0zP;(Pha{_*AaR|o3`mz&e$wHxcg@-(SpXj7}Em|N1HI`un zc$ETR?L_u?@HVQ0NCF2N@jhl040Xv8D7Ba#F02K_Q3|h2b1F{ zu929-xDx={gAO+qYxl7Fdu1wgx$+hEVkT+=gntYdGK@l-ftB@X0{)C^PClMJWrRiL z(tU}H(P4OfQB8`zzbp7TouCpd(ZE<|b%L##8ZdcBR`WOfX|uYR>yI1|B;pTF!eaW6 zgmZhM-8v{J7;)Fa_|K&vO$fIrwOW%9*i=C;Jzb^5xl&nWh{)$jlq3dhg^ICvS%~64PkhE8G{Fef2 zGaEMh@7U)3{l86-hqoFyMdP-P;Un@psVY878EXphOs2G^=Y9A$otTV#u6hAx#=?z+ z&BT`U zwIbXl;8=fk&hQFjt+Z7rV{HawF+tFYV;dAN#EJIG39_vv%4UDY|0;Uz+qMI#&!bBI z<*f?cMcW=m+!P~*7cV}{jv6Z?pxhJh&~ePsTlzmy%TaZzGu268Ai`tlIt3T%Y&Rq@ zRTBQ5)u{^7Mu*#IR?@8&X57-@DEUuB(2(mQWZIL!w1o@?H{PKmKin_!FwsC zOf@+B&G86vqWso6Q^&!Bh<+hUxy?BeU<6HQ 
zFk;2v5aj^zuiaI0aUVb^cOJhLE|UdCX&7(+I9t$q^#mHGxj}t*9(Gy5bD1LYE5ntt zJoZ8W|2+KP2y}pz!M&-rlt$GQngXDVyA$UcqbzeccoFt)>MCj|rfIpv^oXnhj!mcV zUtSciP>c7QV*#VezJr2H2;Kgq_6efF**oC#O!35aZwWQ`Ln!-f56`ANm ztT(0gvJ{46_G#Nbp%znYkERJFhyMods>B5w{@+F=TLmbAV>-FY3(Pi`u|o?u`U`we zvXv6v?W&(S?B(UjR|g?n3{eLxG}@x#$87VSwZ+`4Sr=PG7Y#4ut|`gL``plS4b2ez zP@3%4*vAaf59>gBq()#eU!8piB$4f8b+jYX5m8cIkw-{u+mop3BkH4R62M1d%_(6$ znoc9k?^ClrWl$#jp^O=8%)7*P>>z=}?2V5+@$UC_DH5GwT;|u6fplJGR{^Nu{Evk3 zg$vb2;f&R}{qH*0pda{cR9!DNe_F|WU=qc`N(%}>2+jT-1bec}ddH`a+r|nI_x`_~ z?Ij7d96m4c)VtI_uW$+^NpMwhb}6vRmF$(%>3SVng%tye`W(TKFB+{Jw$k#=O$Nb( z7n38=X1DLbcynH>FQny4WZ=-vq}Tzzs!9nkVWla5m2-oCx@m`hZ-O^r#Pd()W1bhw1IeK^)Moa~&M9 z*uEZ1A-(B^%@Tmu`x0oVPArmo+wk`CD@`)+?qn0Qb=KOB3{L& z7C`x~UB(vXc5Ag+z>tuZ6Js@!20K@2yvfFVb{7}e`24xnS{S#ev9F7gNjWE>pj5qy z4yeBB)pMVMKA;nIU8mO;dw9X){B>cqzR!l{(KL-y^7}DsR%v#9ddi`=wraHv`qhDO za0r!Oo-dn5#};qiASL<_^7HEd2l;scX763)#~`%DJea6&2UV1!jBoJ&T^Rnv0j0Q0zBjurwA8PrBVe zFJ1BAMuc4NZj!G#rQ$K7${9joyd2&QU@3av-@kzo#u!R3k6yd+ZMvwcW(n`H1VnTe zGw_bl8@V&}-*E4*_@M2dB@*O9Enne;l(#;-@eQ9p7omWW459AzPz<4N@Wc*Z_#WFv z_r~}wtkd82O(5`n;#n6Y7WLuBE*)1T&1$-;kV9)7XH_>sMA~6RMcAFQYX?uojPpRs zHhIJ!AM))BJ7^iGW^c%`tw;H;5A8D|H|qd(a~9GTJRrJ-{*8%mmeB~qqUBi?5?|GQ zA*6Nx9I7Q~h&pwJ?Po>J$D~YqR(819q+g)3-{BZWP;`Xal}Uga=txLlw&HSayCT0c zt6Q`=3S9J_->h};rhP-{lUjii6_xi{(|z?=wGfz*e3=b$CPuWQ_q_S-nftz{^L-F4 z!of>ht;8U1Db5`5DPn{^Rf~LINwjNQMF&n+&iTcunGjSbazf_t_r~FKip~_{s&)vXW6Kbc1tT-?``>peUssj$R4!=ykE{xe zI;!gSIUKT6d<^XN^&-k>o!Y>!r!iXIePWTj`}SA8Aaj@4D|b*G-B*1RJ2e*Csl_TY zm-1yREw+|_TXS@m2G9`PXAk%dh#ltV?bLh8)q+*y+iQzc}?yyCxX7W9Nw2_8V6Zf{Ow_(s4lkh#0{R$NG*8+pr0RWES4F4_a!615ngB8 z?fJTsbZIL6zLdUV%R{TacX_U}8Mmu;Y6#h=eG?yn;C_VL-k8D>ux&5I>F1}!IiqJ% zK5u6G^&bjo>OZAi()gfOI$bCI#;ty+fJ+&s=eC>Aw3Rvq^}c8iHMu=WvE2KC?EYh_ zK+E9%YDAAk3cz7ZM@n&I&(Js5uVx%M!f8)Al1l5+M*;unoO z*n~&RUyDYrd7B5OTqR>$j&%bxN?)z68vUtZX*p3G9+4BmZvv>mZh7)+6M7;o-<_Lx z7-7@gfH4Z3NDj!6_S^I~XUggjZTDu0IS-3vALnv7yDQ*_6q23P2dJvU_ROsI!Ysj3 zNgCUYeNQmgznPqhKfu%!RbCN8=w+DH1V}Fy=J5Mw(O&c8Wp`}vG)?uw+?(PTCF*$K zIkx=t(8cD~y5a0dCY@qRsm4ADguN_~$Z_c<*dE)OHKqnEIE{ty6zXJjX$hp}fUS)P zEGr(%7S!9P%FjNQJlv8gABQ~r&I@qE9o1lhWdl;8KXPUmOV z5@ua|YEPBplwF?v`{63p_&6+!^ZoV3AqpQgd}e$fX^hbd3D@Vv)UGKrne|x%UY+g9 zaltVV3xJUcGU4t|?4?=sJo^gm@x2A^iagN%!_oGLH{mu^I*ha{uu= z-LG6qhlwAXoQo*g;wH1-u!AxisURF=4@<*;=QSff)u-satb0*l6P-nUmWfX_zVuOs z(=_WXF~q`;tAS=F$$a_B$Cy&C8Yl`IcAw7$3`ZYjkx*m%S%I$~L^~p6Ig%rv*eh{& z8`O}fTJTBiZRw`0KQQ= zqCKK9NPA*;F!NUaB0=_Em`@?p8~b7FD`Cn;G{#|zdsg~c1ONDg7 zKPnvMKqq&t{xr|Vu^Z_(mL%Rm%o&E^W(gM?M#*VD4G}c&#p~E!Dl2^h2+Y+6N+fJb zGtu$%R8RD^ft&qRVk{~ld+c(;BJ6+KqSk3C$CJ5{+4MQ*skB=pE@n%$upL+1-%#34 zwz9Y%P05o-H@4AlIEZes`2hcVhe22b<3l?NGy22|av6`+ zns=N<0GT6}(=;gal>{yMwe|$&(Wo-Gznti^ApYMbL5&jP_+a<0mHGClu{#|>LeM6| z|H~hm!u{Xcd)YdgXCfaar>aY3WjJv>w8QigvwCy+$Z{P-bj`7{&xle_^Hk({)9Q&v z^okOfH_Xf>u}QXtd}pS{QGO0V926a*ox9UAX+L$Kaj9=bd}wV;TCIs)yzyj0XW0P; z@$b`=g47x2>&k{YAqa`Y>M{nsUw>}I{e8;0)Q6Ow2&-6n)|P6hGg9}d=%j{ykM*g+ z|2_d%ow267k-0{P9K@dc9CgXnUv{0Nyq!DE45jd@i7(q(KDLm7FMO{B^+f#4M_|j&811ESGFjL>~c-tsl6LKdfAQ2px!>S~< z)-*T}1$leD{j6nJ+zzx@f9N;4QE969vUV%Yk6duxo)mL#ygJ9?Plg~qVqVxOZCBWk zVfq3pmOV5cgPU#FSo-!?3~`Ta?5coy(tTYb)*VHvG#WR?(PO1i!7n>@(=VJIW3 z`}K1MpoSFF`0wexie4@=%&bCKL>?nI8>@EJ#p9p6Db5)J-CJJ2dx;%+D~T5fdXxON zZ1{`==zh2qaV$ag%t^k=zE)^v4E7K<{yr?bqvk8O9aZ&zlqGAJ-oth^x-C*o_P0;J~W58W>yF>n?)Tee?jA z4`sGgRYSTk8+=lPY;}Vj04#Y~hd`Aeu783zE~I#sKd>rTEVss6^T=VtF~kHWUi4Ja ztR9QIt)%k5J5?~UXQtD-STOt&w)0vFIMG;*uc*{oIT9kRWXVvnW1^aWg)^vyQ6MVw z^c7UVS66@>5`x67JJi{{HJWf)f~>7$uimhVL+Tj#XPTr3lm~BC!}zbq;Ic#fstx)S zrm0cBgncqe{h>#{rHE?D_2W2-QSJq#!G2@rbVdi4GLhWYC2D$(#9@BVnln1Hc6|w( 
zSU1}VrXx=#9PXyDla8ygPGHVwZ-37vEW#nkDb`wgMpSt0^5>Pb9qbMkbXk2#KZB?y z={vCa#xzXi_#ijQtR)DmRRn)yP~(TD|AkKsmgi*1&YrYH58v-FI5Z&8})Civ!xtSl+tx z;@Wu#()JR&>dz|aWnsjs#+`@bFFpqR09)^c@;$#jtytKfcJ*^lh*wN1*wxiaS5c{f zMqgzPkN8zT?ywWkI#?)FP-Q%?-hf%om&vyzwZMCaO71GoZ%Y`C+E(E!t-U|!Z#RK& zXPa%fzlW@B`!3Oh&}a$(r$DLv12DC4InXzupw$9JT`6aKWo0KMo!Y4+nj8iz&!()4P zLA5~Op3v%~m8fR2ULD=$mNyP0iK2Do%M}Ep;!^#ajs0G{XL`=iV zJcl&jsY@9k2%O5*;h9BkFRnI;;8p&-ZCHMDl=L?VS$!uVX!2KMT0C_v;G30_{V4cO z_Z0UKMO9AslUxt6DId8&V6{k(o^X)QgJry!7y1iFq;Yc=b*GXD`JbyO0!vH=#@C(P z3J-TRw7#X$Xq<&M(6JgA_PQq67~R7JJJ(+Y&q^e@C=>IG`Y}XzaA24$)g_)wz1Off zG(h{!%W5Wpt?+4brl44Utn}Wx(V=q_XT^725{pyUpGy!Ja7J6 zEx?D54>cMDz+2jI!zucF$=kw}aG6m*h&9JqpdUF(KQKDqcbYwk1gcr_3>u&PmY0;+ zqS42MJ1Ug%KlHs68&oMsbEg(`B=ARcRP4lr%#~6#@S=TfdkQZ}q|2%wS(_J89v0Vv zV^Lotu7^89BH3v7x;o$WHoO;hzbXH*9&K!5Cya!-{zSuxNZ?!0pp0(nwU*V>2NDO7 z+we1w*ecAoro(sn*GP)>cCl7IyJ-AGx-;?cFAJxip%D?5q))a;x8>xT!e1WC3OY8_ z#;Vq}!*%kaWO>sd-+e)|op`WMD_+$UYGV2b`3;2gj`MKdt#FC#VF)`2W4}7~(Hjz! z&{7fe!IlVN<1`3NCP2_HB@Lyo#OGLe zS(X_w(nQU!3`5+pF2nbYGp|e7YQc2&r>l^(X*v$#xymk%y6S~6N$|tJW3_CB9+a}C zBgvet?C>D@TBkq7yUpAs|l97mG+s^-ewx5`UPBJ+N?aGRa*3wJqxn ztf;)ggwM^*eD@RZep^0r8Ab9>zL4`Mi?B74Z`cwH-f23o-0fNJduB^QOa!<2GNAvX zeGDhFsfAbtTORB9cplPBt_$|8iNs>doe_ya9*_Dh!y&+wPDaF3++oG>$GXlut$QwI!x29`O}4P1!@_YBGb(HUvTF#lH=lG21Kr>rrgRodzf^ zu)DJ|t<)g49Gr$9r{byUjJ=DJru~RhQlJVS{aN`YC)XtSv!MDm}mt2k0Omkt}v1j zp=TpQ{p?7k*5aN**LxYBbj>F})sK>$W$6CMqxIbH0~XX+g9!rAGCh8ml0dZjW3EoV zX4$*Gr|DwPJs!I8B;R|jbBKVUrPHD%CFG0sHy5^7UN z^E2^>p|33CI%hfg|MFfhJ1HSYt*+e$Vmmbp_kp-UH`;0oK_i;}-j=NCJT zSB>M0ZF^=4kZa(z*H0^_L`rpR$ey4|xU3;BKpK*VqxyYwnv~kQ8FJ5hh&cRj+8Eb= z!^iCcYkgzrB-N937Z5zV>9k!CmC`6PXEcIom!PA1iAQy@a`~CXVk#)Bo%T`%)s}F^ zq#+Pk*pXdR^DE@78pheF??kSx`o!jR=Pw8F&7h7_gOdli`u!_Qh!5#N5m^8FN_tWT zEOn3}q7{yDaw^|VYuHe0N;6an3@gv0Fm+hfHD<0Z1Hjc^j&yJKJD97Sq>BDicS@ae zI*=F2m)W-1Jh32Zvoy+W*3Id~hnj_YD>q-V;EzJp^$wiBorbkA|b^@h$1^h zmXIac_c6nFJkRs~zW4nZXU^Q)xzD})m+ODs@untvEKIyiG&D3UaD9k54b2%U4GnDo z<5?iZhu8Br@I@P7uBSs&_5JEH4Gq&>Jk$p78|>!k<4PkUulwIS5jj~8Yye(F9wH(q z=j`VvjrMeQ_jmRUkjA*;fg~W`7mN0E^K?c3w~U;u9Qc+j@R75Wl@pQImRABUAds{I zNWt#E<(=JKG5>4O7#r&8H~ZO$-ot1 z0bFc=e~_#bNWn=-9Jtl?^Yd{tNI@W=WBDKL13tGGD> zJwI(!$u1b>qG&7!GX{nUW@)Y%3J)<4bn^+t1O#FN(1y;Ta!6l!%V1eMJ453@a~wDT z=)6v-ii(c4o;=*r%hosqZ)gKHKw#Y=AOetu!G;)nCr2oo@r5Nhb=0dX+{ z@=OCzXnkb})(qxi1w2jBz{u7OY8&9{BWDEh1i@UbT>^|lfeb5*g`t8q$Xs47#K_vp z$_tLvMFsfjdC6LtD_dC_czF3>fCKYNGGwYiVI(p`dN#6J%^@jP&ufvegY% z4!~Fwe86Td2(%*B4g^OSnyFv|Ww8h=GaZz#yS1mjtS<@<3}bfV-=WimjUqunKiRMwS--A*UVo2@XPFd|=9f z?ry=d{upZ)BLZ9x1e5iL7+JXE6;1sxW`1_ECIKo~lpER|4>rSst@Mp-byW;d+9tuk zBX!W$Hh}~qFv7*q3m&NLZ|4a$1G~BS;>|&BCV0ybVAa5EgM%@)vdSo^rxg<9>K`bF zQ~)a4nkk!vSR1PhP<&!~&3HIzb*FTUTp4OCZ_6*-$s+ zbX5Y$#=yT)kg`9<+g90E)*EbYp{;#7Svr2f0q&t*C=XX14@;#Gw5@)Sk}Mi(p>JX9 ztY;4L^{@pK0z*7KWv#Hzm>_qAxuq`5!yg63D~6bRB7p|bP#a}K-5`X8w~Ks`M~IOZ z2pVdt9OM=37aS67g4V}tJEIj1WUb|KfqL3FsE-*&7pbSCr;GvWS_fLXDLVUT<1xl= zAzljdw*JaCK|XG>L7qlNP!(^4f}WQPB-jurY2cyfj#I=M8_Vh7>^!V-`d&~IgHTU1 zJvWFM#92ol>L09PssuBGLTrMAJ%W@>fjX8-e)3pTjEAWSNCyT;+0&s6a=|HEd3XT1 zz{k+i6X9_>P+mdqF8)SnC4y&&8v&x|Z>8jA7#xB_0L$JI1M_u5Aw&F-fO>!_={f6~ z87c<_;lc7|vOa!B1gs|*M<95ewqoRQD$$_6zy+mjV+wU41jvDcb#Mf^KnoL)rLvW* z3eqOn1q`!5x_F0r1>yAJC=(S~3g{JE?~PS~AawoVNU*=Ror@>h&fQyq0MYhR z3WcG3-K+rVtK%ACq74fIx(fwXwGPS)X>AdTGWG}x2{p8|G=!J{iE!7`ji#*asbd`i zb4APRyC|8VgM(Bo%|NHwvLHQ_i>r%?IS3qL>a2oQFhCf?{B`ig2p_z=8^ksw*x%aD zKrTp8QN>I?5at3Xz96uk4_eRE)62_D*)15Y4HR=VcQ(f=I@<+WDxP*k8RzAXMJOOl zoZ)sV5I{P3x*EF^EL}Ve46M-dvd{o;v^-AN#zZ+pUf;sR2BD0HS)tM9z?&Nc8(C-v zTABptDtNlP6XY!|JZ)t4Z9?7r{B`6FfTqC)E}@DRC?ii@z?jhS(=l^V!TQO$=;@pJ 
zTbtPE>7Pm|cT<0rP$=5P#3$6<+CatCPFvXwMSz45RDew2BO9vZtY83AP%=2JVzVa4Q#QlsrsH zhY;f8DofA@L9xIFfO&yr2}moDio7Kt&wS)CN+=~A*B}@e9b)QcYOZf%Z5(Ln8)yYl z46y-0Wi8OoN}(XMyCpc(AE~71Y=rmGcC#^2be0W}3;2(4JAD>lSP_&?cNZE0Aq1UT zGN&KF=7Rkn8|Oa;5Agec#-u!S!lo>XhUPjA9HMO*?D)^&Trj8A7*RCMl;ir2w7yx> z_45u|r9q|oT&!t=`fNJW(T}b>JO(p22c=C#o0dw%U60#wWM_;{D>%{QQJQAY`1kMo zqrAz0qQ1p53#0qZL4>J$eJV)0@&?v4S|=Koba@vGwN&w#7d$^%sd8Lseax90;cork z)T^ErQ?RXhQNN?#sx*AU*9Uz&bGx*1o%Y>q&QfT=GrKE=YTM*Mgrji@ikRm#DKyhM zjci&>IroXN5k$8pgj&_gxG)bbl)BNl`-bZ)PqNnbOX3U5U*}@{X)7$opD`p`ZT@5M z`_1dO^os2pLG#lA(F!4W%j(lg&UByM<5Y>Gwsm)+ zX5|x|afvnOb$8$Uea+7Fm+wcj-W@m*knmr)5Vff+c-ethtz%>FN?Pyqwp-8Zb6Q=9 zOR^D(GPUT-B9o0u;|^MPiL{OKBwP6lt)KPJGz~BAuB5Ew9RAr|k=BI8Cr3^9t@Xa) z+g_P!-(0#(yRRJy)ud5z+VkBow&FF^L+hjm{fc$4*Yn?F+OX|Xhd}sqj+-66jC$3X zryM4Kq2I2|u2{|`%97$|$&?UqhRF7V)YPB{AyVo}T%kK+>=O+q54mr>)X5MQyH%d$ zS27TnExG^9t@l#;jWZ$nc_Ir)@g#E37s}sErRU8Ad1Cg({B00jQsfHTM(}{lV$anS zCg`?v%#F&G<$jw>c9+*~P=1LllP9-$SMp}pWJz!>kpuO6@FpT+n({~{LqIa>tym#v zVa9;NC{~b?!vBF$H}-p?%XW8W@5Z)artpVsiCvw$ciD`d{5W$$31E&o8=fj+_)sWh z3tjx=aJy7Ol}Mq}puCYpp$PG{2H(UBLS>vfuR{|MezWa&!Zx&9qZ2pDR!rp@+aMBo z-q^ExGSca}?w_zE8lk5w^lElHi5xSRm~+srMeS$`e%_a|HzuxLcpy(=a+C{QY%qFe zE_E;&OugBP^?Ai+n6svndMy)oa!iIG)_VsISE{}BGjEFc|M{AfDJoNpUzUzbnPNkZ zQr;^RJ9(-wNSdR~gvsJU)lSdT{>lq%S0bje_Uyh;B=`jvUFa{`aOo#iBxF5qJpf;SqFW(3s-+IO*ScT zMyyRo+)UsOKUbhS0?m_iz|FQZWJx>d=TZ>)qkNLcwd8xpGurfj$1br4noC%ev6h)o znM4deCLV}V7j}shoF)VYYi?y4aj3e(_HR0Z_srRzw!>}YK%FPo&cC0RUB9)6Zmsd=5>+U*35KZT@t> z@fAYi+pa{1eh9y|FxD(SdWE!Rd^R2 z%fZ>@&Sn$BtC1U4-96R2jeirSeKenGzkir}92mNC?(Z{^Us==b5Yj-S9{rD!ECp|l zp8S0lUhs>Jhf9dAkJEQPTKs(Rf=8z=^YFOFWiduS=T9>{fjD$w;_agFOXPZD)VcB! zofs73#bY+ZNB`PtU4+qGZ=km!`VcfB$6?Nd5SzZ!V10`$`;MWnl~!o#(`Ne+Ujz`h$#LlDX5!{~+piu~p??>v*Evis(S0Mz~$r zjv4%8@FqJm4+ber#{Ozd-Jiy$*0_9limbkg%1E1NZ3-kYCDLt!QIK=}$}f&P5@X}_ ztP_bqo1&H9HYd?U{?r;t`~LH!s_3{0H(-$pLP6lC^zT<|=PileUz__3n163hD4v=TuF5zxzq zk`Xa6%&|OJ)uF}L{`9Lso+GoKy4DLRq#u7=T} zcy-s1a?ulcN&VJ#xSCE`g+j$4>$klRnVn; z8fsoq3t@Zl^Q0E9IqUHHqNN+rxBTJZJO111S)7!MjZ%{lv)n5||9(I(tKuE6ZXpVN zxJ8#~X`KXHmM)!Nj(&8j@)>Tn(>+zTBPNnu}<~!yP;V{ zf)tD5nS1sP*VsgAbHA2@u;_>`PJfg}MB!oh+8>5O~URZfjr0*8GOUN8;`qCf{|TZSUKJP_c?L#9bPp z{dBgw^z3?{wjmJ)Mef0GZYY?S&xbhQqBE0;Okyc1<&hb(u^+ZQbX^!DyvTE!s2S4j zc9)4X;&md4hZS%5xk!|n{02jo+ci~HVUT@<81-nKAN@7U2AW|=$ zctxM~e4q2L6|=ee6{D?}5{7ag|JjunNVpJJX@s}E&#@}pi9ssZX1Kntaxx$3yRkkZ z8_B!IdnhVgS>;OX9=nln_zrgwL{g+H&qVPqB1Oggm9EgjuAiq@GogHq>=EFX7>%~% zVqc0FH|nW2aLjeert$jN`0y~>iG)m|5bJRl(>*p`mi^q9-}V-&E<*u@XcQdp%u&3q zs;^xGw1b$I*4tTQ?P=eyZN>dJe;gzAKNsIu?WV|)B-s4cXL%h-QcPAR>dA>`7jh1* z;v?_zj2(~zu^S=yq)7McpNgmDgX^co6r_0TB*;bT_ij*v#Ak9AIJUhNpgq+iX(BeC z3X9WB!lb(A+ZqQU0W*gc#jJ~Af7X3u&)u!Cskz*ldcw{n!TFX+!d?#f za+vx4eW)xBb;&-VQf7{p^70(~i-N}!Cw`%C4(@<4d&2t0VCRUpa}-e^=xQi)CEMv^ zXtw&O#}0hq&#s*7)_g-!ZP?6VWv(lmP}K2$;w$akpA>r$@oQ2{NBc$bE??Y)ioNN&0|=**-zUkTBbk z$lvz(YkGWDR4Lz858Zwd_*`-SHA)jb@-_G+j`)s_V?PSm0zP_L=}QPD)SYj`;ug_| z<;Iy+2G8i0nccr7_z+w4msgW{K%ho`ws;a|y7ibjLX&u~^E2~Bj?6g?5dG^n&Gsph z-NjBK_1C1C)_>XZox6)hbH22u@~jo+yjog*u9y!Gwj(y#YbT@wxK zcuui2j!ObC)R&a${t`WxIxi~+k-YPq?tqJP)F%N|kYHmuHM$-!1Uv(jWn8jCF)W#Q z8B~1crPUEl#7X#>*Y>GxQ_U7{h*a&X%6@0%-S_?<3()=40z7x=SbEgy~aW4Od8aZFz4m_#GS_i6}=Ds)gL#sen1&q+F16VsJqdfuH$vs(J`FfAjCX zG~((lZa@FC+*Ao#ce8`Dk@D_Ux`0-F+8g;la}~!v{60Ya2K(8I6sffml2M*c*Q3|s z3*V_{Z`oUO?1BNqb-gA3Os43*P6LLcqXO;WfxJQ2Lh)yRcnO;d=p)+ri{vh!f#S!k z6124I%(%Pz?SMEW6j^?iuoPtalHwo?*)LSL@H^O0$Yef2J6yM=8wQL(?Pv`BGM z5u+CE@Zsj~Sb_Rti*nh2KR{-T8thKY+&*lA)O@VnGr}s3lzlzlmsF=un{xr||D;}% zT)uoyOunCVg(~awBh0x9vFg}_=K!o>?gv$XEqtTD;y81nB_kD+0lqcX=waGc{XCD|X(!@@ 
zdN%3u<{8YS9gB>NOms9wc*EhXTh@yNMH(haNW1vx#nsR^T-`#WNo z7*w@PoqS;?f?pY>$&s0cR6APCeI5LuD!UPWfq(oWT0CQza!A=WtyzX_$0$}jh>C!G z&Ck z%S8FYuI0eAG}Ve-QkeDX<6NhO zv1c4on_siz+H_-&wK@`-qg9m7S7;s|oV&MQ*cNq;FFNw(wm19qLa|ePi013={V8wJ zzt4sy4#Zm!ngzrAOCVdHJu}TNue(2M*@`bSu$AtAs<7BLtDXOVD?W+3#MCy{aPg5e zKb!k=UD3V|K}DSFH2yr}%}3F1=$RM~%o{J5Z6NGU)>|fcEy{^8$me(DuR zOw^Yg9L&khauz9~xNq_D^2k8rmy}X_wKi!1*Fk7QDV|t@Oa1Zc*{O3b445lvl+R5qXpLR=wjxXZOFAiIm~YUZAPtU<^XM6h~B#IeSf1Xy!YkumpKI znq>3+YO*gI<>n{-gA*ckq3O2n(Yqn0I4QD~Tx;sr_XX+)lf6c-KuuD&1C%e!k1s~&-6E}U#Jncu69(ZglfK@o$ooUu=#TS z(C3R&1nGo$MZ^H#mdsbl$_dt^XDKyS*@j@wDR@6hwm;!)E*d?EVNPscmU-Th7*u?3 zAC4;NN5A)59%Rpw*k$3Ja%F`53oR?8loNiN_}`i8NQX z9219z$1lv67-}>wfcSEqyv;0goGQd#*$1UH9;ft_!(W+|66HE>UNmm|K`A}iSvC~2 zdMRJ1IW2AUO1$IJy!-1V(V)ma4Q94L^P@V?2F&CVJ*})SIbAcjGl``JH`mz6aZkc9 zkh(jjvw3@N&f*Ne>M(beHfL(&?EPFyFGqlSoVVp%v9>q~-YXceO>FW{ryO?oqB;`N zXZQ0I?|%N==R8z~D}Ef^bJVMZ;J-l*nD2_)SzcjIyf_|N+%8JRzV-Unklmz|)HH~; zf`HULj%_kIBV0VgsCM#h-@h@+(v3r|YY!BK|0|W8d};mF-r)Wv(sSqZ)RX;t5!C(B zG~%d}pHGGV+r^fLn*|@)-u->n%9?HodR|M!>TM86U5i$iUI{ zCSVa4wgr#)MIvb*WSIMvYe(5JM?ehDJLtI@-!991%OSeJwCM$UufH>q@02)~l+!BA zlzQ#?5*1h%zj<4zggnMqdZNZ?_E(lWnQ-y&kLC$UzY^*^E#fsZ5i;QiGLC9SxqBHl zH=KtWumKI4%kIRac zl#$|wS=JRSr2E=iIC*J*T~m8|;cOUrSr2!A(WvIO*s)eJud8g}Uy0xKT1bWzzA=U6 zu}93%DD^{Que8l@vhgE}x)1~FYx94#MYlu|-#N7TKDRxJd{xsEPUgQw?lzC6Yr3Y2 zxAyrvxZbfezzn^>ZSjiMJ0b!?RQiqVZOArm{8^FqnolW#RCA}Ry5Rb(*Xfp-WBe#W zUbbsdJHEn`;yHM|xtXkiU+w%dc1_)DrE7%}7NxtWFK0)$H9sF$n3a2g6eJrHOYpZ? z59hBW%Mp)OWs>+B^dT+{S^vbz%b|kgF5zj@t@YAN`b;P&ZIPJBe6?3a#6lm}$@M&@ z_Xax?=}z7opg+$qagF;ugB+88zIEp;4+J~{n!Fhc!4vbX4u$>m&m%;?U%YxCy%rs* zY@Ab7$T$TxqXLU$Up!ZU;h+3-=g;HL!!N$;>>#V|-0?Y327^dF)n{(zX6(N;AHcF7 z-mJWwuuv$9V0PZ}O$gCOg?xTEcAp|P5U(0uXCbIf(*P!@M=$EpcqJsiglTzmP{Fi!Wa(Ho=B}VWmil?y(|&$|=z2KQC#L z&%oJT#J+p)V1uDCBBHs`apPj~usJXb%J3$!@w8TH&rS9{ALbeQ?Q${N+ z_yP1(_&YND)j6@JSHpwNg@d<5M#ykY++7*ss$vbl<-qphK-+`s-lG>#C0{DKp~Xdq zf$ZOS7myW84P4@?o1y}KP>fMLSf_luP9tFP`s*DRVxwt(IWFxkhE@ zU!7BZtNPd};MI~FeeF{@hh#6V6har}ua(cQ5Bc;FlTF$$OrO1IbR1lmKTEMA*B5{0 z8khP>imtOAJ!t6wR)p%(>&yoS8w(*J_x~j4HhEvPGrJ?e? 
zM#s(Ri1oO2{@U@<^@1t~KE@SZ1GxG<^3MA=Vs=gV9GgmT(b7f-`gQuSLV;qq%lllX z{6!HSGyvI6Uf+1MBsz4g9q7FfE?PpGJd68td?wm=^0yyHghlLJqHd9;BggtBUQT#l zb{sK*$w*WW&$Qjavuj$5rk(FSB12-?!*Iks2WgHiA1x`D2f7jQ?CVV1mSi^Xfwt==JQY#}7t%eiGh8YbkQ%2y*!Wx(7ynbF z_%n~p6kF!aT~1Ns_W|ymE#c+UO(_Y2Ux(s1pQxOHTz_U(LgZdpoDVW-spdq-QDDm{ z9=wu=Wq-AICK8=K82bdh>WV2&I}B`#9+V}0=ZrDY8b37lfPjVfj#l85!3DL+B`p#O zJ*mBU&ZOY6s8qX;3FcGvb8MyhJ(J?l-Ilc8+4_DKrtQT`oR0!;CVm(wxm^NFzSOZ> za7C^Z`r3|m)(h6ul6g@@`cC7;T8UiHoHwWBDBGWJJY4xS?F`}TlsYZv5KfZ8duEhPF z?{eng!~~`wf{i0;E%6a442-(V^ug-oQ^B{Xsn(Hd@$Dzg+EId8^mHWx0&?d?io6Wa zRq*2{JD*D|ck=T?GV`8_d+@~{0~qudhhEst-*ws>yJJNO^-IT2suc%^i#M=w#ODjt zc_jmG#^!rU9J}evG+^| z`ECnYFP80mhJj3J-V2DVk40Qb!`rnUM&R}c&3o)5^b}%~0r2r8qBT0)pW0k}nUTMd zKirTlGJs|BfltF0;#)?<2vN%)I(L#x_QkEgw}e~FRSi5plYWuk>o;|O`zAT*B4p_i z>$uO;&pZYT3sktn(0k9J8xt)x;Lhro@4vfBg&tS7%U-*G!-ID`GG%wo{RgqqtpfPHR z8BI9GjlhKJyjVWephyWmb7RES4|>Ku)%P}Xe~5v5tCQ3b56(3-EKvVV`Pizc+IIf< z>%v+#atgeOJlm}OaD{X}+$#w$NykAXXI>Qf$Lla66Spo3q#A z^Fyw5af>EMQTm}~!RX21Vqfrjvl_5a_Ex{#3^iWd`g70cs58U0&iDOgzC>67;o+&NvfCTP8It?X;SLtT~IobuZAyvk$l_kkInG8I=PamV@sDc zc?;m@7$kN?Xv&ie(hY@+_%^(kC%%3&WFMQk={AKxf~~8MX>TIdcF*dcIrqJDp*I${ z_b(MoxlQHE+s@2(p&Tf~)#V+EJe#0V5(=8j1ZtW@mZD+|ro(O)=Q zJ*=PIYp=*HZ{TPpVvAvbA5UJEWC>A~SbF7syZmEdRO?LOF@0_GR;X3aw{MjaNvv~m zyKQMBm3p@7DUdL-X7`Z^jH|_cvr{OH9Pu;c&*)4`^?F`_UnsTa#(s`S7}J%^u;SA0 z{V$UH%jG#oKi?vw8OtSe#9pd8^uf#`tMAy}q|7bH##bF*QQnZhs&){&QAye08vj3B z08umS+R(}Vw4qkeUWS0(SQUrgzaRU6;B0#d(X?eSohLT6#S21*nuk0GhaVmWT5{5c z{v96q(TJ1!nk6nJsMS~S0Z#uBPk_q#{4y^0E^Ad>5)b$=L(MuPk3rpm- zim9}spm)^<;ymq4aG^F?Fu}F*m4yriyAus z5aRZa7HV!|xHxv|k>JuMfKD(jRb1dFutvnIdk>b@Ho8X9$(dge2`$2}YuL6E1dEPK ztCmaUo*M){9GSM^{XMpD*v)W-Hd$!Kn{joKA?Z=F$2B(gI@`HPE{aaMD|w?j{I1U_gAD-qKnb>DEY-`tFRw(xust{07nsWqj?&DjfGXsYulJE z{5u}q&D(pJ`8V-I13CUMByBVrv-~Esu+au=6RK(dxmx5xz#>Mv%3b)rJK!h7%*Fw9 z*1IA&qmNJVDqRz;I}j2U)KM$=D{U*MwpN<^U<)%rxG>RxFda!5i2+ywBGcCMQ~I-0 zlq2*|ndxvP@*@6>USFZb##|~XZgD61oyUM_+ItO$68(Wa<9IreAr=G9{%V&mvUeJ_ zt`$~)Gc}6rOqR3_v;MHBwaKFA>cJ5=E1z4uFG?_WKc@WjJ?iuak*xdlq(3n(vC@** zp6#M#US*|IK9@hFUBoNX5Bym$)*H@5hdlQ&+WY-=_iV>p@I<+$a-HMag)7|2|8Y$| zU2bxloAXW;c$a*6xv?M{|M1Owx%lv|%<#exW4VHVUj(Aq2e1;lE$jPh>fAwMqMXsO z1pIPBS@c?9nQ9j#iH_Vq;=T7#RZMY|u}L)n!z^3CDuU@0ipIrP`UuU@^aXix82dap3*!NSajGdt7XhnItnE3qb5m%Ie+tMAnss5R1(>Sxw^ zRGhg4>Mz`^kmFq#<2BP?cWoNYV7HaL<@k}qzCJphBx<-IVzOYfGjVTA*s%!kR1_$7 ze?xSNi>jJE1aFNIX>>KL{mzt%9 zx_^f%LU(!Sm)e~kP|Tv8{e(VPE)^Rb-`+g&=omLk__A4Ve1oQ`zS@4oAjI&`_vfKv zj-b+0|Z06w{T+ojWvZFhh4^|{$ShT7Suw_C+o*%(rVhuUU4wiDyp z;FB^&W9J7vyXt-QVz`(_^vY3o8p{o$qF8>#^B?s9C){ zZrOy3DKz6%e#6xi=q+|^6c!s>#C5a1&e`;pjYinfYEFFBJ-gU>rH`l%$C{fJzk;TB z*}My<_~$XuGBS^hqu%~nvaCP)T+M)iV6;@=i&R9R8b^7eMcEEJ>gF+=^fJ&e%Kw>% zX)V$BkJ7XAuOU04yD7pvjpsQYB^n<}*~&fDONE_uOr+l>4k9*jCQll#S9OUDDEZo9 z0mby8F(q~369ABPJd|p#?Cyx7eCdAq9o)CnP-<54Y&PMSTkp#k_I=mdt5u)k?Z#s$ ze>p*Gib8kg>4>e)Ih$X9$f1Vs=10x`{ME?MT<@VeTTB1``QQxke8MsQVLx|Xc=)1` z$3VNPXb1h-!HAA=(S0GI5C4e+%_;jLYeze_E+M{1g89B`E97Z3MYrz60{q;to5d4z3m7 z!|J&`v$K2%pISAfD+LC$?RiucyI>|n)z#xyqnuqm_^HsG+P5 zqt5g#CzZ3+R{Rl@FRMvzzrz#mi+qZj-(`N>+2I&_Ir=&gO3HZeB}s~pKfa_P#z^kV z5_4o}*7KClnJR!!wzdG2gW!9j990f=KL;B(O>>R9dZC)Hy%WZCX(vB6XO=+XDc9d( z9^al08!~9zNw=rP!Mz_D^(==+0I+Zk9Du2plzvY&?^=He3-yZ{*y$Z@*$-)o6!$+St4UWsghm~#S>5e+-kL76pz5U-qNrpiXW6in zbRkV1{Gh`p{NU7wAoX6zSsHWB5UHzn^MGzbWbkTaVma(c2g4@))cG5j)X zc*XtAWb9PY@Z@KFlTo;=?_JZA*K(pkXNbPGnO+O@IOv*!UJ6BAP}M?;mHjN|{qdcF zqRRrD+K*q+UsL;W{Z5?!0>zRM65!-&-fh{2ibh10=5z zjnkN{qhob%RHog2-KJgudUf2aZM;)$!!fLg*rbuvlM??0Fz@3s?=s24JXgoh2x?v4 zqyj-GKe)d;5W0bFO*KDm$BH^4y)p3i{C^STSU~3 zKAma)U;N_mG=8D1!0U?s{!9Pk7f;>D1-kbPX@QUamg3AjR>)S#i61PV;)s(BZr9EU 
zX)%`askD?yT{#E4{GcxQ+(co;Ak5fuBLjZ5Ki*;Q-!XQh zE_7FnZ6-asrTfk_FW?+cwxHP!_?S8Ka@iTCN+!6)BAvoclRtBM#2Fn43D&po7C=dV z{=go5A63!bR9M0+?<@}`WKNH*U7LtmDnCIaT&kt-%BjwN(|(;zS6dNYmv%c=H{;=0 z_3L8oic?Bdf$V3@$ji_N#Gmlj_v{w&wn~UJ z5$D3Tyy!uGj)OZ4VVMU$P(!1R}aEBA{viPtp)0P}Jx9Y1F>-7*An{VI_rq7N`v zq62vxaJTPj8JMFkkNFNDxC%+`o+quZ>@MVOsx6oH+Q0qhL=f`fA7ykhMSTdqUm zn*bzr9P|52p;}_f)YIo2M2-p|aASgHNj&Qf(aigm(O8@su=VVCyDtK_v*d&BSvt2h z=QG_>cEFfa)N+JLX7$g?F%KI+ASFg$^Ti!g?GqMH-R{X%Z?9Oow1IdK19#~Jr`T87 zcp~|2H~Wo>qnHQ)fX%u!!IGYC-_gk+0z^a>KfVtky$dLIB3)_YMK6Abx0UX1{0^9n z&Vi8FHM&C4tq)$KqRR2h4+M`NeE#oTv^o}RRs>>aqaa69&E?URG=~dA$pY|rViWSC z_3PSM38VpM^Ao91%rBc5fI8F}e{4~HY~6sd-C_QXVVchwzMX+-dDeTmY2guNLww}* zZT)ssybReG`AZ|B#i4P{G(n(Cre#TVOv-((~7jdh2tYg=9svc1q<`eVPS%7&xZ<0?Qn`>myMToE4DdGsId zLhFCH3lDsjDQSZfd@?AM-w~Yt`*cgx86IhM;^p0cILn=H+IE}2Q~QJxiP`e@pNqA_ z_9ejUK-^A3x0Nh5s5Dut)snwsrWE+s%jcIex9eBV&TPqs7_4-`j{&O$AePm>KfTW* z5HHN#?K3?;gc!TE_A|iz&Wi5w@eZ2-?Z+jwOvQ(AxRI$QufoeXba%#iP4gxN)Iv|o z<=X_dy>-o~z!g~HN>tYqwVI=tZv4+_(*2F;@{^Qn>X&L|`$Mm5K7VTtgdSGp`go?x zrvaTcW@z@Q(>TBLywmK=Ownx5k>{CKb^-ii(B3~|XppoMF@@R`e`bj0rmttA%@YnF z3|GIE-`{wPVXH)$FTPm_zU1Cz>eN2)bS88M^Sn_Cf7<|9l$hIqGt13KkAKUot#)sZ ztW@fjx-V`wF0j`EkqWKBjW4NO-%&!eWHh?;+T z;&Ac|(N)TA3~R5l;R7Z!0YW-|jx^IBT9I*7x0PXf*ini6>5>d9LOTwA%bLc@SV&E_eAcqo2`0opRy&mc*ay8)4dfTG{`_7V+PT=tejb1 zBTTG(%;+s&NB53|jtzjdlKIgo7eo6rQ{P~I4l#NLh+}NI8v7T%2h9CfCDFzEk_8;X zvt4%4#bFl3G?@P~>awynwD+}#;DWyYl);|4r?v!O;hWf;p=*hLkI>B9&osPrK9H#q zHn10Q=o)tQ7oNoiwQ>wFZsV7zSso6VHcxKv^VQJi0DO=a)H>(=o#g=MwdT-W4YNm> zp{I>BoCm23%(gsf2+ffabfK_{5?Gqo2QVB#q5$vusbi7GbVr9h{4bHfR1bfI%!*_7 z?|#O3jD807R=GZ-e&NcXwOV;H-+0e+NPgQ6MycpFC&zWY6t_lVxYe)SX3G;GaD>}m zYf@&W9a$NVIhJ33|@*dWqpGnEIhUv-h%uHP0;U|JeOZ`;@Q zo=1@G?bpbD6LtE15Z_}izNHLJSA5>S##45j9-|FV#9j&g0v6Q8+_CqWk2&ganE(Vw z|K1SLDiTjK_1HN%+XtS(KU2mnIA6BPf>Ml6Ot`fd?ki^3L07;M663R>bs>`pIYk$O ztujTlG6g_Yvr08V!oF_?$4l(Fq(oFV%Z+b@o=Cvw&7pi9b=~NOcFH)5fw46Zm{*9u z5|qI9<_ax0RZ+LR{J|b)Oek-|B>AQD!1b1UT)F!bEQ$1CwJpYQ%|ybk&18dlYf??DLzFgUY1G?Mb&g z7=I+uBogzV4OwWrC+es>m%OOvz$LYl4Uf~92#3|;ch+4Aeuf~k<3OC|vV)>}okTS_GD)Hzu;|twLC`pxq zH-8Kz85D!1c<*EQ?pH@)yC(qk@Kxtn_16I$4yNS-Q2xd)-`HDExlkG@ ztR9O?wW7VSI?cg9rms;_`^u`wYyr7Cj3SN*zF6HFUr-VehO{;Uhe_&t1ld`-UwR)> zjgU;x-z1ux`U?Xk-)#D-k$^w=wpnMZ7*2J(^9!;hFRgcl zP71Gawy;m1+3pUkvvFvdEN@@fS1N_k-5*q=qfv+o+oS6mmNf|7luLf>b=CIKq1Aoq z?MgZ|e$J6I|7Du$O0+XS$zyH8&*^+1xJ-qDopplPVkUpaljS^2fq=rJzMsY<%=&V` z^#CDXQjtZl-tXT4$F!hL;qvl(tJ@oVrd8&e`hy<&9>ksg`@MS)bpettz=dUgLzo$R znlnhzc{RBKM=(6G^Sv1&&9h=F+b4kwH02UTJ-*rD=?`rK6_#ye7`v;6!PQgJSvs5r^ zQsjRI%`fg+i2$6^HunC393ZBJP3`y*(pL+s7{fO}{<-7FQ^IcI-HIrN1^Z+x` zu;E(R%VO?WZs0i3r=w76)gz3@lj+~9`Ngq+_TR#Hrr*EayJfoc69|vj`25tL|C)UT zIH2;>s*=_6m5q@Nt?5&RrlIitH4ZUk6=M^@Hxi*q)gRU!d&_X{BpM)J->>RFS}MuN zRo#{J8RtwPowVG4^9Cb04JfXGS3ep5{Ze6b`63Oo`N2NlS9IozJo%Z+7uBVyX0}s; z*VCse0J6X&ft0>~MZN{LIeQ2_Jhj|E%!sE6F+|wExyy=~3X22)ycx=2WmloP5P(!D zp1p)ANy0b3a*Cly@YS9BQ!7NG<&xOVOdizxa#6qojiB#1$~%0=Vt#we+x}c5r_c=i zKl54d`jkZ?J2&afklelY$W@gtiRSgWuAeP~-+n;%UX2z8f=Ckm!o*7jtCAhSw3-fM z2EGC%UR)f-{$bConch13iJsC#L1L9;Pv$c(br>^_ybNc-uFnIcNWq6J`7-ne0Jxtf z7^9THwGy}WC$Sh&N!oF>JG=;Yap->X_22^XX*k zo7of9U8T`iS4753SRx-VOw9cLoD7hzG!l5$Dv@ z&sFqQ*jv4w{4FO((D-rI^=~u=Kuk(^>Z9$zQ*8!LiDB-~58r)uFA7g$=BaWStC|BM zq8eWpI5+u-d*0%Tgqm8q?|mXueN2ae#C7mgals*rnA2gcRLHwJcR$Tzsud{qUr%*H zp`RJ-(;48H*gCCn;=NztzcVQsRZ{!fdr8+oZXYgeEu6XT^v&aAST>xlaQwN+ImqhCtfoOAV2iO7wzYMH)qdzIF+TWwPgq_Ru-k z3#1}0ow7=wi#DYDeE#`$) z{YAH3myznMT@1(i?;Iif(#jxBq+Pq!VXhNj+PdoIwfg1w=}6Vwk({w=^Oqlt8)KbP z{<>2~FTRM%gKy_&g^}AUs7-jvzk! 
zaotpiJkcN^nQ%hcPHXh?2S8$z<9jT1BW%sX* z)7_1LfOM-!2*>~m2q-Bf$RHsq-91Ap4N56Bq##IxfW#mnDIH3ONH-D#!@PTZzUMvX zUBB}^>#X(r=UIF4EbrNS-+O=J`dpvuF1(E8m?ktVPO9mznwu!cgNad3g}QlUx=>GV zHNx;k9(@$*J7~>YB!2mWUSt2f@8U7>NBq;;GW?8m+Pzx@CP)4h%Qq8<=Xx;TdVJ>u zQE^={)4zi+_o;Qi^l5c833K62I+S&35Wa_s1WyPcnO)tosLP>*=9`Jo=v_OlT+GjE zW)hN9YI&;F=iDcnZ{Fzb9hO7`F5L6Erk5drj&TQm zHFmU6;_1P!3~Yz{x89!i_^XNuU0F((C+B^; zb_LmT5zvC~?uu%yl31Uk*VK1gk~$RW?3`iR+Ja7bCo#9Kn57md@T7smN~pvrOA={x zJx3kpZg=(7Q4@@@a_hqdKT|WPc6#icoZx5HKpWiHrauQpKdx<|y+j|`(arlFerEW6 zZv8EQn~N9fPj;-Xg48dt*gH6&YdH!hTWNdCCm)W%%r?dkS1i(GDBRYGa3PzK@joW2 z^i|45m=kp;a{)g5^W~1n=+j?`mny#|KbaauoGvVIhDQ`~1@7yh$}O5hvBWhAqyq^Y z4((5?y&6h9*oi*hTwGqxlpP0~klV0L`W|e;AxOv2{^;XT@YX-=@#qoN_wU--D(RIi zaqKRyyFUqp3URWp+7Q%4ff%Or=5htB$G6&1wbkFYqOG+Jaw7@C;mX`~iF)|OuQ01U z!bP{UzZ}ofbI^J5rU`BT<|N1DuKdr>xeiOac8NOhiyWPILmcZP1*%Axqzfn>0wWb2 zQ3I`6TV=VP%_q1wpK}+U{kQ0_H{}u;aNa|KbY0f0_gLqb^I!qJUKpA|!E2b~ZTbYz z(?VN{z{Ate9YukrPLQ*Legeiy+-`5g6baQ%1Yj?Gi=ZW+dl`Dk-LeK~61afHE?1_* zKy#fppMGOpB=uBf;t*BaS{_oDdT!?`iquy+>#bC8rbDFj%;M1B^ov(r(`VBM zP2T!HUUJ&`Kf$J#2?g@{)BVa;im4ePUx?bYJVgm z`Jv4~C&_bK5XwHb=@bg26KI3I=(Sqki`n9#2ny&|Wb@>t9gN%94})Ch(_VbgI^<5F z7{s!xwS7OPC`F95%SRegrcYcw31BpX` zgv_(W}LSHpx3xADRaH%iTU5y&FA4BDHDJDyN4h$!3_#4c7)pUs16hjr zT)6|k`yE=F)l2otx(#YRWnEo~<_AGhQjk#L4zB+(9v5O_ue!Br_Qds9hD6nwMEiJS z`yZNu`NtP`(&h^qcbC@?E!`=>rySh4#-*J)n`eLO|_L>!j=LPsT7s#`@-E5s+6|KFE@Owi!EMcJhEnOS& zc)daloxsZ~)P&%1ES}!ehS7O*mfv;Qk|XtbspaV>ysQb6NZMD)Syt<6@4Wb8gT9H3 zLrh(5f=y~EZBF?>wRwA;VbKL8&37pK4W-Gq6*TU8sd|P^WMdD^?@t#k7qIs=U9EZ zgK8SI+;|Q*MU60%REL`Z{h2?ltS*R>${jo&9r@NSlT%O}B{m0q@b_o+zpCJ~7yJE+ zUB59Q+BCUuFTIIZRVP8>DS-;Tm>>gaj z`b08D62~Hw>nW_GTlsOvAYqbgBA~Qcast;fO{`uPj%_Ma14al=7J> zHyzd~d|X5A_a9t09{y!LSn!Hk!1l#mWt>{ z_;|IT2luJ=^RzQhylx}u9MxfGxfM8{R&WpB*{4)dMk&pMr<7U8_WDQZ+Gn;b_`E*o z7Z3Ou_oJk){vL&iFcaH>o+R+BeHs@Zr5jo9*k)7+*qO#%2TQ%RmYI?6chK$qlE06I zxScB11sx=qhz4%)Mo_{36dTC#A31xCSh$D{eozbrGEV$@vtpaG& zN}T@y31ApAq2;)HUpZ9?*5KaaV0;RzRZp3?hg?sBF!kWYz1~Il@uK>YOAcCybk3y& zP-x203k&WIxy%FgE3YzSA}?#&+vZL*!FEiU1<&xPDU|~QIVs{kN60%cMLpY&F*asQpLN0~i0mFmXnZU6#L1KSi!h6PDO-TSdB-NAlNL0!308HMW26Zp81WH}T4_PuZ;S_UIEOgXzDs=+@c9Mxm@-V^q{T&DM^5p8qL;8Xn0`n+cKSgutQ~{y)UU<_ zX@kt5oIclKjXzq2O=(n*_P4RWO(j?`X*A4m&-E#mq9=hT3OY16PBLhRNWsukC5dv@ zM(;WPb2;I*iRXORRWR5rw)m6cb zdD97IEdZwfwd*gV?SDKWn~$sT`i$3kwcO6~UQ>D8_`?gDZ;Nz&%Xs@1)!X1t7N+K+ zCThitYv&-RtST!ccN6k)#T&@6yj&yjio8YSe6Pm@F=3ahYP2;fz!ZM`Rj;J>I=|s|eul{QCV4DuAD*-9SP3u3Pl@fJ@@2mnC z{ZzGpvxj*@D2*=@=UoD5vI%@UAV-3022PVR`AhRx;7oo}0xzG@a$Qs;S|yYAnXNZS zLvY3ton1Lxu^V<4*g4Ce*H&_q0`H858tjPzAK=Nie#O4M>#gFSvQ%@( zzj~{T_LtKu8U45iAgVy?7>h~VQ@n_=1tCc6y-k7j$Ge<}BN21zj9Bc?Vvk>Kgewf3 z+zLAIKqI^ld0V1np7&BHpV-12alQwucD#V(4+JSd@6w)sSO4UxSNi9&a?N`IwOGuG zJBYo|c87y`b_5wgU>5~~wsQGR7`~AB2uSr^$dNt||K6myt#h1NXS`($)*uthrKcx` zFT~6Bcu|)l8KARy2`heBuI4gW!=$Y8%pGi}3>ZRQN5%%J=A64}@mRw$+si3Vai0fe zu%<8hJ`lRs-2H0p-J=BCCr@sO;=e{x~l1cJJ-`#@!NfZWlNsoq^R;wGxp= zcu@7`GBTmH3DVQ8z^03F19PI~-~wgAt-A*%Z#E)K)R{XJ*kB9e7%crSN;HiBQljYx zN;LQ{^yOOc2b{P!fD(fEH(k##HBeH z#qXbAiWX&6(~#MT^>UE`O|>kRWm({5{VR=WM_Uoh5P@ep-CZ%me>%L|GE=qY&0go$ zvNgSsUVJMht~>17u51~1vJ?d5kv2U|3(L!;Rs7_ggBG}og*y);A8UAC9GCqDVBX;RHzC-XcO%yBgp zF%o^Xzl^*WPS%;kV4PeCQb7_5(U3@b|Ds}b*F~mR{v;5r!c2M7(#;85n_*`T3y_Ul z{>SwBsKaQ7gn^N)?TtaffE|s`NwK$tZTH2=E=m{_CFy_FDNS69kMMa#O8;dUB;6$c z*-m`_OT5V`W`&^1+BD-Kb1GV1{71ECVVED znqFVxiQgFHvz>f1c_O`zH^qb|3QgtIS5svPnYkX7m-+tJ2G&Aof_0mbviuFw?1A}o z7PCV24Cd-W%zus;{GMbQn-k~BJ@OYoMi?A6mHt_bmq3ArI()U5Q`|bI(pRae%ql9d zHqaDq{XI=D_vsXlph*SgRmGVI3f2HU6yM?i2OjiouN4 zS5dN2DO)a_v&hwO@N1jL;o?7A{Qq{j-fKj_uSNl?ZbU4=2;I45Pu24utkX|QlqB$S 
zVyIhuBHtMkZnKX=M{^;y+@k7kq_26@sCum#Ei&GN6UKrY4c>1Z39TJ=>IVYv;mFhSuA2l=MiM({2j$2xe?~5?F0PVHb!?`)ne^lfa zX-2M>?io&$?UQ{rKN!$Ise03q&e$8 z!Ze>uEq)E9yq_#gsQ;gxmV7@X?7L#I@-oX`DMw<7Ge3!^``|u|zVzVf8c*VTVHw6< z^opaa-w^0<;P5CR@4bb(m^gHFb4g^jbJsq(Vp#K%yre9-kY;by@uHMC^5J03~2y?`BDqRBr zk33(BM0@(zM=|~zALW+-Q0n5OjF3Qs z(t{P5S_h{&`1$eO#X&GD(oZHg^xvJt|0jT^$IE>PJ4}SBM_jVszIiPWg);f#K>)B2 zJI-O9+JDK&h~a9DN#+C&?=rJ{v6jb(@u*QMJ}cb+>o8lvAy~cL3WBZyY^3Gx;-xAA z!qDI^hf_ANaci;F?s}ro>504L{EDB)nC1%++Grp$6#^P4Ew_G%|7bi(l6?d5b-N0L zx>Nwh-=EdINQr={N5i6a<}0lQsW{|BP>igs;lp{%A!W?BI34D0<_a3u*&^AUyk|9whkNL{t{f5lj&4Vj=v z27LAtTg z9Ra(!3j|qsHymetSn%-X+7q}PZ@d^GbC|zmgPqwn6|DkVWA~dp)-otGJma?HBI=L- z+N3j)HligAup8T--y(xzfX>g)TTXgw9je(z7_AXB$#m;z-Z1p1!*P$g}re<9z!~`-z4LE z8yoC{ed|L1CR`g27RCRFb5D7$r0g0;5`SF?U>!v&!5DrbmZ&r2q^LmyU`_K$6$NjJ zxK`CCLCxxrSLPxpN}}>dH+rhid3B7&1U1HaHC~dMDq4rRk|6d(EaR)Oc@i10w3VD#neLQJ}8x*MT6C3sHfA_c-8?>{F z4D^8gmInJh&;P?iFG4j({CQPFVt@Jfp~#RdUeq{u^8FWb>nFu(@&d2BWR+6(je6eFfFb0wEey%8)ly*Yz&JE-SPEFxje55boL3-hW4{5>SYB^aV zs@PCVXyfP-T?qP50p<9O~R5t!mmv4CBt$^3Q1elhms|d4ec$l|*?aLvn4>Ri7 zr$2MF1N4-^^9v09^*XXwM=u9+Q$ycyYaT+GERtIB;oTo!(<8HZ)>Q0`QsYtv8-Lh= z9RsK%cm69*V}&16xH!Q)=|f56hSKN4HKgyM;#CMtSo^n5R#U`CK_i!(A8|lHld6E} zk;qci_jAUt9q+N$3ym!a!7>gaSa4&p0i<=W5~ssd&JiN57Ccns5*NYA}qKMTzIA1iiN{ZHx|eRO_FSP zz(~0hp4*GRuWSh{d@vD<8vHjOq7xSa?(8`+2S`9+MCJlYJY0#J|K6@`1uk;4&BDKgsK$pa$7_%|XsM4Pv zeF?;$md8FKX_+P{y>}JcCZRfAKPsPe@PF&#JfXiqgw^TTUrr@U4FhsnxsKC1Lk)*M z`8o(DdVynbbDYv#|5+{r7Hq)1pMeGQ#;{{bzbWy)T;1u5qHPXvM3wn+F+({J-AXHo zCZ9_DuU{cc@sLYexf=JBgWOf2ujvgzAW~m~-y!ciCQYirHU{f(d zE>|Y(I24Sf)?PGu-?lNE34?BcZ9h-w@nfpCY5peq@p{-PJ1gsl%$`&fn)5G zDIh!=9Koi*$$k39MK8O?XI~q1){xTDnk|Vki$8fBqF>O``d*SeK>{i=G_P?40h)mC zPy%T`nxL};zeYf;(@N5T^%MgC&IuA%(GBZ}jR69mIb2=woHs&99|HsiE1wU?$v$;} zjlfYyd`ZINtb+(=b)Y7ao4AjEW(rglI2Ni0RqkbuDVmafGLw;0#(D4?ts31D8pFzF zwfxQ1Rp9th$_8q)JiJY%wX2Xw&Z9_diC9-5L^IhhvL4rN&_ZME&jNhD=;neUC6LYX z(qQi%6u<7q2d12jg*Z3kC3kM{WJ_X)G1o=D^yLnbRd~EhJ~LEL(;tQa=X$gYF*yBK z%f6rqC*gp+pU3C5u!gnxD8JxaFJ4-o?DUXO1aNwADgHxVGq{F2T4LStsyzKOIj`4nVBChu>ObaGjG+le6+aRpr9=HzMDs48FGehnw_uW;MpnmV-bY_-^%@8mp_UCcC`~h9AM0?OaX3x~~LM zQB59HQDeD15kDC1?=r-;TgZQt>xyX{B#|WsDd)h~pNypyf*yn$1NMn$hOcY?wzM;N z#9uq;tBO(ZeVmQe%4kB+MXpe?Sr8xO-4odOtMitatq78+&(0W1I5Ej zNpoZi*=_e{e)e3v--J})Ha1BveRh)aCp zmb(R-o#CoL=cpY>3^D4JfgH=KI_TP2I_gfqDIQQ|urQbKS?P6rx&#VmKET!Vbmaa& zXG{RCz=}2M{|#&+myjFZr%SSJXJ<={b97L$|c<+chujGG!{g1YrbF^cE ztuoks(cjfCKZ&ABL!u=CREWcqAPfAOuNO810Py|ru4T{@lRMEW%ptpBAKpVfx#S-o zm275GX;!aI;%dyOg8Ml{4F1KRuk#LF2*eVW--&pT^+HQkLHnL@T`WbBeX@4%11BQu&nc^`JI_M8w zQQtMjq5F$Cd1Da|`Kn%PD}r0$z%D!-emXf&Ay^}lkya7|&MSa1Jx@U%9?8j!h4a<8 zsK7h_A2w?n$SO=VamdHz+4jBMkSoYO>P`07E1e?lP7=xtnZfiOtm);~D`%@l7boP` zNoWF18v?iA{V^bm5GgsS^@RW=>Tb0A-&7}o0K0X5|90!lcu>UDS)oHie38)zyqRGR7!V!|nZL<~_iFr=_nZ?@1K!FL6i~ZXV86l{(rO zzh~ME!kop92CzB+c}0Lth^BER!WzogVC;GNqTyDwidc1Ci!<1Lic(rM=+^qy{`yVs zSHQKk++WK4QN{P*;*RM}i6ISubb1EhSR!EKG;V}gbD)E@0f{m@Qasr_*kOdINMmpA zkY=k#N+#IecT&0YO$F~IC!cTGYj4!Q(dmaG6Fz~|yrNY1l3H0w?}mvLFY&!xf@Y(p#PPU=Mb)8g~t#AAe$ zVeIj{4QP`O$84iaTlX7|1UXO5j&E0h@6^}jL@4;a0u6CM1gdF^Wxku#v#yf4N2nEG$Hs?MjQ?H!S=#kzG*UBue(DGqtwa7FMdd_5C7_ z1|56F5Qt<^x||(61hfrCYEGwnuP9pH$-3SqqIk#G;h)H#{6;{>sJkl0X_nt=AOlyW zKmN*}YMNCLGK5xC@V&eBf^VXnljp%c(2I?Thlq#9D+&wUgTJ}9Yk9hNd_P^95p*3jIBGfgUQjs_ieZ#Wmh1IP?43iV?8`Fm8;#QXdtWW_!J%`+iH*h{J1LMV)DejVuGs1oNwj?07CW>Tvx;Lz1N#bC zH?(*yz|yE3l|Qc4IbHJiZEjgC19+F9$#rJG6DR&l7|5{$(0@EC^_19fY&a^swgnj^ zvM{l-xIiFfx|S{UlPNlUNCMhIjE(F}t5K11lVx{;K0JKA3{V0P>`uEXqHR2t6faf> zC1S{Dx)jkHzv)x4;G9~k7dTDRaU7{wK?YSUm_d*zE?Fr!#^)F%|NTPvmGIY?IN5a> 
zW^_1t-CY+Kb*vs;1QySDdVWmm8kqACSMK9r)+{FWvee=Z8DPBuO`yBVw0{Bv4f53E zzb)B70I6yze_kPB-Wd82Fr6z^)(kRpa-IxVT65^Fj+b~R$T(|dDBj_`aih{T7rs97 zq^$;W%}UFBS`;<@T|oRhFh(&!+t?lCkQc|MAyL>`2?_s`XI6SeT2l<(X=jEM$^5Et zpmRLj68P|F{qRLDKKF2o_pZ>Quu{tz&_I5uf0QcvleP9`awib=jgkXxgmF5G>+x(d zMvD`ruYp(=2=VkjJILHqb7EwpP69bry3Riy40jrfYRP+(3@JCP#-6mnY0#0O7E8qESknY z`A7(%S;b4k2ojR}g=YZn>vTp2#?uRke~X0Sb^Iq=J|1-2yKzB04gPhsvPp2E?WfvG z4rlk<7sSiSo5WC~tZl!qJf(=gMPZR$r0;0+_VF!N+D0qKZhMF{xkeSp{;>5XY_8!9 z5v?_%k5(zYVKskw9l5brQ|OBU62i1JdwfgjCs_3#mB5jF#%oiT63CeNO%D$b3L@$( zp4^jk{qf?03cEoD(EoRnwi!Y9{zS&!h|ql%#uI|#1Hub>=XL{I(SOA{3|UG*wE&=)t@)O9EJ+) z6bOLI-h7}bNc;XN3VU8m38ThqQC=?&x{&Uxv^Q<;iHZ2kBEv= z1B~mpqH5^`9<_*q08*p2-1#e-vJ!u@UuUibW_D@)l-LFf#4*x$M5pkgDr}RMhnR4a zLK(Lt-vK-}EQL329)0)EhX|1Ji3 z5z=nOqekSrewu`M4ls7A#Kf+JEbKFMEU?cg|L|u2L!4Y3);&{$WEro!6Z?o!TI?fq zjT6@<_l1Ca&_N6-z#OsII>;c_IigiSB#sn4EQ#Y`LT~I@50}$H#IY6mtMNgg0(Pn? zyt*QWkn+<|ZMKSi9hlqXw%(sQ-@%7C^1t}>4CfZMaQR(7?~64kQ~`-$vUgC=qi&p8 zLuNwX@-I;ewYM^l1h7y=fAEEZC)FNO+S2zwxB!6npr#~+SFUsZM(gdbfw!XHCqGJn z8IDT^i~RS=9kHqQB1ib^bbW45vgl+nfJ|>s#rlfO16wODzA7i68ChE4kMOUS@-Hqo%VE^GP zKu7g%p*H39=eqLq3oVC2d*5dGdiSeK1K1WkB{Y%7_hy0#?zct~)KsI5_q;A{W4;LWk4s6F;XOvp0aT62tP#hpwOGQd5vcHP3 z+iS(oq-C3C1Mrb@BpqQwk_RQDu*XO~=$PFAlw(q9$EoOfI5dd;c9n`bE7#{#iS_I$d1Xa$|RfzDY8T9_mGXeOmK}Oq|)5<`Z)bhK9;16}-Q?z1SCXHlNz%J{cy=7n5+EP($o`bHfc71_O%D&AJ)Fm&Xjwt5}sz= z8C99K5RIPv^ge=!viJoA>9R<-Q=dmW=aVObI(7ueai=?F;7nX5;f|9#1J`hu z-kew7kv~rdA`yr5G(3omLAfVn*+pT|^pS-3RWZ(<*tC^)SLYjCICaiv1wA#1Sy}`z zH2seQ12QH@+33>q9i@Fda@W06B|=#zt3XX8sSmp*9*j?E{X}FgNLyQoLJ)?O7zij; z{xtW*M*PYn|0}ykecp|%&fdQ8jr!Z}MtSy=kt~pw*T)HWDhn<%>&GD5OpJ!)57Q^q zepYc*z%l0A&jAc#oBu^~nFY>YVv)b=pYewT|aOOZA|?E-Fy1lS3nj;M28irh#+00dpVx zSvEc^%f==_)9&Jrp>|n!1%h2U5uID(4SIrBVF#=0u0+v=M{ERLPk9%-@seOj9;zRB zNo_$nb+%7`+fzPc6pXAGny)xVUr$1cv}-hLU<$O1bxR*h)X!eM_%o@xH7m~+POfe_ zUMx1Xa~Oc6+tF=|FswG)DQWFRF&X#lk3@pcEujD6mWYR>2U%$;rnh@lTw=t`#c(=^KaBq0wqUaYo zxNOdj;^5Bl1(654|CPwpIuUrrbDL~7O(gDfb``wOG}%YfA-B1rb;xf*RCBia#9Q9swPeH9L^>8ZLL>B_%e$E|+q6t9 z8SM%;5gGOwDbI9!bVB}QU78sOZkDdPc5&PuMq(%~PM2Q`>g8r1=c5l+< zcN=$MN~;6(dwmj<+^&A40dZ`VPdL+EvWVV0unfL@#O_6zh< zaF7+f-6pfxl+Rrh1bxJ|{S#+tF0k@jkzVEvzaw=P#ef0i1P$D^n*zQvLale|99rEu zl=uTZt(|FiA%sq0&&dcQxKIKPg1G4I0}Y`hUJ^+1*ML&u5+;dy);G4hZA-F*jb)BdE(Y_)v@dBQy2SzN|Z&NX-KueV5<;XXzk++C<2{ z$qqbo+1w^r|HJE~N>j)CYY^VGZN}7wbi^pHbbp)<5V4H9QZUb;NX3s!w-+AdMGgNN zVO-(u1%npSObZe#T^evSue*v2%+h|4ZgkKszLB5TTtaL z)zhwi-A7-#G)m73Q)iKQ<9>YY+d?041OiOLMt1RecU16jxs}$bNxaOM3K*tBZd*oQ zhhlOjclIH!PAp%fH9Pj=&UAl$ranG_atCLaHl;X^y)NAUnkKI+ieUkiX1Kj~(xuYx z@3k6S^fbC!OqVpV*V30VVKPwtWKutw8LL4JyGp-4x0SW^GR1!7^%Gn=Z|>kAa0`hE z8pWdZ$sE73SE#-#hY#E9{E;xSe?btf74MszbPK zeyxpDFdZ%P`Iml>y~Q^@R3K3wheqo&aAneir|a1ZhSYxGnnayV;XfgC!+jpIL}<3y zJFZ#(?h%V|VzshiG8?0yZq5<{Qh2GfJ8w7k_MO0X%9oTmB(r-y2hZ=}%TG9_ZEq7K zr%7!{XJbdBZz_I4uNvbszL`%2**5_Sdz}r`1X0E8`214t1J}Q}wmENLE|8QUd zYhIiT+!t-RggO(RhK-anUp*1P)gaMD@sYMs+v<{`aSU^X-k0BdTEQR!{#u&defvIF zBCQE&|D#}O;p?|4BEH|h9Je}4Ss3?<9j;UL;lFFsGW$YH!N+~Euy=U0R1p2!)s2Xb zgalU;u2eByC0H=T)Q@Wxc1A>ThxwQq7y)>_n-QIhLU6K@DGu6T1wtF)2=99jm%4Vc zd+M+>TO{nY@*%o)B2a005^g+TzIS|d*J7@mT-2K%myJ{xvgo!sc+L;HM7&m?%@{Y;MhLOUcf+{+%w21QXAdQiVcGE$SNu$wO$Yp5~T(( z5^1QGp^9^h=T7xdp}g8D?@7VBGuh+{h8liG4ZJI_KKvdwtA~<&y5?~93{NEJ0O#8x z3Ii>|W2?r2C=h1o*2D)owa`Qm?--ZQFLRdSRdxmm8JcV270fgHv2NX6m%)v{tD%Gk z4kW3{o&jl+3M4q$=J9V44I0sQVer?I`)*m;yfoCeG;x{mS=b35Q1$ZTnb#zs87y&d z_LI&`_V2-6b&zCXtp(8a^Ae#>R@$`Jh3 z=*=$$W%Y$!$qRXgY+HIu&mk$Ag^m%1&IfyNNHQ_kf?izFz3BW|DynFjlRVYc6Cc5uw3lsi|M=(FICLVa^5K96h91$Fc 
z1y&fE_7l!C+~;Bg8{~^Rfsf%+@B8m#f^c_1#E4ng7`G99f?_11%7_#*n^U#*jZ&&v!(E1J9{)bPyj+kTB-nak$>#6gmy^luTfYoEG!!R68jV!xUJw$~X)H#cb0sO!5Mo{t=#D*^hcnXi_p3W>T@!^W66NG@n^Z2DDIA+HZj%L% z6inx6CZ7)>;r0$cD%g0`VG&OnpDpIR*35I7O}(|q480n$-A)Y|ZF%&g(fjkU-F`vn z4-3DKd^=6mG-*;Lv4sd2hidficKh;LRn4d<&~5omX8ytCOL{0p4Cg$4R4(|YlL-ae z1nu@TBnNThf4RmK{LQ;Tm9fDRq1E_jCDJ_Tr7W1M5oV?#+N9-zEr$IkDUX`RNLm?) zzBE(C%Sh{ggjpgl4m@~yKaI8E&7NMV< z9c1o-EXD`KIDv03rR@20l|x@n{g6P;L<(fF1VLeECMA5H^mb?Nzn%vH_5R#v-^r$+ zgYT=az%PA?u-P8xmH*{JjBwNwgNwz2m%siDzA#LGbIEM+?$Z+(GVmPMkHDvZoolMpLdC$L@Ooc8%WsK#$Ggek-|YHwckemy{38#go#%}iN32$nGyLN`DiuX z0S7lCb3l&?TsrEPQ9Eav6XL#!cMU(c62AiBrwTOII(23{XI^ zN+BZZN=&t#@$)*oh==AkxWu=aPc9(GCh%JhERneQQ_mqAv%;J^GJqltVGmJvjNrx`TDse1u?lA*t5+=PqBI3rTPW3m(j zx1v_zwpjv=2~~^(8H?BIdVoeKce^D$3cP&rO2d`e9mu^5L^^nQ2jNM zKZLj}RR5@XA(1A<7IJ#J4`Na|E33wEYr*JY7pYt|nJ3P;~T{h@c39&6D1CqU<71#7Yi zHZh?p7*cshr`;LAM-k5V9t3MLcjK)?Q(DQm_{dxc9r1C+6ce+t2w8;s$(ufz|| z-20J=&rF&*wFqQj~Sc+Jn?vKdHUxx{jnd zZ1Q!VJCbV}Loyo^k^IzY^i>d&1uzsK#3b&B9EG(;S0C2JH9mO=h%9$MJDAo;kdHVm zB?ps|Moux3`TcHuL8pl5j8#9baafrW;0{2sgLxYG+EwrA+%bL zD?4d7fUG3csV<8`^w+415BhroR(e8`O;o^!kg93ZIS_>Cp^UvKOFjs`Oe)TPg%_dT z&isfW#=wvwH5OScsGehGz5H{>D+xxwu8Vrux~xl=5Cr0-5Soiecm+c`mVED1&NS%= z`YNJ;vlf1vAy7m@kaz^2xYN|6{*bj$MT|PNN>}6SH=5M**M-lP!tn09S3z3LPSH_) zqfo=-=Z2~dZw_~a3{wP2!Ea!038MKITm?z*gI$nNCV>eD>#qq^ynWCYz(*QlYoZFK z&8RUz|A-(&6}9Xib}OV<)QG{>CGUMtqHUO{4QG^H5MDttS%;s(PR~PpA04F69gBPS zoRTEq#;vaCy>{j8ynA73y+8O8yCx(#Q1_kbVzag zE)5+C5x}EPXYc>aDSp|+8lXhX>X2P~QV zLhZMR!Ie*)*eH#h%PnewhlXMVS!}jOR-Z(9Giwn#&SR4TA~MEA1O1uGrT)uO^i`(2ce53%vLS8U##LWOqLJ!+rtA=u(?!W=K)tLImRcwBC}ZfwX{ zZHN5u@C1EAtou>l_yV=5Fyrrh!@MPc@b0Uo__M=FZuq7AP>C@E5T1zG`dNLg^eFoF zoWC<4zm7$IpzqLs35lxhB(hzXgHQLnAT?5egb@1C01A3NCf^LctE3qHAlA9RY^V3d z!!_p0IBP81Wc~IOvjqRs)*x^2flN7@z7#^hxBw)Ilf4JX7~&iD*Kp;*PHtzjzmpOS z4_gT%6L? 
zn~NW9-6q8%#{0|uN_q@O8;sFU+vRct&jp;`!no(H=0U9UHSi=Em>bR)C+A4aWr-|+ zr8omBh-zQn3HoH(e#3PM4~sF&dk=JY&!dAL*P?*$uzU+mP83f<|T*P&=llcL{0T{f%doQ&Bp>!GJt%liuX7x_Rq z0Vm@qM#vCX|K{QiMg}o03G}Q=xwPMWrs43Voq()N^$5aG2NOw8I)%RQ+<0`)e1;?g z0tbY#hc$hB;ivOo3X(5{4gEkvy})pta|wKV5@1~0{g_nfw`;r|;bLYmmDnp1&qokn z_;}5S$_Q9wT|Rg@x74pf@@hL*FpG=v*4tAWQ{%W+%kM826ueBONKMp$0c6!&=g(!( zWR5H#yaCm6b*pNnCy6*JQ8L2$t|tl=;pKybX=$jse9t7*+#?%6=03{dQaBQJvyqS6 z8{&|CQ^C+$Nan(~S6Pksz5sq#z^0jjDQ(OrMZO40P_2pT92Wl7PT3rv)m}eYpV;8vPg}}*rSCFciR6Wz%e}snv@-& zM8mBJ%aEP~)t+WoUrd^mSuHU}%JDHdH#hg%h%MeuJrD7mZ{fV-q|>k!%ZwAXm9_uf zfTw#Ghl#GMnc+S<5Qy7-u4=mMBY7F$|K~zf8=?%e1vQzjMq3vmRKJt04j;UkDbf>X zFNntXb(5pUK$(M~@pxu1&9Aw!c9kn#3W63c+*DWLPjKVp%NG~c?>}3|oWt!DV(l6_ z$0d{-O@+{SZ6gKH9NWG{&>cepDmVB2v*(Hp)1CKNL5)N$ekbbL_gA@(YpVbkquI-> zHCtJ-w(z9T>CeWU2uijO&j-n9&UdE%L?7oBBI3dl0$s6?^podv0fKb&67~@PjgK#Z zY6xyxYqTA)P0eSY1!yQy`HN*hON={M*;RRS?F<}5e}-xoy6bVp_*2*vX zC-EXORQT~eYakyB?kZ(8T)%RM4cgJ_<*S>U{eXSzNDRlN)JK7jhn?b=nm{8>Q#|JZyi zp&plxv6!)N*%*UPN#Z)8-?JcwrzKqjGs7B2?v;Ae4N=2d_v*U!Bj&x`(>0C8a^ROeq2NU1EO#J1ETY;*mt2jWzfDS z|A6Ss;s9&StiOEc?DQGD?Q_mLj*(rAI9?N80i+ez#rNNC2`m*>YZCmu$fAsaDZV$X z@VNQjtY60qho9a`xBZG=Js?(nRum2%l_Z2_y8hwF4CjqOf)Wmh&+tj+2ek~y&=j(* zqmuJD#H}l({MnC6Ec{PDi_OPW`B~jSc)dcpy?-ruQI|7qb*iy-X`6aLJa~G(%+mi{ zTN8Jn6F*MdtG)Mut?`K$X)**K!SvC8pq z64SIbmCIP;e+s@H)H?U`Oe}4rVf4|K9}_7vzYsj@>go*t%0>*BtwPPDi*OX15StHM zgWinh9ariW8CX-{ydX4=2NzBcHYnX?;ddbeQCu^Jh$K{A(4A7A^ihf4YQ2C~H@>`# zlY;g5mnj3(T@|XQtNprj?YsDfuJQ2ktR5GCP2pBjPBjBau4y!+Wz6=mC&AhiZYL02JOCl7T`O)zmOhKpskG?#0LP$yJ_kPCQngIy zC&CufRV$m$rH@UTOZMQy*Kd+ZqE;$jYIhBe+K-rinyNCt?f0c***`g3Srd7OyNI>{ z-epI>t2SPBEV`C&_<0Ba3iZX4+wODIqdG!p?cU60*=P_;n)-g&ex<)Q)n__TXKe$=RWUy+oiMii1%h z-IW0Z>v>B}A6c zu??fNR}T$*C6IHW#XHe<`+qQgBCP^`uifB*V6#y^tg~i>S<0DFqkH2ucxtQNHOm`E zw?n_5TRc+83J0K*KSRa7r2s&;jtL~o5dTz|2L_}iy_rq(22P4Yl&4$H{S4LEuh~z$ z|5*GSU-kVbpR#Yycrh<41GM%YH-7W^7`{SH>=B?gn&PwfyGeW2YP z#AbZSZbI|wZGe5@;21_0H|d2$*+owQ&ZHn(F>yJLmx~!J+T7Q~l(Jx6GwtAeXTVBd z7~^{Z8q;cR5k?kzGM&ubmh+uQYLZ4D^~xQJHrC69S^FTni&Ag8J_jo0{R7^5QI|)m z%09vt!MxdvyqV8Pbc`G$Fh_%S^To<7`X@3v*4qOeQE;q+)iR*n##p+w~*!AAHU!Z26Mer5`0M= z`H;bSO!TRS?AJn1wy3@n@ZYp7pg02n*xlztbH8b?xRJ=uk*gN?Z=1=Q7*C-^bW2mS z>U?!kp;Gn}+X28pu0DH!1^?29aC(b(a)f|CXl|C7{K@qrlZW(CscB(ju1b^0CArd* z-}-m!QU&!Ex^q>d^+)XM{r-HhHOM}GEqholT5`CckQxw?ln&|c7&-=q`R>tot@qt~uf4u^ z|6d*){P1Ard7kUJ@9VzeJkKj`;@jNZgvXZ$H=EM^z%rg*kml7gy|uUe`ijTO&NS41 zm1)%pFYZG&Cw@f4Zq>$&RL;Q}L2%uBpDkd7Dz<7{8P#;#TulZVsje(`ls9L4iig*K)zlKAqK!^A4h;Pb3F*c1W2c{wLhqkS@8)m@zjuVY^ z`;`o$w9y}>57%G8H5@-9(^={B=sm2Q2BK@G>L`HQkOd`RpOrIBRT#XWVDb^KiGxsH zY{ONxr$h###cazIUt!3VnHaX57pt3r1I3hdt~W3zz{y}MXL}|aEj#gkqLz8Tl3JKH zD%$7znQA9S)3B;9{nd5V_?z2ndcI^e(bgapY}Nwl=Drta){LvJF8p)yE{sh{=iF5j zD&FpKUb-Tj*78%R#^HH>kI?z&e$cp(R2F3-{3Ag;T~4R!jC&VDEdb$p?o?#T7P;IF zwZ^SsIs5Ddun5Z01NNBm@JE`62BEzb#&L;@HcG{G>y~}GNl7ME%deEEKy@tpZiz6` z(M6eeewi=PU8vj9P;bV9q(q!AmUdD1i>TC6v{ z%?J7XD_XoGdX-AyFvUD_Gd+*!+s1uo3n|}{zGRWZ(T8s0YG@P(!~aBa$o_%iuzx06 ziVfZ{FTs28wBIH)1SlMI@c5K44Uml=eQ^a74{`#PZ~1(JbkYTBU(%x9DuhAD&`^+0 z6_uvn`L3!MZkFbN=}qpC)5ka;21P!Az2m13j&13BBo&XZ8Fz^&5$D=*>!q8ZK83;S zOqzaRwS7L;&<%EtV9wK`yQ=A;(6R{2#T7>}21T+bnKpxm9F|vXg4NcX$GUbSSOC9q=B1TJC&s?t~SSJIo7AK>j zO5Z!Pub~k`L3D$*p#1WWqpa+nGtN0L5}%Y2(4%cpI*VyCTene|+CYsLW0UduLs`jWS z$@81g@M{KI^=`ob_dXYMZd+=?z|Ez^y&Y94rwS|RP&TB^f{Z`Y*Uturlj}CiU%Dk@ z$)Z>;Dky-g(aO{APYmAbdvdAq#0*Rv6AJ`6#8=R8??;2aB(d@8{n24=93Kga1E`Ky z(xPD+K)gAWhIc)~6>gdPxU%5xc9ZYZ)hQ1mNtEYRFw4hxp{@6Y9u>&)zbvq8W-jIkgpT={0ad?uAm8D9~6PQ4AAG_#>#of$lqVp((*?Gx87b_ zbBH=1{tP>wwh{m}9dYDKb{gLCAOOKx*qIY?6X)WDG*S{#)nNPM0r8%jPgW=0#ZA1C 
z2CCj^q_?p*P+o7P6fqwQW9nZ0__c~98aknQ&w@o2_UMB-QxAwmTurh+GiVVdCl;=T zo{HUrxLAez7!N>-r~TVwcb2R=qZ$Fi!|dX*Y*dhU0_$BE*Z(ytfHztj-a9 z_?#@#@5$2YiF0$XdNEEUuh31Z&IAAXISeN{ur9FnYOhnE`8Io)E2P*Y=jK&ms9WpF zC7C-P2AH%ROMl)6)i?NL4Oc+VkJthDrp;KgJNo4Vgw>7fCW@Jg*mvga$17r8V5l0$ z{Su3=y74O)p*;t#n<6x+Py`9!{k?j$-x4Sl7$-PhB2EOV968nLt zw*+@DG3s#^nRkSLOFDHBgAiWhHByvFJYJrVF>f0bK(^>dxsbk3?b|xLM=y68vFQ`ys3HPt zgzY7%K*{61ust71YR1q$H?kOZk#a-?(~$C7fV8`Ao?HQ1sq$v?^o!l`56-?Y43J7T z%nZ#j;lELW#5JT%xw*r1`agFHX=Mhx$v*7l;a=w<8qd{)?$kWQ<2MjQDCO(EH)T|X z;jon8P$H zGL@cme0cDf62Qp5?=8)kZj87B3Jmhuhz)0+4iE7z1{n5C3lMYg5fdMNg$zYU2Ga5}acvo1d#Raru;J5ctkQ%|SV06*OP4jHJ6Xm5-P2a72iHHy z2L?7eks}jSIf(dle`d~hG!a^qm*+wXbCdO6-@^QS<6oBb93bp6uv+U48z;7Yf| zaxQ&3IFstQ`y=pB&sS+aHq34vHGJL(TH^4aFBpV$pi3`Iz(?>_i^!juosvTkLb)R{ zw2OnqiwwuOzve0*YMFpO=U2{tuF|x4>!nqlny;;HHAZ}J(ols(mzHo+{OTP)`ETAZ z8tQyr(T}d+;vG{(Lp}Dar#!!jDCk5jNQQm6=;@iA;zTwlGVr*H0e9t0nT@VGslv+3 z!J+`i`VXN`2Zl|z+H*koOlg{E$(C|oYiN{)H^hB1g&J8!lr;m6S^>h3=$j|hoS%z^ zzH2E>yS;3UDX%`K56nlLu@wK;Yrj?&Xk{o9wKKS{;-Pym!3_|3=}X_(P0`0IyY#!j zM?@T#+x!MT7UPnM20R6Kz#9~cKEh{H{08hbpoNCjm$k%?A)$u!Q9u6m9mxKVJIDfg zwm=Q~5n@v*rp!))YO(~_Yz_ORh_w*NmYDJnKELbh7RkO3IjsdphFU!f=h_XD(1*56 z(LYT$u{X%kRi%^V&0_4uiC9@70MN5cu&)(6I;6rMa(XLTa+^0=t~wSBqHkOI<3DfP z5L#oG+`kv^h+IG4bmTvfb*lvYhYAdlycJWcr8)_fTpgVoHY=;*@V zg9w1!PS-wIOymu~B8wzJo`>m-Lr4ce3c{i42;XSgH8w#V=KVVX5&jJ7=m%{2mk+3k zIK*BiptuDxZF{=p18lB<0VVT1>1AsoCj5IzK14NXoOYjVX5lf4<+|%Kz9&+4@iE{P zRxQ5z=PjQ*^pk#MQ*OEi7S@xO1Eim*RGHp?#)>cI^1PNrNRSI4L=I*Kx{Dq0gte75 zt$*TULadfw%E*HY8|wcp!_D58mJVx2-jhsp6V8kMfiv}ZavqJ|@rChN^~^lo?M~tg zLWldrYH>FwbYm_8ULMMu)_bPh$B9)~l_W#^1A_3PG;4)|UCi=F5sGDbKwehL* z3fLDDv88+lRfXJ0;vZhl>S z*L#&6FbxJz93C)6bBY8X?^w-ylSM!TJ5~NC!AwhquS`xdOGb30@`+^U)g8rH> zpeQtbSvhr0FR3S6N6;_vr46&WlNm_jWQAb5JD4%lW8;Gb<;ICeZjoft0n3JcNm44#eX!sX6*;$XqGg|FmVUam zYz?~CvRLfWSnQFQ=*<`S_HQl5k2uOcfZ##=>mxz`hmU}EbeBMyTC4?^Sh5(}p~Z=) z#4d}2XI`;L7$^U0bcenvvS(Q0gZ744f6Jkof|KnRt{N187+C)=A5n$fZ1dqWh|OAk zx!#9%i}UOT3H!kLCdWO0p@fe0c*LGI_VmO}y~iMolujcAxs)y8-`i&Z_YZ7U|7&Xo z{a-%9i7dN@U2AR-n=$xV4ti-`+{B~~t@l<~!cl*zx`p@zA@3Z^gD+J9r{ZCBw z?{k(vV=8#69DD6)JD*RE;$&I>6v|`{Qg#j0uWmA1ID`LiK7SMdvCKL+!{xcbfk}*R z`1o+HQANwD_?PD1T(%}xR+sx7lJkN1+WT^-i$+9ua?+h8<)AL=FQy&sNDR9VOQ0e* zUHrB3Gxdj8ST_K`5$md}V$pzb(-1>zfQvKW1Kv|DgYHq*$fUQwf_4z{ z?F*1i*_;q?zOa1zw)G!afL&Pc(F}XnxB*m0g8uknVT}nul8}A71HPZR=bMPTXzNF-T_aRQC zuNdCL-Xk96&qZGkGyS6X^47!cIm9Af8PQ)lYR)(;1?% ztMT?pu6Y#QN57JI0K}U?ak;)rh}PL#IT3x=x+U& z?CBV{y{uhGPyOnO8}ig0T*lcgDdk_+-PqH}HGK;(5;FB0b(K}21aTjNXA|xbQ|+<^ z+Hiwe^d<`7Hf`jLwb(Z(VzEv@S}O8Mu4?WER{R6Rsk)(#Or^XQsIoRKCD(l4tRzdBE;1iwzX4wpr_R`0w zhnxU{o#%Io2)ViPN({47bJ|zL`N{3qcN>FzxAxNHWpb*|;TM-valS*7e8%#fLED#L zseP?9o)qieks`B@vXm4lhW;_yK1UJ3Qa601!P>+Ru1Ae#Q6W+tne|K?eLwY9`p5XZ zc75L)$>Txod0Rw^dy2BaR$uQpWyUA`sm8+BC5Fy)N|M6Fk3yopK7(0OeW~~=DthD; z^RjMk`qHFvOi+;^*hOv7xM+DiQXE|1whEs*RyBZAxNgtM{3S(7KKxAqe*NX4zBo5! 
z%J}iu^24CPpJi|#rvq)0jSlDUrW~{8YZ^%LT$dB`=Beab0 z$t%XTd{W{6`1mQHdjG7lWdbwSY@Vfj{D?B^O_{k^8$MaV%$m`}x+afx7oCD)h)Syg zMm5fKwuM3?!14wPyU$2;W^;VGM2N$SxfN=%B#0*EQa%ZpyHR=;XWYpozQ9z0U)vgR zPX{pwWxOW#l0vV@Xt2HEWL_JMWMq*Od*do~`x*)n|* zwl)~Y!e@uwh`fQ{{%7W*A33M>F>}-SN0^)O}+C|xOB0J*6MgAOPN(~aDeZHFd78I-?W2oqkO2hiBniE0iod$0J1lxfpFPRIoxVAp9T z+7IXbKvT5Je{r=XLjSasnM+SMz5Lgh{L)HdNAtDPq&b<9}3+y`%rb{hr1EUYe)n7W+XA( zQ~U6I`U7u9O+*;Vc|nmTsCqN6O_aNM9`|LY2iLvukM~4WNqqaFT0-8J z(#b#pKs#1%_ZXo}1C+-ndRy_o%a}V)a9ka4Vdkj7s(bwnvtU-eDrgeigGp)ynPQVt zy1NBzN|Ar-yd7+(R}JTUeE7M18glI2PxsQeU1x2-9zCe9BPfS z`uYSw{|X3uho>8qvjjyjMZ7*_{MO_Pe}Y6zxveBa#~aOhRAXv06vOt$<1WnyW0h_< z`x85k8VBtDSMYZwKl|4fITSAvAw|lGynG>UYApkL1Wb#V)7hS&J5>H)%SPw8@0A_( zLSiG_WaHd!jD>LZM%uK_3G5l&m4GYAy;Y`U6{UlOmo)iRH}%3(wi8qcOSWKyBI<}= zOlh`6%LDgFT|}z^kesGBMI1Ed8kGAfn)f6PpX~PvC0rv`0SarBjYt`)irf1%k| zR;bpaK@D%y(ISZB`H8_+0jkNj7fe7IsX5$(I=Y}r+Yr`P=gv!=MPbwsQ}N@;M_{g` zK$*Ea7MfOI{DiscB|lsPsh0-d_ZjHbAf<=|{SC;7dH1NonsBe?(UtkSfJRK(=WVm-zjx`9fkDxu&&_VzKE4X3Sda4kfKOCYo49BFfPI2^iuPA-$PeYc zLUsiY@p9+Iy1YY49E*o|ok_}$m2otv!5YEA;Jt$?x58HkiXW~o$~e521BH*!(8E*K%ee-oa4|%73S7wGF>0G+l-Ih4_LFFhr`DSLm~L5glnkf zM3jstAs8~fPew8C*OfI}RF~3k$3iERo#Fb*}#BLX- z9X0zsvR7a!eaGk%d|wjp=le(K=lrk;J3}MTCn}N)R~b#F1ERMyhepDzUh7(f7mZn1 z-Y6+kax+jkJb)#i=l=n2{XU~n(X-k8@(^ar)VF##6C*_HM*W`8p=W$dYXIJ|(p)&e zzdb{H{52$JJ;&}35*zu|tAYHDKYe>aV{O4|q8hGL`J%yYaxir|{-X93WFTc51t^+* z1!J1p`MSCu?K6e@@6L`wXl&nQD$!)ZYwfym70j;wEAn7auyJbhm^CVb6Dyd5! z;e`T$z`YGNpDB@dAevEmK0Qec@C;+q;|87)k&1bFH=Qu$uKsXpvrOq3dv1Bd;ZI^) zv%(LG*f*42Wvm)K-Qg-g*Y%xrKaEP~@ZyFB=0YqNp@=G?rR9Uez|R$#hXXq7?Ccqu z>Gh~uqv;-OIb{UI+gKi;&IgTkCqDo!?vF>R%|j2)ligmKHv73(1QKW4%@g76X1)EW zX2jqy@#EK9OUB0;WyO>L0p9CpxTB4L@YLC5QQ)0N)Yi?alYdS z6)VS#L)K3plT#N|q%E=XYXjnR=GKY<%~3(Y=EVJYv5l5zC9-TTbp}kKlm0#){aTax zBEry;W0q=7kA=}azXn8=^U6nCz+u^A2>>?ypTIl>cCeIbNbxG8?r>J_gCVLRoM=Hl z1zrqXN^NR3EL|#Ew6sBPuoDjV-McAUb^yQ0pHpIU^XAW3r(r#69S)dj*N?BS8Q765 z4qT%r;4-s2|I1}kLqIa~Q?cd(+^zrr@Zo%V9T!3+D z0SGet`#)=TnL!+afp^(+=TvS|cv8Bas2P#jqRuK!yv* z`RY>fn_z&srE*4-c)+FHhN+>pr(hXiJJq~?W&2j|3f+3=FYB??{53E=M@o&f0Aytw zPA(3g8am&Kr3`{IjSVEQ)8RPQ^8d0*o-?X68%ob6P)@n@G zEhHi{n@Mk>^Sn4sY)=%XMWfAz^Q&k5N%718@lCVw+&K494(p3iC^?#eI-F-wnJa9Y z5Ij<-#A-jC1ST`He#^RekTCQO&|yK$qh*`=r@la(3$gV5`?M7{26|@(y(v*UtC=zV z*$=$7dU^DqIbNU9>H^xI2e9j0kbZ#7{j8eye&P{Do*uu|1=;@AzVti0yPj^7IRcrU z29d>hy8#r(EMPhbNdK|8)^3Af{5LBKQ(HyN+yLGEG8M(l2QRk1#5AF=8nWmAnQbI5 zO4`W!jd?HS>(9G4kEQsbqoRSA>I_fa0eKCn1%Pkp_zetGNi$aEH?IS%yFPH7E_Osc zs{16x{o)oY8@#QHzxw-M!YumBjt1U?*>bnXn1r&ga%F20Rb6itq;d(DYAu`q(}kPMI+phChEffk{uU-#fVX>OJg;HhU8ffH{Ee4obmOy z1xKa3$^K89^`)$TRa4k-3c%KDM}*TYEbyO(uf{z1q-JWtPvw8l#yRRZN)1nll}C#7 zdqiq+vez%+BA|c;l9KePdoYk8O*;3M15tE^+jd3^Zx2YX3^eC-odFr|faim;FXk^= zX(x7v9u?06AYwG9mZlt}(enlxpa^K7rxU8)KKyW{m-}tFeUCnL z!f0$>OJp)8@iJMh^+XX~H@Z&Wo{VnuF$WRqpQ=I0p5D?E%bK|+g&%4%#de^gL1bdR z*Lb#TYS-4;>;DH|ZMMnx)NQBP?&RW3wB~HvszQo*O+#7#bFzVk#2`HXAjA+GXl;D{ zAp}T=m*{U31EIh=-v*_KYP2o}Ij!)+$7}Xeug%q4lgp$4EXffPPOA#zkFFsl)JJ1b z$dy%c-oa@QZ}Xal&0{@EK5>-YM3?ePl6+!di;&uvxCUi!49?g$Uy9LKk%E!KK0r$2 zi72TYeE`5~JheoSgjP#70AhWJ$3g1}h-{fzS*!0V7O?Pbd7o`H zl2z0FsVm^&j+$=3-dZo^*r!ZCD%ns%!euVH!|K_FK z#0q>qB+rH5$Ud0Ks3NsYN=8b9!#4J8F$OcbphS3BS3L zissfB6<%$KtC#B`@?BkKt9xVIJnwF;m~bJ!Q>nzV3268u+lk&FvIF!z0TZZu?Gd8E zapCo|K@ck2H=Z)l-}PjGU5IIT)R;7Se(C?zF`H;I7N?8oO%jcGm^D5hK%4N~868Xg zNFb5%Ja@bOLYqNA05!b5Pg_kDHt4XutXV;EF}Kx`+o=CKVz+tR&-dH<#mnNn)|VeXah--1CJ_53ApaK*8`25 z+Olt;8dXtigWfDGbBL>`N>6Zt}o+u>dVrPu; z+1~78hqG{zAUZ|e3c)PRf(p!8+h5ssP3-mmT}|c9lG46}wS(PzkeK}gRI^@#WwjCC zd3Pardv3{CdF--+?UY-#RgBGHDIfNg 
z{pV|r28A`Bbd4TQ6wkbT^9D%yq+awveZ7HzrGUE=Qz&qMU2)#|62oULe5Hu%&xY5@x2*6lUz3ARKiUC&WXsT_SDuexS167^v~itSW*&wVx`r8ik^$d0-*WvW^TK2Pnd6rLmt zV#5Ij=Y!R?tE3e5l_rkg(ad7&lu7>U8Y}zEI!8J>TgB1_C#V^Lp@;7UP5aA)CHDiE zdJoQI-%y1|FmmOmd^#r?yopSD5?T`A@H?z%qtffZp_= zyrIzuk4ZU)N&YKPNMK2FYBO0bF6v2N z?zuUzfO1osL|C1)?>!(&>hSL|jUFg;nQ@;yCe51BdpH?^ zbCVh1?l8k@eXvo&l1tRWXJ(&G@oTJ8UW6YT%#bcXmOBKUW6a^|Y_Tn{gmw&2x);X@hKR;Rl zQZCN6Q#VozUK_3nzjlYM=M{g?eUV#yUIQVc$i*MUKF)BU;i&K=(wxDqBuU$Bz#5Iz zEnk#sNhRL}jfhyC^aDNLyTN(iXpkFUqdOoEh7aAz-aJQCo9lU1dij&PUo?h7jC4wG zUEzv~L}zN2mhRfP-t$%iV`7-(>c8^nARZ||bcA0NPpyx?Vtp}O zd-^TTjoB11gDcd|_>E3h@1YJY(F%l|sxxQM-Q zTv&dxzsxikb1{VP`0m&xL~Uo#?w-qeXr7J-tAHa3n4XZ0Ty+^hb@IT?w#FI82fwsl zXWWjt=PI|AJOz{;M@(3%UTxIn>TW@|R-W_2h0lrjO{SUJ6~x4QglSTFIe`(b!NQ2{bNO)Bj>m*!I`gQM?~nIw-8VILMm zC~l!Ew;1$+p!)-Ulx3vbC8>Hba8-GzgAUy$qY@FPDHj!S$201PR=GN$JVher3Z+Nu zaJs!a+bK3yQ;fQPhn&1$zRnwRZUyv-$gK8-UPsRSp_I_a4l(NCPhntY9C6+JNwEcp z(*eG@8~EjfqV;@Q&@|er?Vn&q$Dd#&8WrWBzjE9@?g&>x!$Z* z{`vMAt;TAxVKpldpV{zuRJIwvywA+NUTLJ;E|mp|buduM1{<@NN}yZ??cp7$e20Y2 zG-F1dTS!%^wlk_iO}R>?Ij<0&EXA=sem*}9 zb$r(-a!=|HE!Z;;KoXYyio0ci+2A3pA)MczDfM+{&iUd_#0l~4GRCf^jAJp(jZ%7~ zU~W9W<@){A&s+AZ)Fy!GgiLgF5)$s&l(PluZa+L3&A+Kl>awLIq$1!3o`k|cgP8O2 zZJ6G-=S)QFudK#glvnnf3eF!PEQsR|MG`<=p?|kb^w+ao^VN}%wIm$V&=L^kXl3B( z*_;{wL=L!3oQ@-?@jKN=@?Hw3D|%BQv{4Qh=Nwc#%f@u_UZ9QPI4_R{RJTrw4e`F! zHMQ2&3MHr=^V9<#h&a1QwE|8?V#G(>+`@FTy_jeoAfM6*S}Jc2fbKC*h0Q5JIKw^v zmDJP%WFn9+@*53Lx{mxzqNMPkJ!bfoiuj~Pz2Y4{yRqDDlPBk>d<8CKqvpIMyD6q` z&3l603#--P@8y%{u2z^D>N4n9g?-i=)W2G4-NZXW2BMHd!xm9c)T7-NqG+g;^is4g zWssd@K^JxJFp!gLl#&GZ5i;Q*281vYO^iw@DfizWeZO4#k8`%60gR(`TJ6syd|dfj z{>H04f{1+x3TT($G6S+v& z((gdvd3YMjsjl*Y^YPribh~#|q*|e*0nNTI|Av;N=O2+ODBcxsaxV8sIEXg1K z(bpw2Md#O8J9BZK#JY1-Xo+84D5$PNb#(e0k#sdiM9V!4exRt%t-Tsbf2T9kekS$= zv{jx>+{MEu<@;vrO5Lb}Xx+Hix^cTL0-kP+9L(1P%%+W?qpj2?RHf^7{~eR(bl#1V zw}TXw{odt;-s`Bj`&QHTF|X=p!>}%S*4(^}5}YmOFg0?POAj<(SyO%}kTik}4D;w-S4oq{imkwxJ@9a= z$ZC;n zRC=D-IaVxueDcsnm#yqeo$)D9?9x*FxlJSEX$sbPXPfEztGl}_J9~RKU(#|H+Lkks zkZfWqz{bkI5-B&m#8S!JI?8Rj(ZKeq?mmBO5_)B#v-1X=n_t?ir?p;83X#y9(jR{u zj*|Wo1Z5fW@=SQzlSD!%c5tV_gm-d`iQu@Y2|Z~Ho5lXV7vg2Cur-|mENFVBV9cSy zb^<3n{Bppl{4HrmIi;5`iGY4EU+v3X#f_1$ofguSxmA6I?<8bO7p*Z#vWp?K#0%dK zV{YJNWFAWZlm53m?T~SqkIAqt=A61es3vd&?VZ7oK0jtKl(;(#n3MP*SSM0Co=|^) zkxF8k-xCgjx@R1Grq#1grO+D3z_=8mA}^zT{RY$fw$t)**K@AE?9~dbgX$NoW1HFr{sq^SaTr^TZ;!_^TDLC` z+%yjzb)6>RuX`q4tah;#^z)-pli~AQzw_)C9h=gT^s`k?6-1M7RC_PKGz@CYxVLt^ zpUCZ|n{kPJW^R{OZ_r-+j^HZCmngXt)3v*)Mz0}9eOXOy<`^50Y--u=3E z+Y)Xb_<$wm2%0L$rxSNQ#_yEiQz0#TiFlZO%gDisRnzpvZpeK3#Db?|@{e_(*@*MO z&xS3X(_hCle#_WcUuAygL&*6aLo#(`b~Ectuwq-0HY<7ow<^GzMBMjO-vrvXtqopv z*ch>=6LlgV7~oU;I!|!%Jxr*-y11AM8F*QtX>?Q#0VftnP4cX9xNh0Y7#YAde8N(r8t=#_V`kIf z2`;;&^d-6O?HL4oue(;C!m()z#o&{P4=h3>##&KToere~Qd2gU) zO@DK*@Y%~vRSs57@+0OOH)u~!#cIBY(&A0<5Pqp~;lFo}c6<8{lM`Xz+5wr;$;POw ze?!BRoIe7vYP_tS3k7ZVn*ac-k+Xpw+vc^$KhOqt$xLQN;e_HNUr$>eVJCGC;l z@bfe--}!l1wtDrDtArDe+56%(X`T!xu&<~EE>)>uRp8UO;r;p|nvb-)GFTe2rYm%T zG4kq)VXKh7 zk5Dt=Cud&1Oe?EGv(oZVSJ^+mA5%xGNV@f+dSvbG!-GT_Lw|+FD-BPyDBR4jsE`_= z+Yx^BKe5ppkU0Ohi$nkR?UZGH9xdnGswkzP6C;xsd}~yEN-(vslb7ycO_Aq7p!6CZ z?mQDEp*U#JHQ@zkWJ=t6$_SR@Z9dCx{E**(67=?#wqRlYCP{_V#_hzQCK%j!)dd`? 
zkpQ$Z*o#qcKGom(u+y)_X`#CZcfBDhXOchn#TQirN3*wE4 zPb^}$%je(gKOb$phh2YXq5i|Gvtr$GIhPQePWdKn4`z8VuLmXbVx4{x2Jxw>@&72m zMxhj5@HRqYU$FAD+kB**QS(4bNP__X_hR_vU}HHV#mDce26_1hTj2Tgb=QZvQk z)h=0H-iRQbesT*|rY<9Yt>jsk3kPz!sf@+~N>QSSl|7D~x!T(u%6|=H#VuB;KYU=Q zL`@y)RA?=BSFCMIp&^WgISqf>8^$CqZp2bFlPKv$VrF*Kg#y7Q7CAX(f5mgJB+-4I z?-7u2ue0z>g%DZ+dlL-AF`RpXPRNqW#H*4Qj})o^oi1H4{PyjW>0q{jh!=uYG2AF< zXJ?mybS6AsuYBillxOJ`bTGRRw8dvynwu{p5wDtqsVN=c)9{aXta+>Krb5OmQ^9mf zFEYBHlsfs6fejuJY$75QphPJnCJ0DQ=B1#7`KChdxyu5T zx%6kFMO@kUuLMO$Q+^b(!VYJOkWIWB2D-I4Tqmtv*zfP!oTxTxi?};6gfmKDGo>Cc zss%JPnE*mM(e(%P82rT;L-~5dgE^$W&1q)Men0jsd{!skQLdCY%st}__A7nb0!Fp5 zEfT-Y)0t`79BUCU4`Lb^C^CglKi$ic|E$91A>9w*cDMX<3t>{YUBjeyyL?pTy0fqA zQ9rA$pG{Ho5%czy*a)oTD6GZA$DyanO$mhH;X@^nX0+lrR)pEvJ;Owu&v)V8XI*or zh;XHa?~EUBrNho|;v9Wa;d!6jAO{3#mlhV}W|x+@6snc$T(aiFgtO-cvgn42;nz_k zWm!_M4|bYAaKPi)5%9?|!W%jGEO(DKMcQiCfhew6WlXLmWx*YTj_(NmYtQ7>Ff z9W9=DcW`ji=I1T!Ua1*kChu!O?|=mM>sLyH*=lm&$?V6eZqfevf+L%r66BI>*vTIs zHI!K95zE9YGe6rU*_4d^bPfhJnh{#E`R4dFVSDtY-x#_7`7rk~alfS9q@eq`EiFKsXLDH`(ig0_(0XhhG1`>coq)RWSpbL@|vt^bc&M@d(GtMpQ$-yVS2F2 zF(AQw-7;JyW5!gO%kv$eC<_9?LEK%|v}cI5oGw6O78$Ry#ZpoV{93#|oX56{kmy4+6p*~d@d^4Oa3rjtu6(4g zj&cPfwG@}L50-{;Wu%Ye$QS3PLKBsT=7AebJqJv5DbJF+=+Cs)%-1EUuok8V^1*R+ zOA)GHXG~7UiG6uJca4pc`*V8lPu9nC8e_^++`#+R4|S}c?zJa+eRR+~^x1#Om4gA! z&y9%sa5>WzZfb7+t!1jg@`J1=TsoLyA&7^^|jkTGAj%lLmTX!wID~ z68%5HQy8vySW5ii>$iNG`9LW_MAe!Fvs5ed9UAS`^`zDgwYp=t%!SFsyr=l zm02!U%>)U~?LmH*g#e8Pve7zM{?>u4e*H#qntLwYagqpHeoJOOKKwK#_NiUREqz&skm{t_Okn#BtCIM@N<#hq;88 zv^8F5+j5df1qE;25nF?qzVzabD4e4s7r}N2h=oqE($k~^c?~Ty)fs5Hc1Rh-IJ^+T zc9R^~XJ2YWo*b^@xFeW(crY(tKH}(2`woHx6c=KyB>&H+HQmA)qTSe(Kg-@KwdD&y zWnSM%CM{mjAJLjev16kk@@UVS6uZ_=En6J?I$ zS^CKCK5%1-s;VM1{1*=u;j}8(;Iy*$xL+h(T8~Te#F2a39mS6=PhnWD1`2&KAUpp~29wL6ghP5UxB{z=ODXz`nb1=b??xsaDWbhsCK`3F6# z9b+bfI){zs1a-Dn?{tcoIb8+kDYI#j1ep>l;8w9gd2RjW-jAW)v^TKd{NztKc#X{m zX}l~fyb{<+w@-k{@3ImS=P~JN1WUnIYU2)&xd?S{6}Z^kG;)_sf95s6=(n0#(*uA1 zISHf_-WDfWj-h|rShELbF&y@&GOk3RhQ{k$VRhHJDG1k~#`&1Q^`z_ciMcbiy3bMC zPpas05Rnw`q+;Crop}8E-FqGL5O>tn=toLOQ;I8@cmWN)_SZ0PM-si&?5ii|JXUTs zKe9+s<);PC+F6uo5*V5?z!>v${`$DEM*oOm)=13Z;1XFmKk-a*q3`R;>e^l%D~1rD`doX!t9!wuJ@I*ez@%p=MZS#0jurO*4xp!B^^ wfA!FjC=%5j`W~AFlb9?`q6Ficp!Tl!tBeptf}As1D3~ByZ`_I literal 0 HcmV?d00001 diff --git a/spark-operator-docs/spark_application.md b/spark-operator-docs/spark_application.md new file mode 100644 index 00000000..689c5bd6 --- /dev/null +++ b/spark-operator-docs/spark_application.md @@ -0,0 +1,212 @@ +## Spark Application API + +The core user facing API of the Spark Kubernetes Operator is the SparkApplication Custom +Resources Definition (CRD). Spark Application CustomResource extends standard k8s API, +defines Spark Application spec and tracks status. + +Once the Spark Operator is installed and running in your Kubernetes environment, it will +continuously watch SparkApplication(s) submitted, via k8s API client or kubectl by the user, +orchestrate secondary resources (pods, configmaps .etc). + +Please check out the [quickstart](getting_started.md) as well for installing operator. 
+
+## SparkApplication
+
+A SparkApplication can be defined in YAML format, with the bare minimal required fields
+needed to start:
+
+```yaml
+apiVersion: org.apache.spark/v1alpha1
+kind: SparkApplication
+metadata:
+  name: spark-pi
+  namespace: spark-test
+spec:
+  mainClass: "org.apache.spark.examples.SparkPi"
+  jars: "local:///opt/spark/examples/jars/spark-examples.jar"
+  sparkConf:
+    spark.executor.instances: "5"
+    spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu"
+    spark.kubernetes.namespace: "spark-test"
+    spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
+  runtimeVersions:
+    scalaVersion: v2_12
+    sparkVersion: v3_4_1
+```
+
+After submission, the operator adds status information to your application based on the
+observed state:
+
+```
+kubectl get sparkapp spark-pi -o yaml
+```
+
+### Write and build your SparkApplication
+
+It's straightforward to convert your spark-submit application to a `SparkApplication` yaml:
+the operator constructs the driver spec in a similar way. To submit a Java / Scala
+application, use `.spec.jars` and `.spec.mainClass`. Similarly, set `pyFiles` or
+`sparkRFiles` for Python / SparkR applications.
+
+When building the images used by the driver and executor, it's recommended to use the
+official [Spark Docker](https://github.com/apache/spark-docker) images as base images. Check
+the pod template support (`.spec.driverSpec.podTemplateSpec` and
+`.spec.executorSpec.podTemplateSpec`) as well for setting a custom Spark home and work dir.
+
+### Pod Template Support
+
+It is possible to configure pod templates for driver & executor pods in order to set fields
+that are not configurable from SparkConf.
+
+Spark Operator supports defining pod templates for driver and executor pods in two ways:
+
+1. Set `PodTemplateSpec` in `SparkApplication`
+2. Config `spark.kubernetes.[driver/executor].podTemplateFile`
+
+See [this example](../spark-operator/src/main/resources/streaming.yaml) for configuring a pod
+template in a SparkApplication.
+
+If a pod template spec is set in the application spec (option 1), it takes higher precedence
+than option 2, and `spark.kubernetes.[driver/executor].podTemplateFile` is unset to avoid
+conflicting overrides.
+
+When the pod template is set as a remote file in conf properties (option 2), please ensure
+the Spark Operator has the necessary permission to access the remote file location, e.g.
+deploy the operator with a proper workload identity that can access the target S3 / Cloud
+Storage bucket. Similar permission requirements also apply to the driver pod: the operator
+needs access to the template file to create the driver, and the driver needs the same to
+create executors.
+
+Please be advised that Spark still overrides the necessary pod configuration in both options.
+For more details, refer to the
+[Spark doc](https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template).
+
+## Understanding Failure Types
+
+In addition to the general `FAILURE` state (the driver pod fails, or the driver container
+exits with a non-zero code), Spark Operator introduces a few more fine-grained failure
+states for ease of app status monitoring at a high level, and for ease of setting up
+different handlers if users are creating / managing SparkApplications with external
+microservices or workflow engines.
+
+Spark Operator recognizes "infrastructure failure" in a best-effort way. It is possible to
+configure different restart policies on general failure(s) vs. on potential infrastructure
+failure(s).
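+
+As a minimal sketch (using the `restartConfig` fields described under the Restart section
+below), a policy that restarts the app only on suspected infrastructure failures could look
+like:
+
+```yaml
+restartConfig:
+  # restart only when the failure appears to be infrastructure-related
+  restartPolicy: ON_INFRASTRUCTURE_FAILURE
+  maxRestartAttempts: 3
+```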
+
+With such a policy, the app restarts only upon infrastructure failures. If a Spark
+application fails as a result of one of
+
+```
+DRIVER_LAUNCH_TIMED_OUT
+EXECUTORS_LAUNCH_TIMED_OUT
+SCHEDULING_FAILURE
+```
+
+it is more likely that the app failed for infrastructure reason(s): scenarios where the
+driver or executors cannot be scheduled, or cannot initialize within the configured time
+window, e.g. due to insufficient capacity, failure to get an IP allocated, failure to pull
+images, a k8s API server issue at scheduling time, etc.
+
+Please be advised that this is a best-effort failure identification. You may still need to
+debug the actual failure from the driver pods. Spark Operator stages the last observed
+driver pod status together with the stopping state for audit purposes.
+
+## Configure the Tolerations for SparkApplication
+
+### Restart
+
+Spark Operator enables configuring the app restart behavior for different failure types.
+Here's a sample restart config snippet:
+
+``` yaml
+restartConfig:
+  # acceptable values are 'NEVER', 'ALWAYS', 'ON_FAILURE' and 'ON_INFRASTRUCTURE_FAILURE'
+  restartPolicy: NEVER
+  # the operator retries the application if configured. All resources from the current
+  # attempt are deleted before starting the next attempt
+  maxRestartAttempts: 3
+  # backoff time (in millis) that the operator waits before the next attempt
+  restartBackoffMillis: 30000
+```
+
+### Timeouts
+
+Example for configuring timeouts:
+
+```yaml
+applicationTimeoutConfig:
+  # timeouts set to 5min
+
+  # time to wait for the driver to reach running state after it is requested
+  driverStartTimeoutMillis: 300000
+
+  # time to wait for the driver to reach ready state
+  sparkSessionStartTimeoutMillis: 300000
+
+  # time to wait for the driver to acquire the minimal number of running executors
+  executorStartTimeoutMillis: 300000
+
+  # time to wait before force-deleting resources at the end of an attempt
+  forceTerminationGracePeriodMillis: 300000
+```
+
+### Instance Config
+
+Instance Config helps the operator decide whether an application is running healthy. When
+the underlying cluster has a batch scheduler enabled, you may configure the apps to be
+started if and only if there are sufficient resources. If, however, the cluster does not
+have a batch scheduler, the operator may help avoid apps hanging with an `InstanceConfig`
+that describes the bare minimal tolerable scenario.
+
+For example, with the below spec:
+
+```yaml
+applicationTolerations:
+  instanceConfig:
+    minExecutors: 3
+    initExecutors: 5
+    maxExecutors: 10
+sparkConf:
+  spark.executor.instances: "10"
+```
+
+Spark would try to bring up 10 executors as defined in SparkConf. In addition, from the
+operator's perspective:
+
+* If the Spark app acquires fewer than 5 executors in the given time window
+  (`.spec.applicationTolerations.applicationTimeoutConfig.executorStartTimeoutMillis`)
+  after being submitted, it is shut down proactively in order to avoid resource deadlock.
+* The Spark app is marked as 'RUNNING_WITH_PARTIAL_CAPACITY' if it loses executors after
+  successfully starting up.
+* The Spark app is marked as 'RUNNING_HEALTHY' if it has at least the min executors after
+  successfully starting up.
+
+### Delete Resources On Termination
+
+By default, the operator deletes all created resources at the end of an attempt. It tries
+to record the last observed driver status in the `status` field of the application for
+troubleshooting purposes.
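+
+As an illustration (assuming `deleteOnTermination` defaults to `true`, consistent with the
+default behavior described above), the default is equivalent to spelling it out explicitly:
+
+```yaml
+applicationTolerations:
+  # default behavior: operator-created resources are deleted when the attempt ends
+  deleteOnTermination: true
+```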
+
+### Delete Resources On Termination
+
+By default, the operator deletes all created resources at the end of an attempt. It
+would try to record the last observed driver status in the `status` field of the
+application for troubleshooting purposes.
+
+On the other hand, when developing an application, it's possible to configure
+
+```yaml
+applicationTolerations:
+  deleteOnTermination: false
+```
+
+so that the operator would not attempt to delete resources after the app terminates.
+Note that this applies only to operator-created resources (the driver pod, etc.). You
+may also want to tune `spark.kubernetes.executor.deleteOnTermination` to control the
+behavior of driver-created resources.
+
+## Supported Spark Versions
+
+Spark version is a required field for SparkApplication. At the current phase, the
+operator uses a single submission-worker mode to support all listed versions.
+
+```yaml
+runtimeVersions:
+  # Supported values are:
+  # v3_5_1, v3_5_0, v3_4_1, v3_4_0, v3_3_3, v3_3_1, v3_3_0, v3_2_0
+  sparkVersion: v3_4_0
+```
diff --git a/spark-operator-tests/.gitignore b/spark-operator-tests/.gitignore
new file mode 100644
index 00000000..b63da455
--- /dev/null
+++ b/spark-operator-tests/.gitignore
@@ -0,0 +1,42 @@
+.gradle
+build/
+!gradle/wrapper/gradle-wrapper.jar
+!**/src/main/**/build/
+!**/src/test/**/build/
+
+### IntelliJ IDEA ###
+.idea/modules.xml
+.idea/jarRepositories.xml
+.idea/compiler.xml
+.idea/libraries/
+*.iws
+*.iml
+*.ipr
+out/
+!**/src/main/**/out/
+!**/src/test/**/out/
+
+### Eclipse ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+bin/
+!**/src/main/**/bin/
+!**/src/test/**/bin/
+
+### NetBeans ###
+/nbproject/private/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/
+
+### VS Code ###
+.vscode/
+
+### Mac OS ###
+.DS_Store
\ No newline at end of file
diff --git a/spark-operator-tests/build.gradle b/spark-operator-tests/build.gradle
new file mode 100644
index 00000000..151bfd4d
--- /dev/null
+++ b/spark-operator-tests/build.gradle
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+dependencies {
+    testImplementation project(":spark-operator-api")
+
+    testImplementation("io.fabric8:kubernetes-client:$fabric8Version")
+    testImplementation("org.apache.logging.log4j:log4j-slf4j-impl:$log4jVersion")
+    testImplementation("org.apache.logging.log4j:log4j-core:$log4jVersion")
+    testImplementation platform("org.junit:junit-bom:$junitVersion")
+    testImplementation 'org.junit.jupiter:junit-jupiter'
+}
+
+test {
+    useJUnitPlatform()
+}
diff --git a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java
new file mode 100644
index 00000000..f19151c2
--- /dev/null
+++ b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientBuilder;
+import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+class AppSubmitToSucceedTest {
+    private static final Logger logger = LoggerFactory.getLogger(AppSubmitToSucceedTest.class);
+
+    /**
+     * Creates Spark app(s) and waits for them to complete.
+     * This sample checks the apps periodically, and force-deletes them after the
+     * timeout if they have not completed.
+     * Exits 0 iff all given app(s) terminated successfully.
+     * E.g. when the test cluster is up and kube config is configured, this can be invoked as
+     * java -cp /path/to/test.jar -Dspark.operator.test.app.yaml.files.dir=/path/to/e2e-tests/
+     * org.apache.spark.kubernetes.operator.AppSubmitToSucceedTest
+     *
+     * @param args directory path(s) to load SparkApp yaml file(s) from
+     */
+    public static void main(String[] args) throws InterruptedException {
+        KubernetesClient client = new KubernetesClientBuilder().build();
+
+        Duration observeInterval = Duration.ofMinutes(
+                Long.parseLong(
+                        System.getProperty("spark.operator.test.observe.interval.min", "1")));
+        Duration appExecTimeout = Duration.ofMinutes(
+                Long.parseLong(
+                        System.getProperty("spark.operator.test.app.timeout.min", "10")));
+        Duration testTimeout = Duration.ofMinutes(
+                Long.parseLong(
+                        System.getProperty("spark.operator.test.timeout.min", "30")));
+        Integer execParallelism = Integer.parseInt(
+                System.getProperty("spark.operator.test.exec.parallelism", "2"));
+        String testAppYamlFilesDir = System.getProperty("spark.operator.test.app.yaml.files.dir",
+                "e2e-tests/spark-apps/");
+        String testAppNamespace = System.getProperty("spark.operator.test.app.namespace",
+                "spark-test");
+
+        Set<SparkApplication> testApps =
+                loadSparkAppsFromFile(client, new File(testAppYamlFilesDir));
+        ConcurrentMap<String, String> failedApps = new ConcurrentHashMap<>();
+
+        ExecutorService execPool = Executors.newFixedThreadPool(execParallelism);
+        List<Callable<Void>> todos = new ArrayList<>(testApps.size());
+
+        for (SparkApplication app : testApps) {
+            todos.add(() -> {
+                try {
+                    Instant timeoutTime = Instant.now().plus(appExecTimeout);
+                    SparkApplication updatedApp =
+                            client.resource(app).inNamespace(testAppNamespace).create();
+                    if (logger.isInfoEnabled()) {
+                        logger.info("Submitting app {}", updatedApp.getMetadata().getName());
+                    }
+                    while (Instant.now().isBefore(timeoutTime)) {
+                        Thread.sleep(observeInterval.toMillis());
+                        updatedApp = client.resource(app).inNamespace(testAppNamespace).get();
+                        if (appCompleted(updatedApp)) {
+                            boolean succeeded = updatedApp.getStatus().getStateTransitionHistory()
+                                    .entrySet()
+                                    .stream()
+                                    .anyMatch(e -> ApplicationStateSummary.SUCCEEDED.equals(
+                                            e.getValue().getCurrentStateSummary()));
+                            if (succeeded) {
+                                if (logger.isInfoEnabled()) {
+                                    logger.info("App succeeded: {}",
+                                            updatedApp.getMetadata().getName());
+                                }
+                            } else {
+                                if (logger.isErrorEnabled()) {
+                                    logger.error("App failed: {}",
+                                            updatedApp.getMetadata().getName());
+                                }
+                                failedApps.put(updatedApp.getMetadata().getName(),
+                                        updatedApp.getStatus().toString());
+                            }
+                            return null;
+                        } else {
+                            if (logger.isInfoEnabled()) {
+                                logger.info("Application {} not completed...",
+                                        app.getMetadata().getName());
+                            }
+                        }
+                    }
+                    if (logger.isInfoEnabled()) {
+                        logger.info("App {} timed out.", app.getMetadata().getName());
+                    }
+                    failedApps.put(updatedApp.getMetadata().getName(),
+                            "timed out: " + updatedApp.getStatus().toString());
+                    return null;
+                } catch (Exception e) {
+                    failedApps.put(app.getMetadata().getName(), "failed: " + e.getMessage());
+                    return null;
+                }
+            });
+        }
+
+        int testSucceeded = 1;
+        try {
+            execPool.invokeAll(todos, testTimeout.toMillis(), TimeUnit.MILLISECONDS);
+            if (failedApps.isEmpty()) {
+                if (logger.isInfoEnabled()) {
+                    logger.info("Test completed successfully");
+                }
+                testSucceeded = 0;
+            } else {
+                if (logger.isErrorEnabled()) {
+                    logger.error("Failed apps found.");
"); + failedApps.forEach((k, v) -> { + logger.error("Application failed: {}", k); + logger.error("\t status: {}", v); + }); + } + } + } finally { + for (SparkApplication app : testApps) { + try { + client.resource(app).inNamespace(testAppNamespace).delete(); + } catch (Exception e) { + if (logger.isErrorEnabled()) { + logger.error("Failed to remove app {}", app.getMetadata().getName()); + } + } + } + } + System.exit(testSucceeded); + } + + private static Set loadSparkAppsFromFile(KubernetesClient client, + File appsFile) { + if (appsFile.exists()) { + if (appsFile.isFile()) { + return Collections.singleton( + client.resources(SparkApplication.class).load(appsFile).item()); + } else { + Set applications = new HashSet<>(); + File[] subDirs = appsFile.listFiles(); + if (subDirs != null) { + for (File file : subDirs) { + applications.addAll(loadSparkAppsFromFile(client, file)); + } + } + return applications; + } + } + if (logger.isErrorEnabled()) { + logger.error("No SparkApp found at {}", appsFile.getAbsolutePath()); + } + return Collections.emptySet(); + } + + private static boolean appCompleted(SparkApplication app) { + return app != null && app.getStatus() != null && app.getStatus().getCurrentState() != null + && app.getStatus().getStateTransitionHistory() != null + && app.getStatus().getCurrentState().getCurrentStateSummary().isTerminated(); + } +} diff --git a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java new file mode 100644 index 00000000..6495eba2 --- /dev/null +++ b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator; + +class CancelAppTest { + +} diff --git a/spark-operator-tests/src/test/resources/EcsLayout.json b/spark-operator-tests/src/test/resources/EcsLayout.json new file mode 100644 index 00000000..8d215ab5 --- /dev/null +++ b/spark-operator-tests/src/test/resources/EcsLayout.json @@ -0,0 +1,49 @@ +{ + "@timestamp": { + "$resolver": "timestamp", + "pattern": { + "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", + "timeZone": "UTC" + } + }, + "ecs.version": "1.2.0", + "log.level": { + "$resolver": "level", + "field": "name" + }, + "message": { + "$resolver": "message", + "stringified": true + }, + "process.thread.name": { + "$resolver": "thread", + "field": "name" + }, + "log.logger": { + "$resolver": "logger", + "field": "name" + }, + "labels": { + "$resolver": "mdc", + "flatten": true, + "stringified": true + }, + "tags": { + "$resolver": "ndc" + }, + "error.type": { + "$resolver": "exception", + "field": "className" + }, + "error.message": { + "$resolver": "exception", + "field": "message" + }, + "error.stack_trace": { + "$resolver": "exception", + "field": "stackTrace", + "stackTrace": { + "stringified": true + } + } +} diff --git a/spark-operator-tests/src/test/resources/log4j2.properties b/spark-operator-tests/src/test/resources/log4j2.properties new file mode 100644 index 00000000..9285fa00 --- /dev/null +++ b/spark-operator-tests/src/test/resources/log4j2.properties @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +status=debug +strict=true +dest=out +name=PropertiesConfig +property.filename=/tmp/spark-operator +filter.threshold.type=ThresholdFilter +filter.threshold.level=debug +# console +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d %p %X %C{1.} [%t] %m%n +appender.console.filter.threshold.type=ThresholdFilter +appender.console.filter.threshold.level=info +# rolling JSON +appender.rolling.type=RollingFile +appender.rolling.name=RollingFile +appender.rolling.append=true +appender.rolling.fileName=${filename}.log +appender.rolling.filePattern=${filename}-%i.log.gz +appender.rolling.layout.type=JsonTemplateLayout +appender.rolling.layout.eventTemplateUri=classpath:EcsLayout.json +appender.rolling.policies.type=Policies +appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=100MB +appender.rolling.strategy.type=DefaultRolloverStrategy +appender.rolling.strategy.max=20 +appender.rolling.immediateFlush=true +# chatty loggers +rootLogger.level=all +logger.netty.name=io.netty +logger.netty.level=warn +log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.rolling.ref=RollingFile diff --git a/spark-operator/.gitignore b/spark-operator/.gitignore new file mode 100644 index 00000000..b63da455 --- /dev/null +++ b/spark-operator/.gitignore @@ -0,0 +1,42 @@ +.gradle +build/ +!gradle/wrapper/gradle-wrapper.jar +!**/src/main/**/build/ +!**/src/test/**/build/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr +out/ +!**/src/main/**/out/ +!**/src/test/**/out/ + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache +bin/ +!**/src/main/**/bin/ +!**/src/test/**/bin/ + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store \ No newline at end of file diff --git a/spark-operator/build.gradle b/spark-operator/build.gradle new file mode 100644 index 00000000..e0f1f3ef --- /dev/null +++ b/spark-operator/build.gradle @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +apply plugin: 'com.github.johnrengelman.shadow' + +buildscript { + repositories { + maven { + url = uri("https://plugins.gradle.org/m2/") + } + } + dependencies { + classpath "com.github.johnrengelman:shadow:$shadowJarPluginVersion" + } +} + +dependencies { + implementation project(":spark-operator-api") + implementation project(":spark-submission-worker") + + implementation("io.javaoperatorsdk:operator-framework:$operatorSDKVersion") { + exclude group: 'com.squareup.okio' + } + + testImplementation("io.javaoperatorsdk:operator-framework-junit-5:$operatorSDKVersion") { + exclude group: 'com.squareup.okio' + } + + implementation("io.fabric8:kubernetes-httpclient-okhttp:$fabric8Version") { + exclude group: 'com.squareup.okhttp3' + } + implementation("com.squareup.okhttp3:okhttp:$okHttpVersion") + implementation("com.squareup.okhttp3:logging-interceptor:$okHttpVersion") + implementation("io.dropwizard.metrics:metrics-core:$dropwizardMetricsVersion") + implementation("io.dropwizard.metrics:metrics-jvm:$dropwizardMetricsVersion") + compileOnly("org.projectlombok:lombok:$lombokVersion") + implementation("io.dropwizard.metrics:metrics-healthchecks:$dropwizardMetricsVersion") + compileOnly("org.apache.spark:spark-core_$sparkScalaVersion:$sparkVersion") { + exclude group: "org.apache.logging.log4j" + exclude group: "org.slf4j" + } + annotationProcessor("org.projectlombok:lombok:$lombokVersion") + + // logging + implementation("org.apache.logging.log4j:log4j-api:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-core:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-slf4j-impl:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-1.2-api:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-layout-template-json:$log4jLayoutVersion") + + testImplementation("io.fabric8:kubernetes-server-mock:$fabric8Version") { + exclude group: 'junit' + } + testImplementation("org.apache.spark:spark-core_$sparkScalaVersion:$sparkVersion") + testImplementation("com.squareup.okhttp3:mockwebserver:$okHttpVersion") + testImplementation platform("org.junit:junit-bom:$junitVersion") + testImplementation("org.junit.jupiter:junit-jupiter:$junitVersion") + testImplementation("org.mockito:mockito-core:$mockitoVersion") +} + +test { + useJUnitPlatform() +} + +jar.dependsOn shadowJar + +jar { + zip64 = true + archiveVersion.set('') +} + + +shadowJar { + zip64 = true + mergeServiceFiles() + transform(com.github.jengelman.gradle.plugins.shadow.transformers.Log4j2PluginsCacheFileTransformer) +} + +description = "Spark Kubernetes Operator" +def artifact = "spark-kubernetes-operator" +archivesBaseName = artifact diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java new file mode 100644 index 00000000..b6a31069 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.javaoperatorsdk.operator.Operator; +import io.javaoperatorsdk.operator.api.config.ConfigurationServiceOverrider; +import io.javaoperatorsdk.operator.api.config.ControllerConfigurationOverrider; +import io.javaoperatorsdk.operator.processing.event.rate.LinearRateLimiter; +import io.javaoperatorsdk.operator.processing.event.rate.RateLimiter; +import io.javaoperatorsdk.operator.processing.retry.GenericRetry; +import org.apache.commons.collections.CollectionUtils; +import org.apache.spark.kubernetes.operator.client.KubernetesClientFactory; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConfigMapReconciler; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; +import org.apache.spark.kubernetes.operator.health.SentinelManager; +import org.apache.spark.kubernetes.operator.metrics.MetricsService; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; +import io.javaoperatorsdk.operator.RegisteredController; +import lombok.extern.slf4j.Slf4j; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory; +import org.apache.spark.kubernetes.operator.metrics.source.OperatorJosdkMetrics; +import org.apache.spark.kubernetes.operator.probe.ProbeService; +import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler; +import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.time.Duration; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigEnabled; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigSelectorStr; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorNamespace; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.ReconcilerParallelism; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.TerminateOnInformerFailure; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.TerminationTimeoutSeconds; + +/** + * Entry point for Spark Operator. 
* Sets up reconcilers for CustomResource and health check servers
+ */
+@Slf4j
+public class SparkOperator {
+    private Operator sparkOperator;
+    private Operator sparkOperatorConfMonitor;
+    private KubernetesClient client;
+    private StatusRecorder statusRecorder;
+    private MetricsSystem metricsSystem;
+    protected Set<RegisteredController<?>> registeredSparkControllers;
+    protected Set<String> watchedNamespaces;
+
+    private SentinelManager<SparkApplication> sentinelManager;
+    private ProbeService probeService;
+    private MetricsService metricsService;
+    private ExecutorService metricsResourcesSingleThreadPool;
+
+    public SparkOperator() {
+        this.metricsSystem = MetricsSystemFactory.createMetricsSystem();
+        this.client = KubernetesClientFactory.buildKubernetesClient(metricsSystem);
+        this.statusRecorder = new StatusRecorder(SparkOperatorConf.getApplicationStatusListener());
+        this.registeredSparkControllers = new HashSet<>();
+        this.watchedNamespaces = SparkReconcilerUtils.getWatchedNamespaces();
+        this.sentinelManager = new SentinelManager<>();
+        this.sparkOperator = createOperator();
+        this.sparkOperatorConfMonitor = createSparkOperatorConfMonitor();
+        var operators = Stream.of(this.sparkOperator, this.sparkOperatorConfMonitor)
+                .filter(Objects::nonNull).collect(Collectors.toList());
+        this.probeService = new ProbeService(operators, this.sentinelManager);
+        this.metricsService = new MetricsService(metricsSystem);
+        this.metricsResourcesSingleThreadPool = Executors.newSingleThreadExecutor();
+    }
+
+    protected Operator createOperator() {
+        Operator op = new Operator(this::overrideOperatorConfigs);
+        registeredSparkControllers.add(
+                op.register(new SparkApplicationReconciler(statusRecorder, sentinelManager),
+                        this::overrideControllerConfigs));
+        return op;
+    }
+
+    protected Operator createSparkOperatorConfMonitor() {
+        if (DynamicConfigEnabled.getValue()) {
+            Operator op = new Operator(client, c -> {
+                c.withStopOnInformerErrorDuringStartup(true);
+                c.withCloseClientOnStop(false);
+                c.withInformerStoppedHandler(
+                        (informer, ex) -> log.error(
+                                "Dynamic config informer stopped: operator will not accept "
+                                        + "config updates.")
+                );
+            });
+            op.register(new SparkOperatorConfigMapReconciler(this::updateWatchingNamespaces), c -> {
+                c.settingNamespaces(OperatorNamespace.getValue());
+                c.withLabelSelector(DynamicConfigSelectorStr.getValue());
+            });
+            return op;
+        } else {
+            return null;
+        }
+    }
+
+    protected Operator getOperator() {
+        return this.sparkOperator;
+    }
+
+    protected ProbeService getProbeService() {
+        return this.probeService;
+    }
+
+    protected boolean updateWatchingNamespaces(Set<String> namespaces) {
+        if (watchedNamespaces.equals(namespaces)) {
+            log.info("No watched namespace change detected");
+            return false;
+        }
+        if (CollectionUtils.isEmpty(namespaces)) {
+            log.error("Cannot update watched namespaces to an empty set");
+            return false;
+        }
+        registeredSparkControllers.forEach(c -> {
+            if (c.allowsNamespaceChanges()) {
+                log.info("Updating operator namespaces to {}", namespaces);
+                c.changeNamespaces(namespaces);
+            }
+        });
+        this.watchedNamespaces = new HashSet<>(namespaces);
+        return true;
+    }
+
+    protected void overrideOperatorConfigs(ConfigurationServiceOverrider overrider) {
+        overrider.withKubernetesClient(client);
+        overrider.withStopOnInformerErrorDuringStartup(TerminateOnInformerFailure.getValue());
+        overrider.withTerminationTimeoutSeconds(TerminationTimeoutSeconds.getValue());
+        int parallelism = ReconcilerParallelism.getValue();
+        if (parallelism > 0) {
+            log.info("Configuring operator with {} reconciliation threads.", parallelism);
overrider.withConcurrentReconciliationThreads(parallelism); + } else { + log.info("Configuring operator with unbounded reconciliation thread pool."); + overrider.withExecutorService(Executors.newCachedThreadPool()); + } + if (SparkOperatorConf.LEADER_ELECTION_ENABLED.getValue()) { + overrider.withLeaderElectionConfiguration(SparkOperatorConf.getLeaderElectionConfig()); + } + if (SparkOperatorConf.JOSDKMetricsEnabled.getValue()) { + log.info("Adding OperatorJosdkMetrics."); + OperatorJosdkMetrics operatorJosdkMetrics = new OperatorJosdkMetrics(); + overrider.withMetrics(operatorJosdkMetrics); + metricsSystem.registerSource(operatorJosdkMetrics); + } + } + + protected void overrideControllerConfigs(ControllerConfigurationOverrider overrider) { + if (watchedNamespaces.isEmpty()) { + log.info("Initializing operator watching at cluster level."); + } else { + log.info("Initializing with watched namespaces {}", watchedNamespaces); + } + overrider.settingNamespaces(watchedNamespaces); + + RateLimiter rateLimiter = new LinearRateLimiter( + Duration.ofSeconds(SparkOperatorConf.RateLimiterRefreshPeriodSeconds.getValue()), + SparkOperatorConf.RateLimiterLimit.getValue()); + overrider.withRateLimiter(rateLimiter); + + GenericRetry genericRetry = new GenericRetry() + .setMaxAttempts(SparkOperatorConf.RetryMaxAttempts.getValue()) + .setInitialInterval( + Duration.ofSeconds(SparkOperatorConf.RetryInitialInternalSeconds.getValue()) + .toMillis()) + .setIntervalMultiplier(SparkOperatorConf.RetryInternalMultiplier.getValue()); + if (SparkOperatorConf.RetryMaxIntervalSeconds.getValue() > 0) { + genericRetry.setMaxInterval( + Duration.ofSeconds(SparkOperatorConf.RetryMaxIntervalSeconds.getValue()) + .toMillis()); + } + overrider.withRetry(genericRetry); + } + + public static void main(String[] args) { + SparkOperator sparkOperator = new SparkOperator(); + sparkOperator.getOperator().start(); + if (DynamicConfigEnabled.getValue() && sparkOperator.sparkOperatorConfMonitor != null) { + sparkOperator.sparkOperatorConfMonitor.start(); + } + sparkOperator.probeService.start(); + // MetricsServer start follows the MetricsSystem start + // so that MetricsSystem::getSinks will not return an empty list + sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { + sparkOperator.metricsSystem.start(); + }); + sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { + sparkOperator.metricsService.start(); + }); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java new file mode 100644 index 00000000..0d03a46b --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.client;
+
+import io.fabric8.kubernetes.client.Config;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientBuilder;
+import io.fabric8.kubernetes.client.okhttp.OkHttpClientFactory;
+import okhttp3.Interceptor;
+import okhttp3.OkHttpClient;
+import org.apache.spark.kubernetes.operator.config.SparkOperatorConf;
+import org.apache.spark.kubernetes.operator.metrics.MetricsSystem;
+import org.apache.spark.kubernetes.operator.metrics.source.KubernetesMetricsInterceptor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Builds Kubernetes clients with metrics configured
+ */
+public class KubernetesClientFactory {
+    private static final KubernetesMetricsInterceptor kubernetesMetricsInterceptor =
+            new KubernetesMetricsInterceptor();
+
+    public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem) {
+        return buildKubernetesClient(metricsSystem, null);
+    }
+
+    public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem,
+                                                         Config kubernetesClientConfig) {
+        List<Interceptor> clientInterceptors = new ArrayList<>();
+        clientInterceptors.add(new RetryInterceptor());
+
+        if (SparkOperatorConf.KubernetesClientMetricsEnabled.getValue()) {
+            clientInterceptors.add(kubernetesMetricsInterceptor);
+            // Avoid duplicate register metrics exception
+            if (!metricsSystem.getSources().contains(kubernetesMetricsInterceptor)) {
+                metricsSystem.registerSource(kubernetesMetricsInterceptor);
+            }
+        }
+
+        return new KubernetesClientBuilder()
+                .withConfig(kubernetesClientConfig)
+                .withHttpClientFactory(
+                        new OkHttpClientFactory() {
+                            @Override
+                            protected void additionalConfig(OkHttpClient.Builder builder) {
+                                for (Interceptor interceptor : clientInterceptors) {
+                                    builder.addInterceptor(interceptor);
+                                }
+                            }
+                        }
+                )
+                .build();
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java
new file mode 100644
index 00000000..3d746136
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.client; + +import lombok.extern.slf4j.Slf4j; +import okhttp3.Interceptor; +import okhttp3.Request; +import okhttp3.Response; +import okhttp3.ResponseBody; +import org.apache.commons.lang3.StringUtils; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; + +import java.io.IOException; +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import static io.fabric8.kubernetes.client.utils.Utils.closeQuietly; + +/** + * Intercepts HTTP requests and add custom retry on 429 and 5xx to overcome server instability + */ +@Slf4j +public class RetryInterceptor implements Interceptor { + private static final String RETRY_AFTER_HEADER_NAME = "Retry-After"; + + private final Long maxAttemptCount; + private final Long maxRetryAfterInSecs; + private final Long defaultRetryAfterInSecs; + + public RetryInterceptor() { + this.maxAttemptCount = SparkOperatorConf.MaxRetryAttemptOnKubeServerFailure.getValue(); + this.maxRetryAfterInSecs = SparkOperatorConf.MaxRetryAttemptAfterSeconds.getValue(); + this.defaultRetryAfterInSecs = SparkOperatorConf.RetryAttemptAfterSeconds.getValue(); + } + + @Override + public Response intercept(Chain chain) throws IOException { + Request request = chain.request(); + Response response = chain.proceed(request); + int tryCount = 0; + while (!response.isSuccessful() && (response.code() == 429 || response.code() >= 500) && + tryCount < maxAttemptCount) { + // only retry on consecutive 429 and 5xx failure responses + if (log.isWarnEnabled()) { + log.warn( + "Request is not successful. attempt={} response-code={} " + + "response-headers={}", + tryCount, response.code(), response.headers()); + } + Optional retryAfter = getRetryAfter(response); + if (retryAfter.isPresent()) { + try { + TimeUnit.SECONDS.sleep(retryAfter.get()); + } catch (InterruptedException e) { + if (log.isErrorEnabled()) { + log.error("Aborting retry.", e); + } + } + } + tryCount++; + + ResponseBody responseBody = response.body(); + if (responseBody != null) { + closeQuietly(responseBody); + } + // retry the request for 429 and 5xx + response = chain.proceed(request); + } + return response; + } + + private Optional getRetryAfter(Response response) { + String retryAfter = response.header(RETRY_AFTER_HEADER_NAME); + if (StringUtils.isNotEmpty(retryAfter)) { + try { + return Optional.of(Math.min(Long.parseLong(retryAfter), maxRetryAfterInSecs)); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error(String.format( + "Error while parsing Retry-After header %s. Retrying with default %s", + retryAfter, defaultRetryAfterInSecs), e); + } + return Optional.of(defaultRetryAfterInSecs); + } + } + return Optional.empty(); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java new file mode 100644 index 00000000..fc67da65 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.config;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.ToString;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+
+/**
+ * Config options for Spark Operator. Supports primitive and serialized JSON
+ */
+
+@RequiredArgsConstructor
+@AllArgsConstructor
+@EqualsAndHashCode
+@ToString
+@Builder
+@Slf4j
+public class ConfigOption<T> {
+    private static final ObjectMapper objectMapper = new ObjectMapper();
+
+    @Getter
+    @Builder.Default
+    private boolean enableDynamicOverride = true;
+    @Getter
+    private String key;
+    @Getter
+    private String description;
+    private T defaultValue;
+    private Class<T> typeParameterClass;
+
+    public T getValue() {
+        return resolveValue();
+    }
+
+    private T resolveValue() {
+        try {
+            String value = SparkOperatorConfManager.INSTANCE.getValue(key);
+            if (!enableDynamicOverride) {
+                value = SparkOperatorConfManager.INSTANCE.getInitialValue(key);
+            }
+            if (StringUtils.isNotEmpty(value)) {
+                if (typeParameterClass.isPrimitive() || typeParameterClass == String.class) {
+                    return (T) resolveValueToPrimitiveType(typeParameterClass, value);
+                } else {
+                    return objectMapper.readValue(value, typeParameterClass);
+                }
+            } else {
+                return defaultValue;
+            }
+        } catch (Throwable t) {
+            log.error("Failed to resolve value for config key {}, using default value {}", key,
+                    defaultValue, t);
+            return defaultValue;
+        }
+    }
+
+    public static Object resolveValueToPrimitiveType(Class<?> clazz, String value) {
+        if (Boolean.class == clazz || Boolean.TYPE == clazz) {
+            return Boolean.parseBoolean(value);
+        }
+        if (Byte.class == clazz || Byte.TYPE == clazz) {
+            return Byte.parseByte(value);
+        }
+        if (Short.class == clazz || Short.TYPE == clazz) {
+            return Short.parseShort(value);
+        }
+        if (Integer.class == clazz || Integer.TYPE == clazz) {
+            return Integer.parseInt(value);
+        }
+        if (Long.class == clazz || Long.TYPE == clazz) {
+            return Long.parseLong(value);
+        }
+        if (Float.class == clazz || Float.TYPE == clazz) {
+            return Float.parseFloat(value);
+        }
+        if (Double.class == clazz || Double.TYPE == clazz) {
+            return Double.parseDouble(value);
+        }
+        return value;
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java
new file mode 100644
index 00000000..a2d3d49c
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.config; + +import io.javaoperatorsdk.operator.api.config.LeaderElectionConfiguration; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.defaultOperatorConfigLabels; +import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.labelsAsStr; + +/** + * Spark Operator Configuration options. + */ +@Slf4j +public class SparkOperatorConf { + public static final String PREFIX = "spark.operator."; + public static final String METRIC_PREFIX = "spark.metrics.conf.operator."; + public static final String SINK = "sink."; + public static final String CLASS = "class"; + + public static final ConfigOption OperatorAppName = ConfigOption.builder() + .key(PREFIX + "name") + .typeParameterClass(String.class) + .description("Name of the operator.") + .defaultValue("spark-kubernetes-operator") + .enableDynamicOverride(false) + .build(); + public static final ConfigOption OperatorNamespace = ConfigOption.builder() + .key(PREFIX + "namespace") + .typeParameterClass(String.class) + .description("Namespace that operator is deployed within.") + .defaultValue("spark-system") + .enableDynamicOverride(false) + .build(); + public static final ConfigOption DynamicConfigEnabled = ConfigOption.builder() + .key(PREFIX + "dynamic.config.enabled") + .typeParameterClass(Boolean.class) + .description( + "When enabled, operator would use config map as source of truth for config " + + "property override. The config map need to be created in " + + "spark.operator.namespace, and labeled with operator name.") + .defaultValue(false) + .enableDynamicOverride(false) + .build(); + public static final ConfigOption DynamicConfigSelectorStr = + ConfigOption.builder() + .key(PREFIX + "dynamic.config.selector.str") + .typeParameterClass(String.class) + .description("The selector str applied to dynamic config map.") + .defaultValue(labelsAsStr(defaultOperatorConfigLabels())) + .enableDynamicOverride(false) + .build(); + public static final ConfigOption TerminateOnInformerFailure = + ConfigOption.builder() + .key(PREFIX + "terminate.on.informer.failure") + .typeParameterClass(Boolean.class) + .description( + "Enable to indicate informer errors should stop operator startup. 
If " + + "disabled, operator startup will ignore recoverable errors, " + + "caused for example by RBAC issues and will retry " + + "periodically.") + .defaultValue(false) + .enableDynamicOverride(false) + .build(); + public static final ConfigOption TerminationTimeoutSeconds = + ConfigOption.builder() + .key(PREFIX + "termination.timeout.seconds") + .description( + "Grace period for operator shutdown before reconciliation threads " + + "are killed.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(30) + .build(); + public static final ConfigOption ReconcilerParallelism = + ConfigOption.builder() + .key(PREFIX + "reconciler.parallelism") + .description( + "Thread pool size for Spark Operator reconcilers. Use -1 for " + + "unbounded pool.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(30) + .build(); + public static final ConfigOption RateLimiterRefreshPeriodSeconds = + ConfigOption.builder() + .key(PREFIX + "rate.limiter.refresh.period.seconds") + .description( + "Operator rate limiter refresh period(in seconds) for each resource.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(15) + .build(); + public static final ConfigOption RateLimiterLimit = ConfigOption.builder() + .key(PREFIX + "rate.limiter.limit") + .description( + "Max number of reconcile loops triggered within the rate limiter refresh " + + "period for each resource. Setting the limit <= 0 disables the " + + "limiter.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(5) + .build(); + public static final ConfigOption RetryInitialInternalSeconds = + ConfigOption.builder() + .key(PREFIX + "retry.initial.internal.seconds") + .description( + "Initial interval(in seconds) of retries on unhandled controller " + + "errors.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(5) + .build(); + public static final ConfigOption RetryInternalMultiplier = + ConfigOption.builder() + .key(PREFIX + "retry.internal.multiplier") + .description("Interval multiplier of retries on unhandled controller errors.") + .enableDynamicOverride(false) + .typeParameterClass(Double.class) + .defaultValue(1.5) + .build(); + public static final ConfigOption RetryMaxIntervalSeconds = + ConfigOption.builder() + .key(PREFIX + "retry.max.interval.seconds") + .description( + "Max interval(in seconds) of retries on unhandled controller errors. 
" + + "Set to -1 for unlimited.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(-1) + .build(); + public static final ConfigOption RetryMaxAttempts = ConfigOption.builder() + .key(PREFIX + "retry.max.attempts") + .description("Max attempts of retries on unhandled controller errors.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(15) + .build(); + public static final ConfigOption DriverCreateMaxAttempts = ConfigOption.builder() + .key(PREFIX + "driver.create.max.attempts") + .description( + "Maximal number of retry attempts of requesting driver for Spark application.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption MaxRetryAttemptOnKubeServerFailure = + ConfigOption.builder() + .key(PREFIX + "max.retry.attempts.on.kube.failure") + .description( + "Maximal number of retry attempts of requests to k8s server upon " + + "response 429 and 5xx.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption RetryAttemptAfterSeconds = ConfigOption.builder() + .key(PREFIX + "retry.attempt.after.seconds") + .description( + "Default time (in seconds) to wait till next request. This would be used if " + + "server does not set Retry-After in response.") + .defaultValue(1L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption MaxRetryAttemptAfterSeconds = + ConfigOption.builder() + .key(PREFIX + "max.retry.attempt.after.seconds") + .description("Maximal time (in seconds) to wait till next request.") + .defaultValue(15L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption StatusPatchMaxRetry = ConfigOption.builder() + .key(PREFIX + "status.patch.max.retry") + .description( + "Maximal number of retry attempts of requests to k8s server for resource " + + "status update.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption StatusPatchFailureBackoffSeconds = + ConfigOption.builder() + .key(PREFIX + "status.patch.failure.backoff.seconds") + .description( + "Default time (in seconds) to wait till next request to patch " + + "resource status update.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption AppReconcileIntervalSeconds = + ConfigOption.builder() + .key(PREFIX + "app.reconcile.interval.seconds") + .description( + "Interval (in seconds) to reconcile when application is is starting " + + "up. Note that reconcile is always expected to be triggered " + + "per update - this interval controls the reconcile behavior " + + "when operator still need to reconcile even when there's no " + + "update ,e.g. for timeout checks.") + .defaultValue(120L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption ForegroundRequestTimeoutSeconds = + ConfigOption.builder() + .key(PREFIX + "foreground.request.timeout.seconds") + .description( + "Timeout (in seconds) to for requests made to API server. this " + + "applies only to foreground requests.") + .defaultValue(120L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption OperatorWatchedNamespaces = + ConfigOption.builder() + .key(PREFIX + "watched.namespaces") + .description( + "Comma-separated list of namespaces that the operator would be " + + "watching for Spark resources. 
If unset, operator would " + + "watch all namespaces by default.") + .defaultValue(null) + .typeParameterClass(String.class) + .build(); + public static final ConfigOption TrimAttemptStateTransitionHistory = + ConfigOption.builder() + .key(PREFIX + "trim.attempt.state.transition.history") + .description( + "When enabled, operator would trim state transition history when a " + + "new attempt starts, keeping previous attempt summary only.") + .defaultValue(true) + .typeParameterClass(Boolean.class) + .build(); + + public static final ConfigOption JOSDKMetricsEnabled = ConfigOption.builder() + .key(PREFIX + "josdk.metrics.enabled") + .description( + "When enabled, the josdk metrics will be added in metrics source and " + + "configured for operator.") + .defaultValue(true) + .build(); + + public static final ConfigOption KubernetesClientMetricsEnabled = + ConfigOption.builder() + .key(PREFIX + "kubernetes.client.metrics.enabled") + .defaultValue(true) + .description( + "Enable KubernetesClient metrics for measuring the HTTP traffic to " + + "the Kubernetes API Server. Since the metrics is collected " + + "via Okhttp interceptors, can be disabled when opt in " + + "customized interceptors.") + .build(); + + public static final ConfigOption + KubernetesClientMetricsGroupByResponseCodeGroupEnabled = ConfigOption.builder() + .key(PREFIX + "kubernetes.client.metrics.group.by.response.code.group.enable") + .description( + "When enabled, additional metrics group by http response code group(1xx, " + + "2xx, 3xx, 4xx, 5xx) received from API server will be added. Users " + + "can disable it when their monitoring system can combine lower level " + + "kubernetes.client.http.response.<3-digit-response-code> metrics.") + .defaultValue(true) + .build(); + public static final ConfigOption OperatorProbePort = ConfigOption.builder() + .key(PREFIX + "probe.port") + .defaultValue(18080) + .description("The port used for health/readiness check probe status.") + .typeParameterClass(Integer.class) + .enableDynamicOverride(false) + .build(); + + public static final ConfigOption OperatorMetricsPort = ConfigOption.builder() + .key(PREFIX + "metrics.port") + .defaultValue(19090) + .description("The port used for checking metrics") + .typeParameterClass(Integer.class) + .enableDynamicOverride(false) + .build(); + + public static final ConfigOption SentinelExecutorServicePoolSize = + ConfigOption.builder() + .key(PREFIX + "sentinel.executor.pool.size") + .description( + "Size of executor service in Sentinel Managers to check the health " + + "of sentinel resources.") + .defaultValue(3) + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .build(); + + public static final ConfigOption SENTINEL_RESOURCE_RECONCILIATION_DELAY = + ConfigOption.builder() + .key(PREFIX + "health.sentinel.resource.reconciliation.delay.seconds") + .defaultValue(60L) + .description( + "Allowed max time(seconds) between spec update and reconciliation " + + "for sentinel resources.") + .enableDynamicOverride(true) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption APPLICATION_STATUS_LISTENER_CLASS_NAMES = + ConfigOption.builder() + .key(PREFIX + "application.status.listener.class.names") + .defaultValue("") + .description( + "Comma-separated names of ApplicationStatusListener class " + + "implementations") + .enableDynamicOverride(false) + .typeParameterClass(String.class) + .build(); + public static final ConfigOption LEADER_ELECTION_ENABLED = + ConfigOption.builder() + .key(PREFIX + 
"leader.election.enabled") + .defaultValue(false) + .description( + "Enable leader election for the operator to allow running standby " + + "instances.") + .enableDynamicOverride(false) + .typeParameterClass(Boolean.class) + .build(); + public static final ConfigOption LEADER_ELECTION_LEASE_NAME = + ConfigOption.builder() + .key(PREFIX + "leader.election.lease.name") + .defaultValue("spark-operator-lease") + .description( + "Leader election lease name, must be unique for leases in the same " + + "namespace.") + .enableDynamicOverride(false) + .typeParameterClass(String.class) + .build(); + public static final ConfigOption LEADER_ELECTION_LEASE_DURATION_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.lease.duration.seconds") + .defaultValue(1200L) + .description("Leader election lease duration.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption LEADER_ELECTION_RENEW_DEADLINE_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.renew.deadline.seconds") + .defaultValue(600L) + .description("Leader election renew deadline.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption LEADER_ELECTION_RETRY_PERIOD_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.retry.period.seconds") + .defaultValue(180L) + .description("Leader election retry period.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); + + public static List getApplicationStatusListener() { + List listeners = new ArrayList<>(); + String listenerNamesStr = + SparkOperatorConf.APPLICATION_STATUS_LISTENER_CLASS_NAMES.getValue(); + if (StringUtils.isNotBlank(listenerNamesStr)) { + try { + List listenerNames = + Arrays.stream(listenerNamesStr.split(",")).map(String::trim) + .collect(Collectors.toList()); + for (String name: listenerNames) { + Class listenerClass = Class.forName(name); + if (ApplicationStatusListener.class.isAssignableFrom(listenerClass)) { + listeners.add((ApplicationStatusListener) + listenerClass.getConstructor().newInstance()); + } + } + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to initialize listeners for operator with {}", + listenerNamesStr, e); + } + } + } + return listeners; + } + + public static LeaderElectionConfiguration getLeaderElectionConfig() { + return new LeaderElectionConfiguration(LEADER_ELECTION_LEASE_NAME.getValue(), + OperatorNamespace.getValue(), + Duration.ofSeconds(LEADER_ELECTION_LEASE_DURATION_SECONDS.getValue()), + Duration.ofSeconds(LEADER_ELECTION_RENEW_DEADLINE_SECONDS.getValue()), + Duration.ofSeconds(LEADER_ELECTION_RETRY_PERIOD_SECONDS.getValue())); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java new file mode 100644 index 00000000..68a636e4 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java
new file mode 100644
index 00000000..68a636e4
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.config;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Loads ConfigOption values from the properties file. In addition, loads hot property
+ * overrides from the config map if dynamic config is enabled.
+ */
+@Slf4j
+public class SparkOperatorConfManager {
+    public static final String INITIAL_CONFIG_FILE_PATH =
+            "/opt/spark-operator/conf/spark-operator.properties";
+
+    public static final String METRICS_CONFIG_FILE_PATH =
+            "/opt/spark-operator/conf/metrics.properties";
+
+    public static final String INITIAL_CONFIG_FILE_PATH_PROPS_KEY =
+            "spark.operator.base.property.file.name";
+
+    public static final String METRICS_CONFIG_FILE_PATH_PROPS_KEY =
+            "spark.operator.metrics.property.file.name";
+
+    public static final SparkOperatorConfManager INSTANCE = new SparkOperatorConfManager();
+    protected final Properties initialConfig;
+    protected final Properties metricsConfig;
+    protected Properties configOverrides;
+
+    protected SparkOperatorConfManager() {
+        this.initialConfig = new Properties();
+        this.configOverrides = new Properties();
+        this.metricsConfig = new Properties();
+        initialize();
+    }
+
+    public String getValue(String key) {
+        String currentValue = configOverrides.getProperty(key);
+        return StringUtils.isEmpty(currentValue) ? getInitialValue(key) : currentValue;
+    }
+
+    public String getInitialValue(String key) {
+        return initialConfig.getProperty(key);
+    }
+
+    public void refresh(Map<String, String> updatedConfig) {
+        synchronized (this) {
+            this.configOverrides = new Properties();
+            configOverrides.putAll(updatedConfig);
+        }
+    }
+
+    public Properties getMetricsProperties() {
+        return metricsConfig;
+    }
+
+    private void initialize() {
+        initialConfig.putAll(System.getProperties());
+        Properties properties = getProperties(
+                System.getProperty(INITIAL_CONFIG_FILE_PATH_PROPS_KEY, INITIAL_CONFIG_FILE_PATH));
+        initialConfig.putAll(properties);
+        initializeMetricsProperties();
+    }
+
+    private void initializeMetricsProperties() {
+        Properties properties = getProperties(
+                System.getProperty(METRICS_CONFIG_FILE_PATH_PROPS_KEY, METRICS_CONFIG_FILE_PATH));
+        metricsConfig.putAll(properties);
+    }
+
+    private Properties getProperties(String filePath) {
+        Properties properties = new Properties();
+        try (InputStream inputStream = new FileInputStream(filePath)) {
+            properties.load(inputStream);
+        } catch (Exception e) {
+            log.error("Failed to load properties from {}.", filePath, e);
+        }
+        return properties;
+    }
+}
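+
+// A short usage sketch: dynamic overrides take precedence over the initial
+// (file and system) configuration, so a refreshed value is served immediately.
+// The key below is purely illustrative.
+//
+//   SparkOperatorConfManager conf = SparkOperatorConfManager.INSTANCE;
+//   String before = conf.getValue("spark.operator.example.key");   // from properties file
+//   conf.refresh(Map.of("spark.operator.example.key", "override"));
+//   String after = conf.getValue("spark.operator.example.key");    // "override"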
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java
new file mode 100644
index 00000000..723dfab9
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.config;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusHandler;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusUpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.processing.event.rate.RateLimited;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorNamespace;
+
+/**
+ * Serves dynamic configuration for the Spark Operator.
+ * When enabled, the operator assumes the config file is located in the given config map.
+ * It keeps watching the config map and applies changes when an update is detected.
+ */
+@ControllerConfiguration
+@RateLimited(maxReconciliations = 1, within = 30)
+@RequiredArgsConstructor
+@Slf4j
+public class SparkOperatorConfigMapReconciler implements Reconciler<ConfigMap>,
+        ErrorStatusHandler<ConfigMap>, EventSourceInitializer<ConfigMap> {
+    private final Function<Set<String>, Boolean> namespaceUpdater;
+
+    @Override
+    public ErrorStatusUpdateControl<ConfigMap> updateErrorStatus(ConfigMap resource,
+                                                                 Context<ConfigMap> context,
+                                                                 Exception e) {
+        log.error("Failed to reconcile dynamic config change.", e);
+        return ErrorStatusUpdateControl.noStatusUpdate();
+    }
+
+    @Override
+    public Map<String, EventSource> prepareEventSources(EventSourceContext<ConfigMap> context) {
+        var configMapEventSource =
+                new InformerEventSource<>(InformerConfiguration.from(ConfigMap.class, context)
+                        .withNamespaces(OperatorNamespace.getValue())
+                        .build(), context);
+        return EventSourceInitializer.nameEventSources(configMapEventSource);
+    }
+
+    @Override
+    public UpdateControl<ConfigMap> reconcile(ConfigMap resource, Context<ConfigMap> context)
+            throws Exception {
+        SparkOperatorConfManager.INSTANCE.refresh(resource.getData());
+        namespaceUpdater.apply(SparkReconcilerUtils.getWatchedNamespaces());
+        return UpdateControl.noUpdate();
+    }
+}
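+
+// A minimal sketch of the kind of ConfigMap this reconciler reacts to; the name
+// and namespace below are assumptions for illustration, and the data keys follow
+// the same property names as spark-operator.properties:
+//
+//   apiVersion: v1
+//   kind: ConfigMap
+//   metadata:
+//     name: spark-operator-dynamic-config
+//     namespace: spark-operator
+//   data:
+//     spark.operator.example.key: "override"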
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
new file mode 100644
index 00000000..97fe43bd
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.controller;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.driverLabels;
+import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.executorLabels;
+
+/**
+ * Context for the {@link org.apache.spark.kubernetes.operator.SparkApplication} resource.
+ * Includes secondary resource(s) and the desired secondary resource spec.
+ */
+@RequiredArgsConstructor
+@Slf4j
+public class SparkApplicationContext {
+    @Getter
+    private final SparkApplication sparkApplication;
+    private final Context<SparkApplication> josdkContext;
+    private ApplicationResourceSpec secondaryResourceSpec;
+
+    public Optional<Pod> getDriverPod() {
+        return josdkContext.getSecondaryResourcesAsStream(Pod.class)
+                .filter(p -> p.getMetadata().getLabels().entrySet()
+                        .containsAll(driverLabels(sparkApplication).entrySet()))
+                .findAny();
+    }
+
+    public Set<Pod> getExecutorsForApplication() {
+        return josdkContext.getSecondaryResourcesAsStream(Pod.class)
+                .filter(p -> p.getMetadata().getLabels().entrySet()
+                        .containsAll(executorLabels(sparkApplication).entrySet()))
+                .collect(Collectors.toSet());
+    }
+
+    private ApplicationResourceSpec getSecondaryResourceSpec() {
+        synchronized (this) {
+            if (secondaryResourceSpec == null) {
+                secondaryResourceSpec = SparkApplicationReconcileUtils.buildResourceSpec(
+                        sparkApplication, josdkContext.getClient());
+            }
+            return secondaryResourceSpec;
+        }
+    }
+
+    public KubernetesClient getClient() {
+        return josdkContext.getClient();
+    }
+
+    public List<HasMetadata> getDriverPreResourcesSpec() {
+        return getSecondaryResourceSpec().getDriverPreResources();
+    }
+
+    public Pod getDriverPodSpec() {
+        return getSecondaryResourceSpec().getConfiguredPod();
+    }
+
+    public List<HasMetadata> getDriverResourcesSpec() {
+        return getSecondaryResourceSpec().getDriverResources();
+    }
+}
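+
+// A short usage sketch (hypothetical caller inside a reconcile step): the context
+// resolves the driver among the cached secondary Pods by matching the app's
+// driver labels, so no extra API call is needed.
+//
+//   SparkApplicationContext ctx = new SparkApplicationContext(app, josdkContext);
+//   ctx.getDriverPod().ifPresent(driver ->
+//           System.out.println("driver pod: " + driver.getMetadata().getName()));
+//   int executorCount = ctx.getExecutorsForApplication().size();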
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.decorators;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import lombok.RequiredArgsConstructor;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+
+import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.sparkAppResourceLabels;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.buildOwnerReferenceTo;
+
+/**
+ * Decorates the driver (pod) to make sure its metadata matches the event source.
+ * Also adds an owner reference to the owning SparkApplication for garbage collection.
+ */
+@RequiredArgsConstructor
+public class DriverDecorator implements ResourceDecorator {
+
+    private final SparkApplication app;
+
+    /**
+     * Adds labels and an owner reference pointing to the app, as done for all
+     * secondary resources.
+     */
+    @Override
+    public <T extends HasMetadata> T decorate(T resource) {
+        ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
+                .addToOwnerReferences(buildOwnerReferenceTo(app))
+                .addToLabels(sparkAppResourceLabels(app))
+                .withNamespace(app.getMetadata().getNamespace())
+                .build();
+        resource.setMetadata(metaData);
+        return resource;
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
new file mode 100644
index 00000000..68d31880
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.decorators;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.OwnerReference;
+import io.fabric8.kubernetes.api.model.Pod;
+import lombok.RequiredArgsConstructor;
+import org.apache.commons.collections.CollectionUtils;
+
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.buildOwnerReferenceTo;
+
+/**
+ * Decorates driver resources (except the pod itself).
+ * This makes sure all resources have an owner reference to the driver pod, so they can
+ * be garbage collected upon termination.
+ * Secondary resources are garbage-collected only after ALL owners are deleted. Therefore,
+ * the operator makes only the driver pod owned by the SparkApplication, while all other
+ * secondary resources are owned by the driver. In this way, after the driver pod is deleted
+ * at the end of each attempt, all other resources are garbage collected automatically.
+ */
+@RequiredArgsConstructor
+public class DriverResourceDecorator implements ResourceDecorator {
+    private final Pod driverPod;
+
+    @Override
+    public <T extends HasMetadata> T decorate(T resource) {
+        boolean ownerReferenceExists = false;
+        if (CollectionUtils.isNotEmpty(resource.getMetadata().getOwnerReferences())) {
+            for (OwnerReference o : resource.getMetadata().getOwnerReferences()) {
+                if (driverPod.getKind().equals(o.getKind())
+                        && driverPod.getMetadata().getName().equals(o.getName())
+                        && driverPod.getMetadata().getUid().equals(o.getUid())) {
+                    ownerReferenceExists = true;
+                    break;
+                }
+            }
+        }
+        if (!ownerReferenceExists) {
+            ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
+                    .addToOwnerReferences(buildOwnerReferenceTo(driverPod))
+                    .build();
+            resource.setMetadata(metaData);
+        }
+        return resource;
+    }
+}
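+
+// The resulting ownership chain, sketched for a hypothetical app "pi":
+//
+//   SparkApplication "pi"
+//     └── driver Pod          (owner reference added by DriverDecorator)
+//           ├── ConfigMap(s)  (owner reference added by DriverResourceDecorator)
+//           └── Service(s)    (owner reference added by DriverResourceDecorator)
+//
+// Deleting the driver pod therefore cascades to every other secondary resource,
+// while the SparkApplication itself survives for the next attempt.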
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java
new file mode 100644
index 00000000..7bc51470
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.health;
+
+import com.google.common.annotations.VisibleForTesting;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.spark.kubernetes.operator.BaseResource;
+import org.apache.spark.kubernetes.operator.config.SparkOperatorConf;
+import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.spark.kubernetes.operator.Constants.SPARK_CONF_SENTINEL_DUMMY_FIELD;
+import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL;
+
+/**
+ * The sentinel manager monitors dedicated sentinel resources to make sure the operator
+ * is healthy.
+ *
+ * @param <CR> custom resource type
+ */
+@RequiredArgsConstructor
+@Slf4j
+public class SentinelManager<CR extends BaseResource<?, ?, ?, ?, ?>> {
+
+    private final ConcurrentHashMap<ResourceID, SentinelResourceState> sentinelResources =
+            new ConcurrentHashMap<>();
+
+    private final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(
+            SparkOperatorConf.SentinelExecutorServicePoolSize.getValue());
+
+    public static boolean isSentinelResource(HasMetadata resource) {
+        var labels = resource.getMetadata().getLabels();
+        if (labels == null) {
+            return false;
+        }
+        var namespace = resource.getMetadata().getNamespace();
+        return shouldSentinelWatchGivenNamespace(namespace)
+                && Boolean.TRUE.toString()
+                .equalsIgnoreCase(labels.getOrDefault(SENTINEL_LABEL, Boolean.FALSE.toString()));
+    }
+
+    private static boolean shouldSentinelWatchGivenNamespace(String namespace) {
+        if ((!SparkReconcilerUtils.getWatchedNamespaces().isEmpty())
+                && !SparkReconcilerUtils.getWatchedNamespaces().contains(namespace)) {
+            if (log.isErrorEnabled()) {
+                log.error("Skip watching sentinel resource in namespace {}", namespace);
+            }
+            return false;
+        }
+        return true;
+    }
+
+    public boolean allSentinelsAreHealthy() {
+        Set<ResourceID> unWatchedKey = new HashSet<>();
+        var result = sentinelResources.entrySet().stream().filter(
+                x -> {
+                    if (x.getKey().getNamespace().isPresent()
+                            && shouldSentinelWatchGivenNamespace(x.getKey().getNamespace().get())) {
+                        return true;
+                    }
+                    unWatchedKey.add(x.getKey());
+                    return false;
+                }
+        ).map(Map.Entry::getValue).allMatch(SentinelResourceState::isHealthy);
+        sentinelResources.keySet().removeAll(unWatchedKey);
+        return result;
+    }
+
+    public void checkHealth(ResourceID resourceID, KubernetesClient client) {
+        SentinelResourceState sentinelResourceState = sentinelResources.get(resourceID);
+        if (sentinelResourceState == null) {
+            if (log.isErrorEnabled()) {
+                log.error("Sentinel resources {} not found. Stopping sentinel health checks",
+                        resourceID);
+            }
+            return;
+        }
+
+        if (sentinelResourceState.reconciledSinceUpdate()) {
+            log.info("Sentinel reports healthy state globally");
+            sentinelResourceState.isHealthy = true;
+        } else {
+            if (log.isErrorEnabled()) {
+                log.error(
+                        "Sentinel deployment {} latest spec was not reconciled. Expected "
+                                + "generation larger than {}, received {}",
+                        resourceID,
+                        sentinelResourceState.previousGeneration,
+                        sentinelResourceState.resource.getMetadata().getGeneration());
+            }
+            sentinelResourceState.isHealthy = false;
+        }
+
+        updateSpecAndScheduleHealthCheck(resourceID, sentinelResourceState, client);
+    }
+
+    public boolean handleSentinelResourceReconciliation(CR resource, KubernetesClient client) {
+        if (!isSentinelResource(resource)) {
+            return false;
+        }
+
+        var resourceId = ResourceID.fromResource(resource);
+        sentinelResources.compute(
+                resourceId,
+                (id, previousState) -> {
+                    boolean firstReconcile = false;
+                    if (previousState == null) {
+                        firstReconcile = true;
+                        previousState = new SentinelResourceState();
+                    }
+                    previousState.onReconcile(resource);
+                    if (firstReconcile) {
+                        updateSpecAndScheduleHealthCheck(resourceId, previousState, client);
+                    }
+                    return previousState;
+                });
+        return true;
+    }
+
+    private void updateSpecAndScheduleHealthCheck(ResourceID resourceID,
+                                                  SentinelResourceState sentinelResourceState,
+                                                  KubernetesClient client) {
+        var sparkConf = sentinelResourceState.resource.getSpec().getSparkConf();
+        sparkConf.compute(SPARK_CONF_SENTINEL_DUMMY_FIELD, (key, value) -> {
+            if (value == null) {
+                return "1";
+            } else {
+                return String.valueOf(Long.parseLong(value) + 1);
+            }
+        });
+        sentinelResourceState.previousGeneration =
+                sentinelResourceState.resource.getMetadata().getGeneration();
+        try {
+            if (log.isDebugEnabled()) {
+                log.debug("Update the sentinel kubernetes resource spec {}", sentinelResourceState);
+            }
+            client.resource(SparkReconcilerUtils.clone(sentinelResourceState.resource)).replace();
+        } catch (Throwable t) {
+            if (log.isWarnEnabled()) {
+                log.warn("Could not replace the sentinel deployment spark conf {}",
+                        SPARK_CONF_SENTINEL_DUMMY_FIELD, t);
+            }
+        }
+        var delay = SparkOperatorConf.SENTINEL_RESOURCE_RECONCILIATION_DELAY.getValue();
+        if (log.isInfoEnabled()) {
+            log.info("Scheduling sentinel check for {} in {} seconds", resourceID, delay);
+        }
+        executorService.schedule(() -> checkHealth(resourceID, client),
+                delay,
+                TimeUnit.SECONDS);
+    }
+
+    public class SentinelResourceState {
+        CR resource;
+        long previousGeneration;
+
+        @Getter
+        boolean isHealthy = true;
+
+        void onReconcile(CR cr) {
+            resource = cr;
+        }
+
+        boolean reconciledSinceUpdate() {
+            return resource.getMetadata().getGeneration() > previousGeneration;
+        }
+
+        @Override
+        public String toString() {
+            return new ToStringBuilder(this)
+                    .append("resource", resource)
+                    .append("previousGeneration", previousGeneration)
+                    .append("isHealthy", isHealthy)
+                    .toString();
+        }
+    }
+
+    @VisibleForTesting
+    public ConcurrentHashMap<ResourceID, SentinelResourceState> getSentinelResources() {
+        return sentinelResources;
+    }
+}
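+
+// A sketch of how a sentinel resource could be marked; the concrete label key is
+// defined by the SENTINEL_LABEL constant in Constants and is an assumption here:
+//
+//   metadata:
+//     labels:
+//       <SENTINEL_LABEL>: "true"
+//
+// On every check the manager bumps a dummy sparkConf entry in the sentinel's spec
+// and then expects the resource generation to advance before the next scheduled
+// check; a stalled generation marks the operator unhealthy.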
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
new file mode 100644
index 00000000..947e6d78
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.listeners;
+
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+
+/**
+ * Custom listeners, if added, are notified of Spark App status changes
+ */
+public abstract class ApplicationStatusListener
+        extends BaseStatusListener<ApplicationStatus, SparkApplication> {
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java
new file mode 100644
index 00000000..aa213290
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.listeners;
+
+import org.apache.spark.kubernetes.operator.BaseResource;
+import org.apache.spark.kubernetes.operator.status.BaseStatus;
+
+/**
+ * Custom listeners, if added, are notified of resource status changes
+ */
+public abstract class BaseStatusListener<STATUS extends BaseStatus<?, ?, ?>,
+        CR extends BaseResource<?, ?, ?, ?, STATUS>> {
+    public abstract void listenStatus(CR resource, STATUS prevStatus, STATUS updatedStatus);
+}
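+
+// A minimal sketch of a custom listener; the class name and body are illustrative.
+// Listener classes are looked up by name (see
+// SparkOperatorConf.getApplicationStatusListener) and need a no-arg constructor.
+//
+//   public class LoggingStatusListener extends ApplicationStatusListener {
+//       @Override
+//       public void listenStatus(SparkApplication app, ApplicationStatus prevStatus,
+//                                ApplicationStatus updatedStatus) {
+//           // react to the transition, e.g. emit an audit log line
+//       }
+//   }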
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.metrics;
+
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricSet;
+import com.codahale.metrics.jvm.BufferPoolMetricSet;
+import com.codahale.metrics.jvm.FileDescriptorRatioGauge;
+import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
+import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
+import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
+
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
+public class JVMMetricSet implements MetricSet {
+    public static final String FILE_DESC_RATIO_OPEN_MAX = "fileDesc.ratio.open/max";
+    private final BufferPoolMetricSet bufferPoolMetricSet;
+    private final FileDescriptorRatioGauge fileDescriptorRatioGauge;
+    private final GarbageCollectorMetricSet garbageCollectorMetricSet;
+    private final MemoryUsageGaugeSet memoryUsageGaugeSet;
+    private final ThreadStatesGaugeSet threadStatesGaugeSet;
+
+    public JVMMetricSet() {
+        bufferPoolMetricSet = new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer());
+        fileDescriptorRatioGauge = new FileDescriptorRatioGauge();
+        garbageCollectorMetricSet = new GarbageCollectorMetricSet();
+        memoryUsageGaugeSet = new MemoryUsageGaugeSet();
+        threadStatesGaugeSet = new ThreadStatesGaugeSet();
+    }
+
+    @Override
+    public Map<String, Metric> getMetrics() {
+        final Map<String, Metric> jvmMetrics = new HashMap<>();
+        putAllMetrics(jvmMetrics, bufferPoolMetricSet, "bufferPool");
+        jvmMetrics.put(FILE_DESC_RATIO_OPEN_MAX, fileDescriptorRatioGauge);
+        putAllMetrics(jvmMetrics, garbageCollectorMetricSet, "gc");
+        putAllMetrics(jvmMetrics, memoryUsageGaugeSet, "memoryUsage");
+        putAllMetrics(jvmMetrics, threadStatesGaugeSet, "threadStates");
+        return jvmMetrics;
+    }
+
+    private void putAllMetrics(final Map<String, Metric> destination, final MetricSet origin,
+                               final String prefix) {
+        for (Map.Entry<String, Metric> entry : origin.getMetrics().entrySet()) {
+            destination.put(prefix + "." + entry.getKey(), entry.getValue());
+        }
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java
new file mode 100644
index 00000000..c904ff67
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.metrics;
+
+import com.sun.net.httpserver.HttpServer;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink;
+import org.apache.spark.metrics.sink.Sink;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Optional;
+
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorMetricsPort;
+
+@Slf4j
+public class MetricsService {
+    HttpServer server;
+    MetricsSystem metricsSystem;
+
+    public MetricsService(MetricsSystem metricsSystem) {
+        this.metricsSystem = metricsSystem;
+        try {
+            server = HttpServer.create(new InetSocketAddress(OperatorMetricsPort.getValue()), 0);
+        } catch (IOException e) {
+            throw new RuntimeException("Failed to create Metrics Server", e);
+        }
+        server.setExecutor(null);
+    }
+
+    public void start() {
+        log.info("Metrics Service started");
+        List<Sink> sinks = metricsSystem.getSinks();
+        Optional<Sink> instanceOptional =
+                sinks.stream().filter(x -> x instanceof PrometheusPullModelSink).findAny();
+        instanceOptional.ifPresent(sink ->
+                server.createContext("/prometheus", (PrometheusPullModelSink) sink));
+        server.start();
+    }
+
+    public void stop() {
+        log.info("Metrics Service stopped");
+        server.stop(0);
+    }
+}
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.metrics;
+
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import lombok.Data;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.metrics.source.JVMSource;
+import org.apache.spark.metrics.sink.Sink;
+import org.apache.spark.metrics.source.Source;
+import org.apache.spark.util.Utils;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+@Slf4j
+public class MetricsSystem {
+    private AtomicBoolean running = new AtomicBoolean(false);
+    @Getter
+    private List<Sink> sinks;
+    @Getter
+    private List<Source> sources;
+    @Getter
+    private MetricRegistry registry;
+    private Properties properties;
+    private Map<String, SinkProps> sinkPropertiesMap;
+
+    public MetricsSystem() {
+        this.sinks = new ArrayList<>();
+        this.sources = new ArrayList<>();
+        this.registry = new MetricRegistry();
+        this.properties = new Properties();
+        this.sinkPropertiesMap = new HashMap<>();
+    }
+
+    public MetricsSystem(Properties properties) {
+        this.sinks = new ArrayList<>();
+        this.sources = new ArrayList<>();
+        this.registry = new MetricRegistry();
+        this.properties = properties;
+        this.sinkPropertiesMap = MetricsSystemFactory.parseSinkProperties(this.properties);
+    }
+
+    public void start() {
+        if (running.get()) {
+            throw new IllegalStateException(
+                    "Attempting to start a MetricsSystem that is already running");
+        }
+        running.set(true);
+        registerSources();
+        registerSinks();
+        sinks.forEach(Sink::start);
+    }
+
+    public void stop() {
+        if (running.get()) {
+            sinks.forEach(Sink::stop);
+            registry.removeMatching(MetricFilter.ALL);
+        } else {
+            log.error("Stopping a MetricsSystem that is not running");
+        }
+        running.set(false);
+    }
+
+    public void report() {
+        sinks.forEach(Sink::report);
+    }
+
+    public void registerSinks() {
+        log.info("sinkPropertiesMap: {}", sinkPropertiesMap);
+        sinkPropertiesMap.values().forEach(sinkProp -> {
+            Class<Sink> sink = Utils.classForName(sinkProp.getClassName(), true, false);
+            Sink sinkInstance;
+            try {
+                sinkInstance = sink.getConstructor(Properties.class, MetricRegistry.class)
+                        .newInstance(sinkProp.getProperties(), registry);
+            } catch (Exception e) {
+                if (log.isErrorEnabled()) {
+                    log.error("Failed to create metrics sink for class {}, sink properties {}",
+                            sinkProp.getClassName(), sinkProp.getProperties());
+                }
+                throw new RuntimeException("Failed to create metrics sink", e);
+            }
+            sinks.add(sinkInstance);
+        });
+    }
+
+    private void registerSources() {
+        // TODO: parse the properties to configure sources
+        registerSource(new JVMSource());
+    }
+
+    public void registerSource(Source source) {
+        sources.add(source);
+        try {
+            String regName = MetricRegistry.name(source.sourceName());
+            registry.register(regName, source.metricRegistry());
+        } catch (IllegalArgumentException e) {
+            log.error("Metrics already registered", e);
+        }
+    }
+
+    @Data
+    public static class SinkProps {
+        String className;
+        Properties properties;
+
+        public SinkProps() {
+            this.className = "";
+            this.properties = new Properties();
+        }
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java
new file mode 100644
index 00000000..8f24fc33
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.metrics;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager;
+
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.CLASS;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.METRIC_PREFIX;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.SINK;
+
+public class MetricsSystemFactory {
+    public static MetricsSystem createMetricsSystem() {
+        Properties properties =
+                parseMetricsProperties(SparkOperatorConfManager.INSTANCE.getMetricsProperties());
+        return new MetricsSystem(properties);
+    }
+
+    private static Properties parseMetricsProperties(Properties userProperties) {
+        Properties properties = new Properties();
+        Enumeration<?> valueEnumeration = userProperties.propertyNames();
+        while (valueEnumeration.hasMoreElements()) {
+            String key = (String) valueEnumeration.nextElement();
+            if (key.startsWith(METRIC_PREFIX)) {
+                properties.put(key.substring(METRIC_PREFIX.length()),
+                        userProperties.getProperty(key));
+            }
+        }
+        return properties;
+    }
+
+    public static Map<String, MetricsSystem.SinkProps> parseSinkProperties(
+            Properties metricsProperties) {
+        Map<String, MetricsSystem.SinkProps> propertiesMap = new HashMap<>();
+        // e.g. "sink.graphite.class"="org.apache.spark.metrics.sink.GraphiteSink"
+        Enumeration<?> valueEnumeration = metricsProperties.propertyNames();
+        while (valueEnumeration.hasMoreElements()) {
+            String key = (String) valueEnumeration.nextElement();
+            int firstDotIndex = StringUtils.ordinalIndexOf(key, ".", 1);
+            int secondDotIndex = StringUtils.ordinalIndexOf(key, ".", 2);
+            if (key.startsWith(SINK)) {
+                String shortName = key.substring(firstDotIndex + 1, secondDotIndex);
+                MetricsSystem.SinkProps sinkProps =
+                        propertiesMap.getOrDefault(shortName, new MetricsSystem.SinkProps());
+                if (key.endsWith(CLASS)) {
+                    sinkProps.setClassName(metricsProperties.getProperty(key));
+                } else {
+                    sinkProps.getProperties().put(key.substring(secondDotIndex + 1),
+                            metricsProperties.getProperty(key));
+                }
+                propertiesMap.put(shortName, sinkProps);
+            }
+        }
+        sinkPropertiesSanityCheck(propertiesMap);
+        return propertiesMap;
+    }
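+
+    // A worked example of the parsing above, with an illustrative Graphite sink:
+    //
+    //   sink.graphite.class=org.apache.spark.metrics.sink.GraphiteSink
+    //   sink.graphite.host=graphite.example.com
+    //   sink.graphite.port=2003
+    //
+    // yields one SinkProps entry under the short name "graphite" whose className is
+    // org.apache.spark.metrics.sink.GraphiteSink and whose properties contain
+    // host=graphite.example.com and port=2003 (the "sink.graphite." prefix stripped).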
full class name", + pair.getKey()); + throw new RuntimeException(errorMessage); + } + // Check the existence of each class full name + try { + Class.forName(pair.getValue().getClassName()); + } catch (ClassNotFoundException e) { + throw new RuntimeException( + String.format("Fail to find class %s", pair.getValue().getClassName()), e); + } + } + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java new file mode 100644 index 00000000..23a18f66 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.kubernetes.operator.metrics.sink; + +import com.codahale.metrics.MetricRegistry; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import lombok.extern.slf4j.Slf4j; +import org.apache.spark.metrics.sink.PrometheusServlet; + +import javax.servlet.http.HttpServletRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage; + +@Slf4j +public class PrometheusPullModelSink extends PrometheusServlet implements HttpHandler { + public PrometheusPullModelSink(Properties properties, MetricRegistry registry) { + super(properties, registry); + } + + @Override + public void start() { + log.info("PrometheusPullModelSink started"); + } + + @Override + public void stop() { + log.info("PrometheusPullModelSink stopped"); + } + + @Override + public void report() { + //no-op + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + // https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala#L50 + // Temporary solution since PrometheusServlet.getMetricsSnapshot method does not use + // httpServletRequest at all + HttpServletRequest httpServletRequest = null; + String value = getMetricsSnapshot(httpServletRequest); + // Prometheus will have invalid syntax exception while parsing value equal to "[]", e.g: + // metrics_jvm_threadStates_deadlocks_Number{type="gauges"} [] + // metrics_jvm_threadStates_deadlocks_Value{type="gauges"} [] + String[] records = value.split("\n"); + List filteredRecords = new ArrayList<>(); + for (String record : records) { + String[] keyValuePair = record.split(" "); + if ("[]".equals(keyValuePair[1])) { + log.info("Bug identified strconv.ParseFloat: parsing []"); + continue; + } + filteredRecords.add(record); + } + 
sendMessage(exchange, 200, String.join("\n", filteredRecords)); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java new file mode 100644 index 00000000..b1d499dd --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.metrics.source; + +import com.codahale.metrics.MetricRegistry; +import org.apache.spark.kubernetes.operator.metrics.JVMMetricSet; +import org.apache.spark.metrics.source.Source; + +public class JVMSource implements Source { + + @Override + public String sourceName() { + return "jvm"; + } + + @Override + public MetricRegistry metricRegistry() { + MetricRegistry metricRegistry = new MetricRegistry(); + metricRegistry.registerAll(new JVMMetricSet()); + return metricRegistry; + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java new file mode 100644 index 00000000..003716d0 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics.source; + +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.MetricRegistry; +import lombok.extern.slf4j.Slf4j; +import okhttp3.Interceptor; +import okhttp3.Request; +import okhttp3.Response; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.spark.metrics.source.Source; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.KubernetesClientMetricsGroupByResponseCodeGroupEnabled; + +@Slf4j +public class KubernetesMetricsInterceptor implements Interceptor, Source { + MetricRegistry metricRegistry; + public static final String NAMESPACES = "namespaces"; + public static final String HTTP_REQUEST_GROUP = "http.request"; + public static final String HTTP_REQUEST_FAILED_GROUP = "failed"; + public static final String HTTP_RESPONSE_GROUP = "http.response"; + public static final String HTTP_RESPONSE_1XX = "1xx"; + public static final String HTTP_RESPONSE_2XX = "2xx"; + public static final String HTTP_RESPONSE_3XX = "3xx"; + public static final String HTTP_RESPONSE_4XX = "4xx"; + public static final String HTTP_RESPONSE_5XX = "5xx"; + private final Histogram responseLatency; + private final Map responseCodeMeters = + new ConcurrentHashMap<>(); + private final Map requestMethodCounter = new ConcurrentHashMap<>(); + private final List responseCodeGroupMeters = new ArrayList<>(5); + private final Meter requestFailedRateMeter; + private final Meter requestRateMeter; + private final Meter responseRateMeter; + private final Map namespacedResourceMethodMeters = new ConcurrentHashMap<>(); + + public KubernetesMetricsInterceptor() { + metricRegistry = new MetricRegistry(); + + responseLatency = metricRegistry.histogram( + MetricRegistry.name(HTTP_RESPONSE_GROUP, "latency", "nanos").toLowerCase()); + requestFailedRateMeter = + metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_FAILED_GROUP).toLowerCase()); + requestRateMeter = + metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_GROUP).toLowerCase()); + responseRateMeter = + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_GROUP).toLowerCase()); + + if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) { + responseCodeGroupMeters.add( + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_1XX).toLowerCase())); + responseCodeGroupMeters.add( + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_2XX).toLowerCase())); + responseCodeGroupMeters.add( + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_3XX).toLowerCase())); + responseCodeGroupMeters.add( + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_4XX).toLowerCase())); + responseCodeGroupMeters.add( + metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_5XX).toLowerCase())); + } + } + + @NotNull + @Override + public Response intercept(@NotNull Chain chain) throws IOException { + Request request = chain.request(); + updateRequestMetrics(request); + Response response = null; + final long startTime = System.nanoTime(); + try { + response = chain.proceed(request); + return response; + } finally { + updateResponseMetrics(response, startTime); + } + } + + @Override + public String sourceName() { + return "kubernetes.client"; + } + + @Override + public MetricRegistry metricRegistry() { + return 
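+
+    // A wiring sketch, assuming an OkHttp-based Kubernetes client (the builder call
+    // below is standard OkHttp API; where the client is actually built is up to the
+    // operator's client factory):
+    //
+    //   KubernetesMetricsInterceptor interceptor = new KubernetesMetricsInterceptor();
+    //   OkHttpClient httpClient = new OkHttpClient.Builder()
+    //           .addInterceptor(interceptor)
+    //           .build();
+    //   // interceptor.metricRegistry() can then be registered with the MetricsSystem
+    //   // as a Source named "kubernetes.client".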
+
+    @Override
+    public String sourceName() {
+        return "kubernetes.client";
+    }
+
+    @Override
+    public MetricRegistry metricRegistry() {
+        return this.metricRegistry;
+    }
+
+    private void updateRequestMetrics(Request request) {
+        this.requestRateMeter.mark();
+        getMeterByRequestMethod(request.method()).mark();
+        Optional<Pair<String, String>> resourceNamePairOptional =
+                parseNamespaceScopedResource(request.url().uri().getPath());
+        resourceNamePairOptional.ifPresent(pair -> {
+                getMeterByRequestMethodAndResourceName(
+                        pair.getValue(), request.method()).mark();
+                getMeterByRequestMethodAndResourceName(
+                        pair.getKey() + "." + pair.getValue(),
+                        request.method()).mark();
+            }
+        );
+    }
+
+    private void updateResponseMetrics(Response response, long startTimeNanos) {
+        final long latency = System.nanoTime() - startTimeNanos;
+        if (response != null) {
+            this.responseRateMeter.mark();
+            this.responseLatency.update(latency);
+            getMeterByResponseCode(response.code()).mark();
+            if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) {
+                responseCodeGroupMeters.get(response.code() / 100 - 1).mark();
+            }
+        } else {
+            this.requestFailedRateMeter.mark();
+        }
+    }
+
+    private Meter getMeterByRequestMethod(String method) {
+        return requestMethodCounter.computeIfAbsent(
+                method,
+                key ->
+                        metricRegistry.meter(
+                                MetricRegistry.name(HTTP_REQUEST_GROUP, method).toLowerCase()));
+    }
+
+    private Meter getMeterByRequestMethodAndResourceName(String resourceName, String method) {
+        String metricsName = MetricRegistry.name(resourceName, method);
+        return namespacedResourceMethodMeters.computeIfAbsent(
+                metricsName,
+                key ->
+                        metricRegistry.meter(metricsName.toLowerCase()));
+    }
+
+    private Meter getMeterByResponseCode(int code) {
+        return responseCodeMeters.computeIfAbsent(code,
+                key -> metricRegistry.meter(
+                        MetricRegistry.name(HTTP_RESPONSE_GROUP, String.valueOf(code))));
+    }
+
+    public Optional<Pair<String, String>> parseNamespaceScopedResource(String path) {
+        if (path.contains(NAMESPACES)) {
+            var index = path.indexOf(NAMESPACES) + NAMESPACES.length();
+            String namespaceAndResources = path.substring(index + 1);
+            String[] parts = namespaceAndResources.split("/");
+            if (parts.length < 2) {
+                // namespace-scoped URL without a resource segment; nothing to record
+                return Optional.empty();
+            }
+            return Optional.of(Pair.of(parts[0], parts[1]));
+        } else {
+            return Optional.empty();
+        }
+    }
+}
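+
+// A worked example of the path parsing above: a GET request to
+//
+//   /api/v1/namespaces/default/pods/spark-pi-driver
+//
+// yields Pair.of("default", "pods"), so the meters "pods.get" and
+// "default.pods.get" are marked in addition to the aggregate request meters.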
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.metrics.source;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.MetricRegistry;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.javaoperatorsdk.operator.api.monitoring.Metrics;
+import io.javaoperatorsdk.operator.api.reconciler.Constants;
+import io.javaoperatorsdk.operator.api.reconciler.RetryInfo;
+import io.javaoperatorsdk.operator.processing.Controller;
+import io.javaoperatorsdk.operator.processing.GroupVersionKind;
+import io.javaoperatorsdk.operator.processing.event.Event;
+import io.javaoperatorsdk.operator.processing.event.ResourceID;
+import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceEvent;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.BaseResource;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.metrics.source.Source;
+import org.apache.spark.util.Clock;
+import org.apache.spark.util.SystemClock;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+import static io.javaoperatorsdk.operator.api.reconciler.Constants.CONTROLLER_NAME;
+
+@Slf4j
+public class OperatorJosdkMetrics implements Source, Metrics {
+    public static final String FINISHED = "finished";
+    public static final String CLEANUP = "cleanup";
+    public static final String FAILED = "failed";
+    public static final String RETRIES = "retries";
+    private final Map<String, Histogram> histograms = new ConcurrentHashMap<>();
+    private final Map<String, Counter> counters = new ConcurrentHashMap<>();
+    private final Map<String, Gauge<?>> gauges = new ConcurrentHashMap<>();
+    private static final String RECONCILIATION = "reconciliation";
+    private static final String RESOURCE = "resource";
+    private static final String EVENT = "event";
+    private static final String SUCCESS = "success";
+    private static final String FAILURE = "failure";
+    private static final String EXCEPTION = "exception";
+    private static final String PREFIX = "operator.sdk";
+    private static final String RECONCILIATIONS = "reconciliations";
+    private static final String RECONCILIATIONS_EXECUTIONS = RECONCILIATIONS + ".executions";
+    private static final String RECONCILIATIONS_QUEUE_SIZE = RECONCILIATIONS + ".queue.size";
+    private static final String SIZE = "size";
+
+    private final Clock clock;
+    private final MetricRegistry metricRegistry;
+
+    public OperatorJosdkMetrics() {
+        this.clock = new SystemClock();
+        this.metricRegistry = new MetricRegistry();
+    }
+
+    @Override
+    public String sourceName() {
+        return PREFIX;
+    }
+
+    @Override
+    public MetricRegistry metricRegistry() {
+        return metricRegistry;
+    }
+
+    @Override
+    public void controllerRegistered(Controller controller) {
+        // no-op
+        log.debug("Controller has been registered");
+    }
+
+    @Override
+    public void receivedEvent(Event event, Map<String, Object> metadata) {
+        log.debug("received event {}, metadata {}", event, metadata);
+        if (event instanceof ResourceEvent) {
+            final var action = ((ResourceEvent) event).getAction();
+            final var resource = getResourceClass(metadata);
+            final var namespaceOptional = event.getRelatedCustomResourceID().getNamespace();
+            resource.ifPresent(aClass -> getCounter(aClass, action.name().toLowerCase(), RESOURCE,
+                    EVENT).inc());
+            if (resource.isPresent() && namespaceOptional.isPresent()) {
+                getCounter(resource.get(), namespaceOptional.get(), action.name().toLowerCase(),
+                        RESOURCE, EVENT).inc();
+            }
+        }
+    }
+
+    @Override
+    public <T> T timeControllerExecution(ControllerExecution<T> execution) throws Exception {
+        log.debug("Time controller execution");
+        final var name = execution.controllerName();
+        final var resourceID = execution.resourceID();
+        final var namespaceOptional = resourceID.getNamespace();
+        final var metadata = execution.metadata();
+        final var resourceClass = getResourceClass(metadata);
+        final var execName = execution.name();
+
+        long startTime = clock.getTimeMillis();
+        try {
+            T result = execution.execute();
+            final var successType = execution.successTypeName(result);
+            if (resourceClass.isPresent()) {
+                getHistogram(resourceClass.get(), name, execName, successType).update(
+                        toSeconds(startTime));
+                getCounter(resourceClass.get(), name, execName, SUCCESS, successType).inc();
+                if (namespaceOptional.isPresent()) {
+                    getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName,
+                            successType).update(toSeconds(startTime));
+                    getCounter(resourceClass.get(), namespaceOptional.get(), name, execName,
+                            SUCCESS, successType).inc();
+                }
+            }
+            return result;
+        } catch (Exception e) {
+            log.error("Controller execution failed for resource {}, metadata {}", resourceID,
+                    metadata, e);
+            final var exception = e.getClass().getSimpleName();
+            if (resourceClass.isPresent()) {
+                getHistogram(resourceClass.get(), name, execName, FAILURE).update(
+                        toSeconds(startTime));
+                getCounter(resourceClass.get(), name, execName, FAILURE, EXCEPTION,
+                        exception).inc();
+                if (namespaceOptional.isPresent()) {
+                    getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName,
+                            FAILURE).update(toSeconds(startTime));
+                    getCounter(resourceClass.get(), namespaceOptional.get(), name, execName,
+                            FAILURE, EXCEPTION, exception).inc();
+                }
+            }
+            throw e;
+        }
+    }
+
+    @Override
+    public void reconcileCustomResource(HasMetadata resource, RetryInfo retryInfo,
+                                        Map<String, Object> metadata) {
+        log.debug("Reconcile custom resource {}, with retryInfo {} metadata {}", resource,
+                retryInfo, metadata);
+        if (retryInfo != null) {
+            final var namespace = resource.getMetadata().getNamespace();
+            getCounter(resource.getClass(), RECONCILIATION, RETRIES).inc();
+            getCounter(resource.getClass(), namespace, RECONCILIATION, RETRIES).inc();
+        }
+        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_QUEUE_SIZE).inc();
+    }
+
+    @Override
+    public void failedReconciliation(HasMetadata resource, Exception exception,
+                                     Map<String, Object> metadata) {
+        log.error("Failed reconciliation for resource {} with metadata {}", resource, metadata,
+                exception);
+        getCounter(resource.getClass(), RECONCILIATION, FAILED).inc();
+        getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
+                FAILED).inc();
+    }
+
+    @Override
+    public void finishedReconciliation(HasMetadata resource, Map<String, Object> metadata) {
+        log.debug("Finished reconciliation for resource {} with metadata {}", resource, metadata);
+        getCounter(resource.getClass(), RECONCILIATION, FINISHED).inc();
+        getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
+                FINISHED).inc();
+    }
+
+    @Override
+    public void cleanupDoneFor(ResourceID resourceID, Map<String, Object> metadata) {
+        log.debug("Cleanup Done for resource {} with metadata {}", resourceID, metadata);
+        getCounter(resourceID.getClass(), RECONCILIATION, CLEANUP).inc();
+        resourceID.getNamespace().ifPresent(
+                ns -> getCounter(resourceID.getClass(), ns, RECONCILIATION, CLEANUP).inc());
+    }
+
+    @Override
+    public <T extends Map<?, ?>> T monitorSizeOf(T map, String name) {
+        log.debug("Monitor size for {}", name);
+        var gauge = new Gauge<Integer>() {
+            @Override
+            public Integer getValue() {
+                return map.size();
+            }
+        };
+        gauges.put(MetricRegistry.name(name, SIZE), gauge);
+        return map;
+    }
+
+    @Override
+    public void reconciliationExecutionStarted(HasMetadata resource,
+                                               Map<String, Object> metadata) {
+        log.debug("Reconciliation execution started");
+        var namespace = resource.getMetadata().getNamespace();
+        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_EXECUTIONS).inc();
+        getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_EXECUTIONS).inc();
+    }
+
+    @Override
+    public void reconciliationExecutionFinished(HasMetadata resource,
+                                                Map<String, Object> metadata) {
+        log.debug("Reconciliation execution finished");
+        var namespace = resource.getMetadata().getNamespace();
+        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_EXECUTIONS).dec();
+        getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_EXECUTIONS).dec();
+        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+                RECONCILIATIONS_QUEUE_SIZE).dec();
+    }
+
+    private long toSeconds(long startTimeInMilliseconds) {
+        return TimeUnit.MILLISECONDS.toSeconds(clock.getTimeMillis() - startTimeInMilliseconds);
+    }
+
+    private Histogram getHistogram(Class<?> klass, String... names) {
+        String name = MetricRegistry.name(klass.getSimpleName(), names).toLowerCase();
+        Histogram histogram;
+        if (!histograms.containsKey(name)) {
+            histogram = metricRegistry.histogram(name);
+            histograms.put(name, histogram);
+        } else {
+            histogram = histograms.get(name);
+        }
+        return histogram;
+    }
+
+    private Counter getCounter(Class<?> klass, String... names) {
+        String name = MetricRegistry.name(klass.getSimpleName(), names).toLowerCase();
+        Counter counter;
+        if (!counters.containsKey(name)) {
+            counter = metricRegistry.counter(name);
+            counters.put(name, counter);
+        } else {
+            counter = counters.get(name);
+        }
+        return counter;
+    }
+
+    private Optional<Class<? extends BaseResource<?, ?, ?, ?, ?>>> getResourceClass(
+            Map<String, Object> metadata) {
+        var resourceGvk = (GroupVersionKind) metadata.get(Constants.RESOURCE_GVK_KEY);
+
+        if (resourceGvk == null) {
+            return Optional.empty();
+        }
+
+        Class<? extends BaseResource<?, ?, ?, ?, ?>> resourceClass;
+
+        if (resourceGvk.getKind().equals(SparkApplication.class.getSimpleName())) {
+            resourceClass = SparkApplication.class;
+        } else {
+            return Optional.empty();
+        }
+        return Optional.of(resourceClass);
+    }
+}
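Editor's note: for reference, a minimal sketch of how the helpers above compose metric names. MetricRegistry.name() dot-joins its parts and getCounter/getHistogram lowercase the result; the "spark-test" namespace below is a hypothetical value, not from the patch.

```java
import com.codahale.metrics.MetricRegistry;

public class MetricNameDemo {
    public static void main(String[] args) {
        // Class simple name first, then the variadic name parts, dot-joined and lowercased,
        // mirroring getCounter(SparkApplication.class, "spark-test", "reconciliation", "retries").
        String name = MetricRegistry.name("SparkApplication",
                "spark-test", "reconciliation", "retries").toLowerCase();
        System.out.println(name); // sparkapplication.spark-test.reconciliation.retries
    }
}
```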
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java
new file mode 100644
index 00000000..c5503d81
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.probe;
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import io.javaoperatorsdk.operator.Operator;
+import io.javaoperatorsdk.operator.RuntimeInfo;
+import io.javaoperatorsdk.operator.health.InformerHealthIndicator;
+import io.javaoperatorsdk.operator.health.Status;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.health.SentinelManager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.areOperatorsStarted;
+import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage;
+
+@Getter
+@Slf4j
+public class HealthProbe implements HttpHandler {
+    private final List<Operator> operators;
+    private final List<SentinelManager<?>> sentinelManagers = new ArrayList<>();
+
+    public HealthProbe(List<Operator> operators) {
+        this.operators = operators;
+    }
+
+    public void registerSentinelResourceManager(SentinelManager<?> sentinelManager) {
+        sentinelManagers.add(sentinelManager);
+    }
+
+    public boolean isHealthy() {
+        var operatorsAreReady = areOperatorsStarted(operators);
+        if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) {
+            return false;
+        }
+
+        var runtimeInfosAreHealthy = operators.stream().map(operator ->
+                checkInformersHealth(operator.getRuntimeInfo())
+        ).reduce((a, b) -> a && b);
+
+        if (runtimeInfosAreHealthy.isEmpty() || !runtimeInfosAreHealthy.get()) {
+            return false;
+        }
+
+        for (SentinelManager<?> sentinelManager : sentinelManagers) {
+            if (!sentinelManager.allSentinelsAreHealthy()) {
+                log.error("One sentinel manager {} reported an unhealthy condition.",
+                        sentinelManager);
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    @Override
+    public void handle(HttpExchange exchange) throws IOException {
+        if (isHealthy()) {
+            sendMessage(exchange, 200, "healthy");
+        } else {
+            sendMessage(exchange, 500, "unhealthy");
+        }
+    }
+
+    private boolean checkInformersHealth(RuntimeInfo operatorRuntimeInfo) {
+        log.info("Checking informer health");
+        List<Boolean> informersHealthList = new ArrayList<>();
+        for (var controllerEntry :
+                operatorRuntimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()
+                        .entrySet()) {
+            for (var eventSourceEntry : controllerEntry.getValue().entrySet()) {
+                Map<String, InformerHealthIndicator> informers =
+                        eventSourceEntry.getValue().informerHealthIndicators();
+                for (var informerEntry : informers.entrySet()) {
+                    if (informerEntry.getValue().getStatus() == Status.HEALTHY) {
+                        informersHealthList.add(true);
+                    } else {
+                        if (log.isErrorEnabled()) {
+                            log.error(
+                                    "Controller: {}, Event Source: {}, Informer: {} is not in a "
+                                            + "healthy state",
+                                    controllerEntry.getKey(), eventSourceEntry.getKey(),
+                                    informerEntry.getKey());
+                        }
+                        informersHealthList.add(false);
+                    }
+                }
+            }
+        }
+        return informersHealthList.stream().reduce((a, b) -> a && b).orElse(true);
+    }
+}
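Editor's note: a minimal sketch of polling the endpoint HealthProbe serves. The port is a placeholder; the real value comes from SparkOperatorConf.OperatorProbePort in your deployment.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class HealthzCheckDemo {
    public static void main(String[] args) throws Exception {
        // Port 18080 is an assumption for illustration only.
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:18080/healthz")).GET().build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // HealthProbe.handle answers 200/"healthy" or 500/"unhealthy".
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```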
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java
new file mode 100644
index 00000000..30cd980d
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.probe;
+
+import com.sun.net.httpserver.HttpServer;
+import io.javaoperatorsdk.operator.Operator;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.health.SentinelManager;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorProbePort;
+
+@Slf4j
+public class ProbeService {
+    public static final String HEALTHZ = "/healthz";
+    public static final String STARTUP = "/startup";
+    HttpServer server;
+
+    public ProbeService(List<Operator> operators, SentinelManager<?> sentinelManager) {
+        HealthProbe healthProbe = new HealthProbe(operators);
+        healthProbe.registerSentinelResourceManager(sentinelManager);
+        try {
+            server = HttpServer.create(new InetSocketAddress(OperatorProbePort.getValue()), 0);
+        } catch (IOException e) {
+            throw new RuntimeException("Failed to create Probe Service Server", e);
+        }
+        server.createContext(STARTUP, new ReadinessProbe(operators));
+        server.createContext(HEALTHZ, healthProbe);
+        server.setExecutor(null);
+    }
+
+    public void start() {
+        log.info("Starting probe service");
+        server.start();
+    }
+
+    public void stop() {
+        log.info("Stopping probe service");
+        server.stop(0);
+    }
+}
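Editor's note: a sketch of how an entry point might wire the probe service, loosely following the classes in this patch. The no-arg SentinelManager construction is an assumption; the real SparkOperator main class may build these differently.

```java
import java.util.List;

import io.javaoperatorsdk.operator.Operator;
import org.apache.spark.kubernetes.operator.SparkApplication;
import org.apache.spark.kubernetes.operator.health.SentinelManager;
import org.apache.spark.kubernetes.operator.probe.ProbeService;

public class ProbeWiringDemo {
    public static void main(String[] args) {
        Operator operator = new Operator();
        // Assumption: SentinelManager has a usable no-arg constructor.
        SentinelManager<SparkApplication> sentinelManager = new SentinelManager<>();
        ProbeService probeService = new ProbeService(List.of(operator), sentinelManager);
        probeService.start(); // serves /startup (readiness) and /healthz (health)
        Runtime.getRuntime().addShutdownHook(new Thread(probeService::stop));
    }
}
```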
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java
new file mode 100644
index 00000000..bd810a2f
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.probe;
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import io.javaoperatorsdk.operator.Operator;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.areOperatorsStarted;
+import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage;
+
+@Slf4j
+public class ReadinessProbe implements HttpHandler {
+    private final List<Operator> operators;
+
+    public ReadinessProbe(List<Operator> operators) {
+        this.operators = operators;
+    }
+
+    @Override
+    public void handle(HttpExchange httpExchange) throws IOException {
+        var operatorsAreReady = areOperatorsStarted(operators);
+        if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) {
+            sendMessage(httpExchange, 400, "spark operators are not ready yet");
+            return;
+        }
+
+        if (!passRbacCheck()) {
+            sendMessage(httpExchange, 403, "required rbac test failed, operators are not ready");
+            return;
+        }
+
+        sendMessage(httpExchange, 200, "started");
+    }
+
+    public boolean passRbacCheck() {
+        return true;
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
new file mode 100644
index 00000000..7fa9df37
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler;
+
+import lombok.Data;
+
+import java.time.Duration;
+
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.AppReconcileIntervalSeconds;
+
+/**
+ * Represents the progress of a reconcile request.
+ * - completed : set to true if no more actions are expected in the same reconciliation
+ * - requeue : whether the resource needs to be reconciled again and, if so, after what delay
+ */
+@Data
+public class ReconcileProgress {
+    private boolean completed;
+    private boolean requeue;
+    private Duration requeueAfterDuration;
+
+    private ReconcileProgress(boolean completed, boolean requeue, Duration requeueAfterDuration) {
+        this.completed = completed;
+        this.requeue = requeue;
+        this.requeueAfterDuration = requeueAfterDuration;
+    }
+
+    public static ReconcileProgress proceed() {
+        return new ReconcileProgress(false, true,
+                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+    }
+
+    public static ReconcileProgress completeAndDefaultRequeue() {
+        return new ReconcileProgress(true, true,
+                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+    }
+
+    public static ReconcileProgress completeAndRequeueAfter(Duration requeueAfterDuration) {
+        return new ReconcileProgress(true, true, requeueAfterDuration);
+    }
+
+    public static ReconcileProgress completeAndImmediateRequeue() {
+        return new ReconcileProgress(true, true, Duration.ZERO);
+    }
+
+    public static ReconcileProgress completeAndNoRequeue() {
+        return new ReconcileProgress(true, false, Duration.ZERO);
+    }
+}
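Editor's note: a short illustration of the factory methods above, as a reconcile step might use them (the surrounding step logic is hypothetical).

```java
import java.time.Duration;

import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress;

public class ReconcileProgressDemo {
    public static void main(String[] args) {
        // Finish this reconciliation and ask to be reconciled again in 30 seconds.
        ReconcileProgress requeued =
                ReconcileProgress.completeAndRequeueAfter(Duration.ofSeconds(30));
        // Keep executing the remaining steps of the current reconciliation.
        ReconcileProgress continuing = ReconcileProgress.proceed();
        System.out.println(requeued + " / " + continuing);
    }
}
```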
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
new file mode 100644
index 00000000..7a927cb6
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler;
+
+import io.fabric8.kubernetes.api.model.PodTemplateSpec;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.ApplicationClientWorker;
+import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.decorators.DriverDecorator;
+import org.apache.spark.kubernetes.operator.utils.ModelUtils;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.overrideDriverTemplate;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.overrideExecutorTemplate;
+
+@Slf4j
+public class SparkApplicationReconcileUtils {
+    public static boolean enableForceDelete(SparkApplication app) {
+        long timeoutThreshold = app.getSpec().getApplicationTolerations()
+                .getApplicationTimeoutConfig().getForceTerminationGracePeriodMillis();
+        Instant lastTransitionTime =
+                Instant.parse(app.getStatus().getCurrentState().getLastTransitionTime());
+        return lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now());
+    }
+
+    public static ApplicationResourceSpec buildResourceSpec(final SparkApplication app,
+                                                            final KubernetesClient client) {
+        Map<String, String> confOverrides = overrideMetadataForSecondaryResources(app);
+        ApplicationResourceSpec resourceSpec =
+                ApplicationClientWorker.getResourceSpec(app, client, confOverrides);
+        cleanUpTempResourcesForApp(app, confOverrides);
+        DriverDecorator decorator = new DriverDecorator(app);
+        decorator.decorate(resourceSpec.getConfiguredPod());
+        return resourceSpec;
+    }
+
+    private static Map<String, String> overrideMetadataForSecondaryResources(
+            final SparkApplication app) {
+        Map<String, String> confOverrides = new HashMap<>();
+        SparkReconcilerUtils.sparkAppResourceLabels(app).forEach((k, v) -> {
+            confOverrides.put("spark.kubernetes.driver.label." + k, v);
+            confOverrides.put("spark.kubernetes.driver.service.label." + k, v);
+            confOverrides.put("spark.kubernetes.executor.label." + k, v);
+        });
+        confOverrides.put("spark.kubernetes.namespace", app.getMetadata().getNamespace());
+        if (!app.getSpec().getSparkConf().containsKey("spark.app.name")) {
+            confOverrides.put("spark.app.name", app.getMetadata().getName());
+        }
+        // FIXME: avoid this file flushing
+        confOverrides.putAll(getOrCreateLocalFileForDriverSpec(app, confOverrides));
+        confOverrides.putAll(getOrCreateLocalFileForExecutorSpec(app, confOverrides));
+        return confOverrides;
+    }
+
+    private static void cleanUpTempResourcesForApp(final SparkApplication app,
+                                                   Map<String, String> confOverrides) {
+        if (overrideDriverTemplate(app.getSpec())) {
+            deleteLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY);
+        }
+        if (overrideExecutorTemplate(app.getSpec())) {
+            deleteLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY);
+        }
+    }
+
+    private static Optional<File> getLocalFileFromPathKey(Map<String, String> confOverrides,
+                                                          String pathKey) {
+        if (confOverrides.containsKey(pathKey)) {
+            String filePath = confOverrides.get(pathKey);
+            if (filePath.startsWith("local") || filePath.startsWith("file")
+                    || filePath.startsWith("/")) {
+                return Optional.of(new File(filePath));
+            }
+        }
+        return Optional.empty();
+    }
+
+    private static void deleteLocalFileFromPathKey(Map<String, String> confOverrides,
+                                                   String pathKey) {
+        Optional<File> localFile = Optional.empty();
+        boolean deleted = false;
+        try {
+            localFile = getLocalFileFromPathKey(confOverrides, pathKey);
+            if (localFile.isPresent() && localFile.get().exists() && localFile.get().isFile()) {
+                deleted = localFile.get().delete();
+            } else {
+                log.warn("Local temp file not found at {}", pathKey);
+            }
+        } catch (Throwable t) {
+            log.error("Failed to delete temp file. Attempting delete upon exit.", t);
+        } finally {
+            if (!deleted && localFile.isPresent() && localFile.get().exists()) {
+                localFile.get().deleteOnExit();
+            }
+        }
+    }
+
+    private static Map<String, String> getOrCreateLocalFileForDriverSpec(
+            final SparkApplication app,
+            final Map<String, String> confOverrides) {
+        if (overrideDriverTemplate(app.getSpec())) {
+            Optional<File> localFile =
+                    getLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY);
+            if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) {
+                String filePath = createLocalFileForPodTemplateSpec(
+                        app.getSpec().getDriverSpec().getPodTemplateSpec(),
+                        app.getMetadata().getUid() + "-driver-");
+                return Collections.singletonMap(DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY, filePath);
+            }
+        }
+        return Collections.emptyMap();
+    }
+
+    private static Map<String, String> getOrCreateLocalFileForExecutorSpec(
+            final SparkApplication app,
+            final Map<String, String> confOverrides) {
+        if (overrideExecutorTemplate(app.getSpec())) {
+            Optional<File> localFile =
+                    getLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY);
+            if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) {
+                String filePath = createLocalFileForPodTemplateSpec(
+                        app.getSpec().getExecutorSpec().getPodTemplateSpec(),
+                        app.getMetadata().getUid() + "-executor-");
+                return Collections.singletonMap(EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY, filePath);
+            }
+        }
+        return Collections.emptyMap();
+    }
+
+    /**
+     * Flush a pod template spec to a local file.
+     *
+     * @return temp file path
+     */
+    private static String createLocalFileForPodTemplateSpec(final PodTemplateSpec podTemplateSpec,
+                                                            final String tempFilePrefix) {
+        try {
+            File tmpFile = File.createTempFile(tempFilePrefix, ".json");
+            try (OutputStreamWriter writer = new OutputStreamWriter(
+                    new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
+                writer.write(
+                        ModelUtils.asJsonString(
+                                ModelUtils.getPodFromTemplateSpec(podTemplateSpec)));
+            }
+            String path = tmpFile.getAbsolutePath();
+            if (log.isDebugEnabled()) {
+                log.debug("Temp file written to {}", tmpFile.getAbsolutePath());
+            }
+            return path;
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}
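Editor's note: a sketch of the conf-override keys that overrideMetadataForSecondaryResources builds. The label key "spark-app-name" and the app name are hypothetical; the real entries come from SparkReconcilerUtils.sparkAppResourceLabels(app) and Constants.

```java
import java.util.HashMap;
import java.util.Map;

public class LabelOverridesDemo {
    public static void main(String[] args) {
        // Hypothetical app resource labels for an app named "pi-example".
        Map<String, String> labels = Map.of("spark-app-name", "pi-example");
        Map<String, String> confOverrides = new HashMap<>();
        labels.forEach((k, v) -> {
            confOverrides.put("spark.kubernetes.driver.label." + k, v);
            confOverrides.put("spark.kubernetes.driver.service.label." + k, v);
            confOverrides.put("spark.kubernetes.executor.label." + k, v);
        });
        confOverrides.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```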
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java
new file mode 100644
index 00000000..3e7193ec
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler;
+
+import io.fabric8.kubernetes.api.model.Pod;
+import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.Cleaner;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.DeleteControl;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusHandler;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusUpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceContext;
+import io.javaoperatorsdk.operator.api.reconciler.EventSourceInitializer;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.processing.event.source.EventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
+import io.javaoperatorsdk.operator.processing.event.source.informer.Mappers;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.Constants;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext;
+import org.apache.spark.kubernetes.operator.health.SentinelManager;
+import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverReadyObserver;
+import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverRunningObserver;
+import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverStartObserver;
+import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverTimeoutObserver;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppCleanUpStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppResourceObserveStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppInitStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppReconcileStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppRunningStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppTerminatedStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppValidateStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.UnknownStateStep;
+import org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils;
+import org.apache.spark.kubernetes.operator.utils.LoggingUtils;
+import org.apache.spark.kubernetes.operator.utils.StatusRecorder;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue;
+import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.commonResourceLabelsStr;
+
+/**
+ * Reconciler for Spark Application.
+ * Performs a sanity check on the app, identifies the reconcile steps based on the app status,
+ * and executes the steps.
+ */
+@ControllerConfiguration
+@Slf4j
+@RequiredArgsConstructor
+public class SparkApplicationReconciler
+        implements Reconciler<SparkApplication>,
+        ErrorStatusHandler<SparkApplication>,
+        EventSourceInitializer<SparkApplication>,
+        Cleaner<SparkApplication> {
+    private final StatusRecorder statusRecorder;
+    private final SentinelManager<SparkApplication> sentinelManager;
+
+    @Override
+    public UpdateControl<SparkApplication> reconcile(SparkApplication sparkApplication,
+                                                     Context<SparkApplication> context)
+            throws Exception {
+        LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC();
+        try {
+            trackedMDC.set(sparkApplication);
+            if (sentinelManager.handleSentinelResourceReconciliation(sparkApplication,
+                    context.getClient())) {
+                return UpdateControl.noUpdate();
+            }
+            log.debug("Start reconciliation.");
+            statusRecorder.updateStatusFromCache(sparkApplication);
+            SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context);
+            List<AppReconcileStep> reconcileSteps = getReconcileSteps(sparkApplication);
+            for (AppReconcileStep step : reconcileSteps) {
+                ReconcileProgress progress = step.reconcile(ctx, statusRecorder);
+                if (progress.isCompleted()) {
+                    return SparkReconcilerUtils.toUpdateControl(sparkApplication, progress);
+                }
+            }
+            return SparkReconcilerUtils.toUpdateControl(sparkApplication,
+                    completeAndDefaultRequeue());
+
+        } finally {
+            log.debug("Reconciliation completed.");
+            trackedMDC.reset();
+        }
+    }
+
+    @Override
+    public ErrorStatusUpdateControl<SparkApplication> updateErrorStatus(
+            SparkApplication sparkApplication,
+            Context<SparkApplication> context,
+            Exception e) {
+        LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC();
+        try {
+            trackedMDC.set(sparkApplication);
+            context.getRetryInfo().ifPresent(retryInfo -> {
+                if (log.isErrorEnabled()) {
+                    log.error("Failed attempt: {}, last attempt: {}", retryInfo.getAttemptCount(),
+                            retryInfo.isLastAttempt());
+                }
+            });
+            return ErrorStatusUpdateControl.noStatusUpdate();
+        } finally {
+            trackedMDC.reset();
+        }
+    }
+
+    @Override
+    public Map<String, EventSource> prepareEventSources(
+            EventSourceContext<SparkApplication> context) {
+        var podEventSource =
+                new InformerEventSource<>(InformerConfiguration.from(Pod.class, context)
+                        .withSecondaryToPrimaryMapper(
+                                Mappers.fromLabel(Constants.LABEL_SPARK_APPLICATION_NAME))
+                        .withLabelSelector(commonResourceLabelsStr())
+                        .build(), context);
+        return EventSourceInitializer.nameEventSources(podEventSource);
+    }
+
+    protected List<AppReconcileStep> getReconcileSteps(final SparkApplication app) {
+        List<AppReconcileStep> steps = new ArrayList<>();
+        steps.add(new AppValidateStep());
+        steps.add(new AppTerminatedStep());
+        switch (app.getStatus().getCurrentState().getCurrentStateSummary()) {
+            case SUBMITTED:
+            case SCHEDULED_TO_RESTART:
+                steps.add(new AppInitStep());
+                break;
+            case DRIVER_REQUESTED:
+            case DRIVER_STARTED:
+                steps.add(new AppResourceObserveStep(
+                        List.of(new AppDriverStartObserver(), new AppDriverReadyObserver())));
+                steps.add(new AppResourceObserveStep(
+                        Collections.singletonList(new AppDriverRunningObserver())));
+                steps.add(new AppResourceObserveStep(
+                        Collections.singletonList(new AppDriverTimeoutObserver())));
+                break;
+            case DRIVER_READY:
+            case INITIALIZED_BELOW_THRESHOLD_EXECUTORS:
+            case RUNNING_HEALTHY:
+            case RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS:
+                steps.add(new AppRunningStep());
+                steps.add(new AppResourceObserveStep(
+                        Collections.singletonList(new AppDriverRunningObserver())));
+                steps.add(new AppResourceObserveStep(
+                        Collections.singletonList(new AppDriverTimeoutObserver())));
+                break;
+            case SPARK_SESSION_INITIALIZATION_TIMED_OUT:
+            case DRIVER_LAUNCH_TIMED_OUT:
+            case EXECUTORS_LAUNCH_TIMED_OUT:
+            case SUCCEEDED:
+            case DRIVER_EVICTED:
+            case FAILED:
+            case SCHEDULING_FAILURE:
+                steps.add(new AppCleanUpStep());
+                break;
+            default:
+                steps.add(new UnknownStateStep());
+                break;
+        }
+        return steps;
+    }
+
+    /**
+     * Best-effort graceful termination upon delete.
+     *
+     * @param sparkApplication the resource that is marked for deletion
+     * @param context          the context with which the operation is executed
+     * @return DeleteControl, with requeue if needed
+     */
+    @Override
+    public DeleteControl cleanup(SparkApplication sparkApplication,
+                                 Context<SparkApplication> context) {
+        LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC();
+        DeleteControl deleteControl = DeleteControl.defaultDelete();
+        try {
+            trackedMDC.set(sparkApplication);
+            log.info("Cleaning up resources for SparkApp.");
+            SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context);
+            List<AppReconcileStep> cleanupSteps = new ArrayList<>();
+            cleanupSteps.add(new AppValidateStep());
+            cleanupSteps.add(new AppTerminatedStep());
+            cleanupSteps.add(new AppCleanUpStep(ApplicationStatusUtils::appCancelled));
+            for (AppReconcileStep step : cleanupSteps) {
+                ReconcileProgress progress = step.reconcile(ctx, statusRecorder);
+                if (progress.isCompleted()) {
+                    if (progress.isRequeue()) {
+                        return DeleteControl.noFinalizerRemoval().rescheduleAfter(
                                progress.getRequeueAfterDuration());
+                    } else {
+                        break;
+                    }
+                }
+            }
+        } finally {
+            log.info("Cleanup completed");
+            trackedMDC.reset();
+        }
+        statusRecorder.removeCachedStatus(sparkApplication);
+        return deleteControl;
+    }
+}
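Editor's note: a purely illustrative step against the AppReconcileStep contract that getReconcileSteps assembles; the abstract method shape is inferred from AppCleanUpStep's override later in this patch, and the class is hypothetical.

```java
import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext;
import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress;
import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppReconcileStep;
import org.apache.spark.kubernetes.operator.utils.StatusRecorder;

// A do-nothing step: makes no status changes and lets the remaining steps
// for the current state run within the same reconciliation.
public class NoOpStep extends AppReconcileStep {
    @Override
    public ReconcileProgress reconcile(SparkApplicationContext context,
                                       StatusRecorder statusRecorder) {
        return ReconcileProgress.proceed();
    }
}
```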
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java
new file mode 100644
index 00000000..83bc051d
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.fabric8.kubernetes.api.model.DeletionPropagation;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientException;
+import io.javaoperatorsdk.operator.api.reconciler.DeleteControl;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.spark.kubernetes.operator.BaseResource;
+import org.apache.spark.kubernetes.operator.Constants;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import static org.apache.spark.kubernetes.operator.Constants.LABEL_RESOURCE_NAME;
+import static org.apache.spark.kubernetes.operator.Constants.LABEL_SPARK_OPERATOR_NAME;
+import static org.apache.spark.kubernetes.operator.Constants.LABEL_SPARK_ROLE_DRIVER_VALUE;
+import static org.apache.spark.kubernetes.operator.Constants.LABEL_SPARK_ROLE_EXECUTOR_VALUE;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DriverCreateMaxAttempts;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.ForegroundRequestTimeoutSeconds;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorAppName;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorWatchedNamespaces;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.buildOwnerReferenceTo;
+import static org.apache.spark.kubernetes.operator.utils.SparkExceptionUtils.isConflictForExistingResource;
+
+@Slf4j
+public class SparkReconcilerUtils {
+
+    private static final ObjectMapper objectMapper = new ObjectMapper();
+
+    public static Map<String, String> commonOperatorResourceLabels() {
+        Map<String, String> labels = new HashMap<>();
+        labels.put(LABEL_RESOURCE_NAME, OperatorAppName.getValue());
+        return labels;
+    }
+
+    public static Map<String, String> defaultOperatorConfigLabels() {
+        Map<String, String> labels = new HashMap<>(commonOperatorResourceLabels());
+        labels.put("app.kubernetes.io/component", "operator-dynamic-config-overrides");
+        return labels;
+    }
+
+    public static Map<String, String> commonManagedResourceLabels() {
+        Map<String, String> labels = new HashMap<>();
+        labels.put(LABEL_SPARK_OPERATOR_NAME, OperatorAppName.getValue());
+        return labels;
+    }
+
+    public static Map<String, String> sparkAppResourceLabels(final SparkApplication app) {
+        return sparkAppResourceLabels(app.getMetadata().getName());
+    }
+
+    public static Map<String, String> sparkAppResourceLabels(final String appName) {
+        Map<String, String> labels = commonManagedResourceLabels();
+        labels.put(Constants.LABEL_SPARK_APPLICATION_NAME, appName);
+        return labels;
+    }
+
+    public static Map<String, String> driverLabels(final SparkApplication sparkApplication) {
+        Map<String, String> labels = sparkAppResourceLabels(sparkApplication);
+        labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_DRIVER_VALUE);
+        return labels;
+    }
+
+    public static Map<String, String> executorLabels(final SparkApplication sparkApplication) {
+        Map<String, String> labels = sparkAppResourceLabels(sparkApplication);
+        labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_EXECUTOR_VALUE);
+        return labels;
+    }
+
+    public static Set<String> getWatchedNamespaces() {
+        String namespaces = OperatorWatchedNamespaces.getValue();
+        if (StringUtils.isNotEmpty(namespaces)) {
+            return Arrays.stream(namespaces.split(",")).map(String::trim)
+                    .collect(Collectors.toSet());
+        }
+        return Collections.emptySet();
+    }
+
+    /**
+     * Labels to be applied to all created resources, as a comma-separated string.
+     *
+     * @return labels string
+     */
+    public static String commonResourceLabelsStr() {
+        return labelsAsStr(commonManagedResourceLabels());
+    }
+
+    public static String labelsAsStr(Map<String, String> labels) {
+        return labels
+                .entrySet()
+                .stream()
+                .map(e -> String.join("=", e.getKey(), e.getValue()))
+                .collect(Collectors.joining(","));
+    }
+
+    public static <O extends BaseResource<?, ?, ?, ?, ?>> UpdateControl<O> toUpdateControl(
+            O resource, ReconcileProgress reconcileProgress) {
+        // reconciler already handled resource and status update, skip update at lower level
+        UpdateControl<O> updateControl = UpdateControl.noUpdate();
+        if (reconcileProgress.isRequeue()) {
+            return updateControl.rescheduleAfter(reconcileProgress.getRequeueAfterDuration());
+        } else {
+            return updateControl;
+        }
+    }
+
+    public static <O extends BaseResource<?, ?, ?, ?, ?>> DeleteControl toDeleteControl(
+            O resource, ReconcileProgress reconcileProgress) {
+        if (reconcileProgress.isRequeue()) {
+            return DeleteControl.noFinalizerRemoval().rescheduleAfter(
+                    reconcileProgress.getRequeueAfterDuration());
+        } else {
+            return DeleteControl.defaultDelete();
+        }
+    }
+
+    public static <T extends HasMetadata> Optional<T> getOrCreateSecondaryResource(
+            final KubernetesClient client,
+            final T resource) {
+        Optional<T> current = getResource(client, resource);
+        if (current.isEmpty()) {
+            // Adding retry logic to overcome a k8s bug:
+            // https://github.com/kubernetes/kubernetes/issues/67761
+            long maxAttempts = DriverCreateMaxAttempts.getValue();
+            long attemptCount = 1;
+            while (true) {
+                try {
+                    current = Optional.ofNullable(client.resource(resource).create());
+                    break;
+                } catch (KubernetesClientException e) {
+                    if (log.isErrorEnabled()) {
+                        log.error(
+                                "Failed to request resource with responseCode={} "
+                                        + "attemptCount={}/{}",
+                                e.getCode(), attemptCount, maxAttempts);
+                    }
+                    // retry only on 409 Conflict
+                    if (e.getCode() != 409) {
+                        throw e;
+                    } else {
+                        if (isConflictForExistingResource(e)) {
+                            current = getResource(client, resource);
+                            if (current.isPresent()) {
+                                return current;
+                            }
+                        }
+                        if (++attemptCount > maxAttempts) {
+                            log.error("Max Retries exceeded while trying to create resource");
+                            throw e;
+                        }
+                    }
+                }
+            }
+        }
+        return current;
+    }
+
+    public static void addOwnerReferenceSecondaryResource(final KubernetesClient client,
+                                                          final List<HasMetadata> resources,
+                                                          final HasMetadata owner) {
+
+        resources.forEach(r -> {
+            ObjectMeta metaData = new ObjectMetaBuilder(r.getMetadata())
+                    .addToOwnerReferences(buildOwnerReferenceTo(owner))
+                    .build();
+            r.setMetadata(metaData);
+        });
+        client.resourceList(resources).forceConflicts().serverSideApply();
+    }
+
+    public static <T extends HasMetadata> Optional<T> getResource(final KubernetesClient client,
+                                                                  final T desired) {
+        T resource = null;
+        try {
+            resource = client.resource(desired).get();
+        } catch (KubernetesClientException e) {
+            if (e.getCode() == 404) {
+                return Optional.empty();
+            }
+        }
+        return Optional.ofNullable(resource);
+    }
+
+    public static <T extends HasMetadata> void deleteResourceIfExists(
+            final KubernetesClient client,
+            final T resource,
+            boolean forceDelete) {
+        try {
+            if (forceDelete) {
+                client.resource(resource)
+                        .withGracePeriod(0L)
+                        .delete();
+            } else {
+                client.resource(resource)
+                        .withPropagationPolicy(DeletionPropagation.FOREGROUND)
+                        .withTimeout(ForegroundRequestTimeoutSeconds.getValue(), TimeUnit.SECONDS)
+                        .delete();
+            }
+        } catch (KubernetesClientException e) {
+            if (e.getCode() != 404) {
+                throw e;
+            } else {
+                log.info("Resource to delete does not exist, proceeding...");
+            }
+        }
+    }
+
+    public static <T> T clone(T object) {
+        if (object == null) {
+            return null;
+        }
+        try {
+            return (T)
+                    objectMapper.readValue(
+                            objectMapper.writeValueAsString(object), object.getClass());
+        } catch (JsonProcessingException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+}
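Editor's note: a quick illustration of labelsAsStr. The label values are hypothetical; a LinkedHashMap keeps the output order deterministic.

```java
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;

public class LabelsAsStrDemo {
    public static void main(String[] args) {
        Map<String, String> labels = new LinkedHashMap<>();
        labels.put("app.kubernetes.io/name", "spark-operator"); // illustrative values
        labels.put("spark-app-name", "pi-example");
        System.out.println(SparkReconcilerUtils.labelsAsStr(labels));
        // -> app.kubernetes.io/name=spark-operator,spark-app-name=pi-example
    }
}
```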
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java
new file mode 100644
index 00000000..a443d104
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler.observers;
+
+import io.fabric8.kubernetes.api.model.Pod;
+import org.apache.spark.kubernetes.operator.Constants;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+import org.apache.spark.kubernetes.operator.utils.PodUtils;
+
+import java.util.Optional;
+
+/**
+ * Observes whether the driver is ready.
+ */
+public class AppDriverReadyObserver extends BaseAppDriverObserver {
+    @Override
+    public Optional<ApplicationState> observe(Pod driver,
+                                              ApplicationSpec spec,
+                                              ApplicationStatus currentStatus) {
+        if (ApplicationStateSummary.DRIVER_READY.ordinal()
+                <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) {
+            return Optional.empty();
+        }
+        if (PodUtils.isPodReady(driver)) {
+            return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_READY,
+                    Constants.DriverReady));
+        }
+        return observeDriverTermination(driver, true, spec);
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java
new file mode 100644
index 00000000..02f6e0ad
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler.observers;
+
+import io.fabric8.kubernetes.api.model.Pod;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+
+import java.util.Optional;
+
+/**
+ * Observes whether the driver reaches running state (in other words, whether it is at least
+ * scheduled).
+ */
+public class AppDriverRunningObserver extends BaseAppDriverObserver {
+    @Override
+    public Optional<ApplicationState> observe(Pod driver,
+                                              ApplicationSpec spec,
+                                              ApplicationStatus currentStatus) {
+        return observeDriverTermination(driver, true, spec);
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java
new file mode 100644
index 00000000..839adaca
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler.observers;
+
+import io.fabric8.kubernetes.api.model.Pod;
+import org.apache.spark.kubernetes.operator.Constants;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+import org.apache.spark.kubernetes.operator.utils.PodUtils;
+
+import java.util.Optional;
+
+public class AppDriverStartObserver extends BaseAppDriverObserver {
+    @Override
+    public Optional<ApplicationState> observe(Pod driver,
+                                              ApplicationSpec spec,
+                                              ApplicationStatus currentStatus) {
+        if (ApplicationStateSummary.DRIVER_STARTED.ordinal()
+                <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) {
+            return Optional.empty();
+        }
+        if (PodUtils.isPodStarted(driver, spec)) {
+            return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_STARTED,
+                    Constants.DriverRunning));
+        }
+        return observeDriverTermination(driver, false, spec);
+    }
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java
new file mode 100644
index 00000000..34a0383c
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler.observers;
+
+import io.fabric8.kubernetes.api.model.Pod;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.spec.ApplicationTimeoutConfig;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+import org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils;
+
+import java.time.Instant;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+/**
+ * Observes driver status and times out as configured in the app spec.
+ */
+public class AppDriverTimeoutObserver extends BaseAppDriverObserver {
+
+    /**
+     * The operator may proactively terminate an application if it has stayed in a certain state
+     * for too long. This helps avoid resource deadlock when the app cannot proceed.
+     * Such states include:
+     * - DRIVER_REQUESTED -> goes to DRIVER_LAUNCH_TIMED_OUT if the driver pod cannot be
+     * scheduled or cannot start running
+     * - DRIVER_STARTED -> goes to SPARK_SESSION_INITIALIZATION_TIMED_OUT if the Spark session
+     * cannot be initialized
+     * - DRIVER_READY / EXECUTOR_REQUESTED / EXECUTOR_SCHEDULED /
+     * INITIALIZED_BELOW_THRESHOLD_EXECUTORS
+     * -> go to EXECUTORS_LAUNCH_TIMED_OUT if the app cannot acquire at least the minimal number
+     * of executors in the given time
+     * The operator will NOT proactively stop the app if it has acquired enough executors and
+     * later loses them. Users may build additional layers to alert and act on such scenarios.
+     * The timeout check is performed at the end of reconcile, and only if there are no other
+     * updates to perform in the same reconcile action.
+     */
+    @Override
+    public Optional<ApplicationState> observe(Pod driver,
+                                              ApplicationSpec spec,
+                                              ApplicationStatus currentStatus) {
+        Instant lastTransitionTime =
+                Instant.parse(currentStatus.getCurrentState().getLastTransitionTime());
+        long timeoutThreshold;
+        Supplier<ApplicationState> supplier;
+        ApplicationTimeoutConfig timeoutConfig =
+                spec.getApplicationTolerations().getApplicationTimeoutConfig();
+        switch (currentStatus.getCurrentState().getCurrentStateSummary()) {
+            case DRIVER_REQUESTED:
+                timeoutThreshold = timeoutConfig.getDriverStartTimeoutMillis();
+                supplier = ApplicationStatusUtils::driverLaunchTimedOut;
+                break;
+            case DRIVER_STARTED:
+                timeoutThreshold = timeoutConfig.getSparkSessionStartTimeoutMillis();
+                supplier = ApplicationStatusUtils::driverReadyTimedOut;
+                break;
+            case DRIVER_READY:
+            case INITIALIZED_BELOW_THRESHOLD_EXECUTORS:
+                timeoutThreshold = timeoutConfig.getExecutorStartTimeoutMillis();
+                supplier = ApplicationStatusUtils::executorLaunchTimedOut;
+                break;
+            default:
+                // No timeout check needed for other states
+                return Optional.empty();
+        }
+        if (timeoutThreshold > 0L
+                && lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now())) {
+            ApplicationState state = supplier.get();
+            state.setLastObservedDriverStatus(driver.getStatus());
+            return Optional.of(state);
+        }
+        return Optional.empty();
+    }
+}
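Editor's note: the timeout test above reduces to plain Instant arithmetic; a tiny self-contained illustration with made-up values follows.

```java
import java.time.Instant;

public class TimeoutCheckDemo {
    public static void main(String[] args) {
        // Both the transition time and the threshold below are illustrative.
        Instant lastTransitionTime = Instant.parse("2024-03-31T17:00:00Z");
        long timeoutThresholdMillis = 300_000L; // e.g. a 5-minute driver start timeout
        boolean timedOut = timeoutThresholdMillis > 0L
                && lastTransitionTime.plusMillis(timeoutThresholdMillis)
                        .isBefore(Instant.now());
        System.out.println(timedOut);
    }
}
```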
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java
new file mode 100644
index 00000000..21d1059d
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler.observers;
+
+import io.fabric8.kubernetes.api.model.ContainerStatus;
+import io.fabric8.kubernetes.api.model.Pod;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationState;
+import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+import org.apache.spark.kubernetes.operator.utils.PodPhase;
+import org.apache.spark.kubernetes.operator.utils.PodUtils;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import static org.apache.spark.kubernetes.operator.Constants.DriverCompletedMessage;
+import static org.apache.spark.kubernetes.operator.Constants.DriverFailedInitContainersMessage;
+import static org.apache.spark.kubernetes.operator.Constants.DriverFailedMessage;
+import static org.apache.spark.kubernetes.operator.Constants.DriverRestartedMessage;
+import static org.apache.spark.kubernetes.operator.Constants.DriverSucceededMessage;
+import static org.apache.spark.kubernetes.operator.Constants.DriverTerminatedBeforeInitializationMessage;
+import static org.apache.spark.kubernetes.operator.utils.ModelUtils.isDriverMainContainer;
+
+/**
+ * Observes driver pod status and updates the application status as needed.
+ */
+@Slf4j
+public abstract class BaseAppDriverObserver extends
+        BaseSecondaryResourceObserver<ApplicationAttemptSummary, ApplicationState,
+                ApplicationSpec, ApplicationStatus, Pod> {
+
+    /**
+     * Check whether the driver pod (and thus the application) has actually terminated.
+     * This is determined by the status of the containers - and only by the containers whose
+     * names match the given filter; e.g. you can use "s -> true" to evaluate all containers.
+     * The driver is considered 'failed' if any init container failed, or if any of the matched
+     * container(s) has
+     * 1. failed (isTerminated, non-zero exit code)
+     * 2. restarted
+     * 3. (corner case) exited 0 without SparkContext / SparkSession initialization
+     * The driver is considered 'succeeded' if
+     * 1. the pod is in the Succeeded phase, or
+     * 2. the container(s) exited 0 after SparkContext / SparkSession initialization
+     *
+     * @param driverPod   the driverPod
+     * @param driverReady whether SparkContext / SparkSession has ever been initialized for this
+     *                    pod
+     * @return the ApplicationState to be updated if the pod has terminated; an empty Optional
+     * if the pod is still running
+     */
+    protected Optional<ApplicationState> observeDriverTermination(final Pod driverPod,
+                                                                  final boolean driverReady,
+                                                                  final ApplicationSpec spec) {
+        if (driverPod.getStatus() == null
+                || driverPod.getStatus().getContainerStatuses() == null
+                || driverPod.getStatus().getContainerStatuses().isEmpty()) {
+            log.warn("Cannot determine driver pod status; the pod may be in pending state.");
+            return Optional.empty();
+        }
+
+        if (PodPhase.FAILED.equals(PodPhase.getPhase(driverPod))) {
+            ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED,
+                    DriverFailedMessage);
+            if ("Evicted".equalsIgnoreCase(driverPod.getStatus().getReason())) {
+                applicationState = new ApplicationState(ApplicationStateSummary.DRIVER_EVICTED,
+                        DriverFailedMessage);
+            }
+            applicationState.setLastObservedDriverStatus(driverPod.getStatus());
+            return Optional.of(applicationState);
+        }
+
+        if (PodPhase.SUCCEEDED.equals(PodPhase.getPhase(driverPod))) {
+            ApplicationState state;
+            if (driverReady) {
+                state = new ApplicationState(ApplicationStateSummary.SUCCEEDED,
+                        DriverCompletedMessage);
+            } else {
+                state = new ApplicationState(ApplicationStateSummary.FAILED,
+                        DriverTerminatedBeforeInitializationMessage);
+                state.setLastObservedDriverStatus(driverPod.getStatus());
+            }
+            return Optional.of(state);
+        }
+
+        List<ContainerStatus> initContainerStatusList =
+                driverPod.getStatus().getInitContainerStatuses();
+        if (initContainerStatusList != null
+                && initContainerStatusList.parallelStream().anyMatch(PodUtils::isContainerFailed)) {
+            ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED,
+                    DriverFailedInitContainersMessage);
+            applicationState.setLastObservedDriverStatus(driverPod.getStatus());
+            return Optional.of(applicationState);
+        }
+        List<ContainerStatus> containerStatusList = driverPod.getStatus().getContainerStatuses();
+        List<ContainerStatus> terminatedCriticalContainers = containerStatusList.parallelStream()
+                .filter(c -> isDriverMainContainer(spec, c.getName()))
+                .filter(PodUtils::isContainerExited)
+                .collect(Collectors.toList());
+        if (!terminatedCriticalContainers.isEmpty()) {
+            ApplicationState applicationState;
+            if (terminatedCriticalContainers.parallelStream()
+                    .anyMatch(PodUtils::isContainerFailed)) {
+                applicationState =
+                        new ApplicationState(ApplicationStateSummary.FAILED, DriverFailedMessage);
+            } else {
+                applicationState = new ApplicationState(ApplicationStateSummary.SUCCEEDED,
+                        DriverSucceededMessage);
+            }
+            applicationState.setLastObservedDriverStatus(driverPod.getStatus());
+            return Optional.of(applicationState);
+        }
+        if (containerStatusList.parallelStream()
+                .filter(c -> isDriverMainContainer(spec, c.getName()))
+                .anyMatch(PodUtils::isContainerRestarted)) {
+            ApplicationState state =
+                    new ApplicationState(ApplicationStateSummary.FAILED, DriverRestartedMessage);
+            state.setLastObservedDriverStatus(driverPod.getStatus());
+            return Optional.of(state);
+        }
+        return Optional.empty();
+    }
+
+}
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java
new file mode 100644
index 00000000..991fe4f7
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.reconciler.observers; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import org.apache.spark.kubernetes.operator.spec.BaseSpec; +import org.apache.spark.kubernetes.operator.status.BaseAttemptSummary; +import org.apache.spark.kubernetes.operator.status.BaseState; +import org.apache.spark.kubernetes.operator.status.BaseStatus; + +import java.util.Optional; + +/** + * Observes a given secondary resource and returns the state to be updated, if applicable. + * These observers do not act on the secondary resource; they only observe its status + * and update the owner SparkApplication status if needed. + */ +public abstract class BaseSecondaryResourceObserver<STATE extends BaseState<?>, + SPEC extends BaseSpec, + STATUS extends BaseStatus, + SR extends HasMetadata> { + public abstract Optional<STATE> observe(SR secondaryResource, + SPEC spec, + STATUS currentStatus); +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java new file mode 100644 index 00000000..153076ab --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.Pod; +import lombok.AllArgsConstructor; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils; +import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; +import org.apache.spark.kubernetes.operator.spec.ApplicationTolerations; +import org.apache.spark.kubernetes.operator.spec.RestartPolicy; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; + +/** + * Cleans up all secondary resources when the application is deleted, or at the end of each + * attempt. Updates the application status to indicate whether another attempt will be made. + */ +@AllArgsConstructor +@NoArgsConstructor +@Slf4j +public class AppCleanUpStep extends AppReconcileStep { + private Supplier<ApplicationState> cleanUpSuccessStateSupplier; + + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationStatus currentStatus = context.getSparkApplication().getStatus(); + ApplicationTolerations tolerations = + context.getSparkApplication().getSpec().getApplicationTolerations(); + String stateMessage = null; + if (!tolerations.getDeleteOnTermination()) { + if (tolerations.getRestartConfig() != null + && !RestartPolicy.NEVER.equals( + tolerations.getRestartConfig().getRestartPolicy())) { + stateMessage = + "Application is configured to restart, resources created in the current " + + "attempt will be force-released."; + log.warn(stateMessage); + } else { + ApplicationStatus updatedStatus = currentStatus.appendNewState( + new ApplicationState( + ApplicationStateSummary.TERMINATED_WITHOUT_RELEASE_RESOURCES, + "Application is terminated without releasing resources " + + "as configured.")); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } + } + List<HasMetadata> resourcesToRemove = new ArrayList<>(); + if (ApplicationStateSummary.SCHEDULING_FAILURE.equals( + currentStatus.getCurrentState().getCurrentStateSummary())) { + // if the app failed at scheduling, re-compute all resource specs and delete them, + // as they may not be fully owned by the driver + try { + resourcesToRemove.addAll(context.getDriverPreResourcesSpec()); + resourcesToRemove.add(context.getDriverPodSpec()); + resourcesToRemove.addAll(context.getDriverResourcesSpec()); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to build resources for application.", e); + } + ApplicationStatus updatedStatus = currentStatus.appendNewState( + new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, + "Cannot build Spark spec for given application, " + "consider all resources as released.")); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } + } else { + Optional<Pod> driver = context.getDriverPod(); + driver.ifPresent(resourcesToRemove::add); + } + boolean forceDelete = + SparkApplicationReconcileUtils.enableForceDelete(context.getSparkApplication()); + for (HasMetadata resource : resourcesToRemove) { + SparkReconcilerUtils.deleteResourceIfExists(context.getClient(), resource, forceDelete); + } + ApplicationStatus updatedStatus; + if (cleanUpSuccessStateSupplier != null) { + ApplicationState state = cleanUpSuccessStateSupplier.get(); + if (StringUtils.isNotEmpty(stateMessage)) { + state.setMessage(stateMessage); + } + updatedStatus = currentStatus.appendNewState(state); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } else { + updatedStatus = + currentStatus.terminateOrRestart(tolerations.getRestartConfig(), stateMessage, + SparkOperatorConf.TrimAttemptStateTransitionHistory.getValue()); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + if (ApplicationStateSummary.SCHEDULED_TO_RESTART.equals(updatedStatus.getCurrentState() + .getCurrentStateSummary())) { + requeueAfterMillis = tolerations.getRestartConfig().getRestartBackoffMillis(); + } + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } + + } + + private ReconcileProgress updateStateAndProceed(SparkApplicationContext context, + StatusRecorder statusRecorder, + ApplicationStatus updatedStatus, + long requeueAfterMillis) { + statusRecorder.persistStatus(context, updatedStatus); + return ReconcileProgress.completeAndRequeueAfter(Duration.ofMillis(requeueAfterMillis)); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java new file mode 100644 index 00000000..15946a1d --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.Pod; +import lombok.extern.slf4j.Slf4j; +import org.apache.spark.kubernetes.operator.Constants; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.decorators.DriverResourceDecorator; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import static org.apache.spark.kubernetes.operator.Constants.ScheduleFailureMessage; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; +import static org.apache.spark.kubernetes.operator.utils.SparkExceptionUtils.buildGeneralErrorMessage; + +/** + * Requests the driver pod and all driver resources when starting an attempt. + */ +@Slf4j +public class AppInitStep extends AppReconcileStep { + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationState currentState = context.getSparkApplication().getStatus().getCurrentState(); + if (!currentState.getCurrentStateSummary().isInitializing()) { + return proceed(); + } + SparkApplication app = context.getSparkApplication(); + if (app.getStatus().getPreviousAttemptSummary() != null) { + Instant lastTransitionTime = Instant.parse(currentState.getLastTransitionTime()); + Instant restartTime = lastTransitionTime.plusMillis( + app.getSpec().getApplicationTolerations().getRestartConfig() + .getRestartBackoffMillis()); + Instant now = Instant.now(); + if (restartTime.isAfter(now)) { + return ReconcileProgress.completeAndRequeueAfter( + Duration.between(now, restartTime)); + } + } + try { + List<HasMetadata> createdPreResources = new ArrayList<>(); + for (HasMetadata resource : context.getDriverPreResourcesSpec()) { + Optional<HasMetadata> createdResource = + SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + resource); + if (createdResource.isPresent()) { + createdPreResources.add(createdResource.get()); + } else { + updateStatusForCreationFailure(context, resource, statusRecorder); + return completeAndImmediateRequeue(); + } + } + Optional<Pod> driverPod = + SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + context.getDriverPodSpec()); + if (driverPod.isPresent()) { + DriverResourceDecorator decorator = new DriverResourceDecorator(driverPod.get()); + createdPreResources.forEach(decorator::decorate); + context.getClient().resourceList(createdPreResources).forceConflicts() + .serverSideApply(); + List<HasMetadata> driverResources = context.getDriverResourcesSpec(); + driverResources.forEach(decorator::decorate); + for (HasMetadata resource : driverResources) { + Optional<HasMetadata>
createdResource = + SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + resource); + if (createdResource.isEmpty()) { + updateStatusForCreationFailure(context, resource, statusRecorder); + return completeAndImmediateRequeue(); + } + } + } + ApplicationStatus updatedStatus = context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.DRIVER_REQUESTED, + Constants.DriverRequestedMessage)); + statusRecorder.persistStatus(context, updatedStatus); + return completeAndDefaultRequeue(); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to request driver resource.", e); + } + String errorMessage = ScheduleFailureMessage + + " StackTrace: " + + buildGeneralErrorMessage(e); + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, + errorMessage))); + return completeAndImmediateRequeue(); + } + } + + private void updateStatusForCreationFailure(SparkApplicationContext context, + HasMetadata resourceSpec, + StatusRecorder statusRecorder) { + if (log.isErrorEnabled()) { + log.error("Failed all attempts to request driver resource {}.", + resourceSpec.getMetadata()); + } + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, + "Failed to request resource for driver with kind: " + + resourceSpec.getKind() + + ", name: " + + resourceSpec.getMetadata().getName()))); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java new file mode 100644 index 00000000..6a1a5eef --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import io.fabric8.kubernetes.api.model.Pod; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.reconciler.observers.BaseAppDriverObserver; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; +import static org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils.driverUnexpectedRemoved; + +/** + * Basic reconcile step for an application. + */ +public abstract class AppReconcileStep { + public abstract ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder); + + protected ReconcileProgress observeDriver(final SparkApplicationContext context, + final StatusRecorder statusRecorder, + final List<BaseAppDriverObserver> observers) { + Optional<Pod> driverPodOptional = context.getDriverPod(); + SparkApplication app = context.getSparkApplication(); + ApplicationStatus currentStatus = app.getStatus(); + if (driverPodOptional.isPresent()) { + List<ApplicationState> stateUpdates = observers.stream() + .map(o -> o.observe(driverPodOptional.get(), app.getSpec(), app.getStatus())) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + if (stateUpdates.isEmpty()) { + return proceed(); + } else { + for (ApplicationState state : stateUpdates) { + currentStatus = currentStatus.appendNewState(state); + } + statusRecorder.persistStatus(context, currentStatus); + return completeAndImmediateRequeue(); + } + } else { + ApplicationStatus updatedStatus = + currentStatus.appendNewState(driverUnexpectedRemoved()); + statusRecorder.persistStatus(context, updatedStatus); + return completeAndImmediateRequeue(); + } + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java new file mode 100644 index 00000000..d303bfca --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import lombok.RequiredArgsConstructor; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.reconciler.observers.BaseAppDriverObserver; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.util.List; + +/** + * Observes the secondary resource and updates the app status if needed. + */ +@RequiredArgsConstructor +public class AppResourceObserveStep extends AppReconcileStep { + + private final List<BaseAppDriverObserver> observers; + + @Override + public ReconcileProgress reconcile(final SparkApplicationContext context, + final StatusRecorder statusRecorder) { + return observeDriver(context, statusRecorder, observers); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java new file mode 100644 index 00000000..16a076f8 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import io.fabric8.kubernetes.api.model.Pod; +import org.apache.spark.kubernetes.operator.Constants; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverRunningObserver; +import org.apache.spark.kubernetes.operator.spec.InstanceConfig; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.utils.PodUtils; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.util.Collections; +import java.util.Set; + +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; + +/** + * Observes whether the app has acquired enough executors, as configured in the spec. + */ +public class AppRunningStep extends AppReconcileStep { + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + InstanceConfig instanceConfig = + context.getSparkApplication().getSpec().getApplicationTolerations() + .getInstanceConfig(); + ApplicationStateSummary prevStateSummary = + context.getSparkApplication().getStatus().getCurrentState() + .getCurrentStateSummary(); + ApplicationStateSummary proposedStateSummary; + String stateMessage = + context.getSparkApplication().getStatus().getCurrentState().getMessage(); + if (instanceConfig == null + || instanceConfig.getInitExecutors() == 0L + || (!prevStateSummary.isStarting() && instanceConfig.getMinExecutors() == 0L)) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; + } else { + Set<Pod> executors = context.getExecutorsForApplication(); + long runningExecutors = executors.stream() + .filter(PodUtils::isPodReady) + .count(); + if (prevStateSummary.isStarting()) { + if (runningExecutors >= instanceConfig.getInitExecutors()) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; + } else if (runningExecutors > 0L) { + proposedStateSummary = + ApplicationStateSummary.INITIALIZED_BELOW_THRESHOLD_EXECUTORS; + stateMessage = Constants.InitializedWithBelowThresholdExecutorsMessage; + } else { + // keep the previous state while there are no ready executors + proposedStateSummary = prevStateSummary; + } + } else { + if (runningExecutors >= instanceConfig.getMinExecutors()) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; + } else { + proposedStateSummary = + ApplicationStateSummary.RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS; + stateMessage = Constants.RunningWithBelowThresholdExecutorsMessage; + } + } + } + if (!proposedStateSummary.equals(prevStateSummary)) { + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(proposedStateSummary, stateMessage))); + return completeAndDefaultRequeue(); + } else { + return observeDriver(context, statusRecorder, + Collections.singletonList(new AppDriverRunningObserver())); + } + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java 
b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java new file mode 100644 index 00000000..ed253d82 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; + +/** + * Observes whether the app is already terminated. If so, ends the reconciliation. + */ +public class AppTerminatedStep extends AppReconcileStep { + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + if (context.getSparkApplication().getStatus().getCurrentState().getCurrentStateSummary() + .isTerminated()) { + statusRecorder.removeCachedStatus(context.getSparkApplication()); + return ReconcileProgress.completeAndNoRequeue(); + } + return proceed(); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java new file mode 100644 index 00000000..fb9e1dca --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import lombok.extern.slf4j.Slf4j; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.spec.DeploymentMode; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; +import static org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils.isValidApplicationStatus; + +/** + * Validates the submitted app. This can be refactored into a webhook in the future. + */ +@Slf4j +public class AppValidateStep extends AppReconcileStep { + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + if (!isValidApplicationStatus(context.getSparkApplication())) { + log.warn("Spark application found with empty status. Resetting to initial state."); + statusRecorder.persistStatus(context, new ApplicationStatus()); + } + if (DeploymentMode.CLIENT_MODE.equals( + context.getSparkApplication().getSpec().getDeploymentMode())) { + ApplicationState failure = new ApplicationState(ApplicationStateSummary.FAILED, + "Client mode is not supported yet."); + statusRecorder.persistStatus(context, + context.getSparkApplication().getStatus().appendNewState(failure)); + return completeAndImmediateRequeue(); + } + return proceed(); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java new file mode 100644 index 00000000..becc747a --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; + +import io.fabric8.kubernetes.api.model.Pod; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; + +import java.util.Optional; + +import static org.apache.spark.kubernetes.operator.Constants.UnknownStateMessage; + +/** + * Abnormal state handler. + */ +public class UnknownStateStep extends AppReconcileStep { + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationState state = + new ApplicationState(ApplicationStateSummary.FAILED, UnknownStateMessage); + Optional<Pod> driver = context.getDriverPod(); + driver.ifPresent(pod -> state.setLastObservedDriverStatus(pod.getStatus())); + statusRecorder.persistStatus(context, + context.getSparkApplication().getStatus().appendNewState(state)); + return ReconcileProgress.completeAndImmediateRequeue(); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java new file mode 100644 index 00000000..dcd1bafe --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import org.apache.spark.kubernetes.operator.Constants; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; + +/** + * Handy utils to create & manage application status. + */ +public class ApplicationStatusUtils { + + public static boolean isValidApplicationStatus(SparkApplication app) { + // null check + return app.getStatus() != null + && app.getStatus().getCurrentState() != null + && app.getStatus().getCurrentState().getCurrentStateSummary() != null; + } + + public static ApplicationState driverUnexpectedRemoved() { + return new ApplicationState(ApplicationStateSummary.FAILED, + Constants.DriverUnexpectedRemovedMessage); + } + + public static ApplicationState driverLaunchTimedOut() { + return new ApplicationState(ApplicationStateSummary.DRIVER_LAUNCH_TIMED_OUT, + Constants.DriverLaunchTimeoutMessage); + } + + public static ApplicationState driverReadyTimedOut() { + return new ApplicationState(ApplicationStateSummary.SPARK_SESSION_INITIALIZATION_TIMED_OUT, + Constants.DriverLaunchTimeoutMessage); + } + + public static ApplicationState executorLaunchTimedOut() { + return new ApplicationState(ApplicationStateSummary.EXECUTORS_LAUNCH_TIMED_OUT, + Constants.ExecutorLaunchTimeoutMessage); + } + + public static ApplicationState appCancelled() { + return new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, + Constants.AppCancelledMessage); + } + + public static boolean hasReachedState(SparkApplication application, + ApplicationState stateToCheck) { + if (!isValidApplicationStatus(application)) { + return false; + } + return application.getStatus().getStateTransitionHistory().keySet().parallelStream() + .anyMatch(stateId -> stateToCheck.equals( + application.getStatus().getStateTransitionHistory().get(stateId))); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java new file mode 100644 index 00000000..80f5d621 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import org.apache.commons.lang3.StringUtils; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.slf4j.MDC; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.locks.ReentrantLock; + +public class LoggingUtils { + public static final class TrackedMDC { + public static final String NamespaceKey = "app_namespace"; + public static final String NameKey = "app_name"; + public static final String UuidKey = "app_uuid"; + public static final String GenerationKey = "app_generation"; + private final ReentrantLock lock = new ReentrantLock(); + private Set<String> keys = new HashSet<>(); + + public void set(final SparkApplication application) { + if (application != null && application.getMetadata() != null) { + try { + lock.lock(); + if (StringUtils.isNotEmpty(application.getMetadata().getNamespace())) { + MDC.put(NamespaceKey, application.getMetadata().getNamespace()); + keys.add(NamespaceKey); + } + if (StringUtils.isNotEmpty(application.getMetadata().getName())) { + MDC.put(NameKey, application.getMetadata().getName()); + keys.add(NameKey); + } + if (StringUtils.isNotEmpty(application.getMetadata().getUid())) { + MDC.put(UuidKey, application.getMetadata().getUid()); + keys.add(UuidKey); + } + MDC.put(GenerationKey, + String.valueOf(application.getMetadata().getGeneration())); + keys.add(GenerationKey); + } finally { + lock.unlock(); + } + } + } + + public void reset() { + try { + lock.lock(); + for (String mdcKey : keys) { + MDC.remove(mdcKey); + } + keys.clear(); + } finally { + lock.unlock(); + } + } + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java new file mode 100644 index 00000000..a430928e --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import io.fabric8.kubernetes.api.model.Pod; + +public enum PodPhase { + // hope this is provided by k8s client in future + PENDING("pending"), + RUNNING("running"), + FAILED("failed"), + SUCCEEDED("succeeded"), + TERMINATING("terminating"), + UNKNOWN("unknown"); + + private final String description; + + PodPhase(String description) { + this.description = description; + } + + public static PodPhase getPhase(final Pod pod) { + if (pod != null && pod.getStatus() != null) { + for (PodPhase podPhase : values()) { + if (podPhase.description.equalsIgnoreCase(pod.getStatus().getPhase())) { + return podPhase; + } + } + } + return UNKNOWN; + } +} + diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java new file mode 100644 index 00000000..233b50ce --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import io.fabric8.kubernetes.api.model.ContainerStatus; +import io.fabric8.kubernetes.api.model.Pod; +import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; + +import java.util.List; + +import static org.apache.spark.kubernetes.operator.utils.ModelUtils.isDriverMainContainer; + +public class PodUtils { + + public static boolean isPodReady(final Pod pod) { + if (pod == null || pod.getStatus() == null + || pod.getStatus().getConditions() == null + || pod.getStatus().getConditions().isEmpty()) { + return false; + } + return pod.getStatus().getConditions().parallelStream() + .anyMatch(condition -> "ready".equalsIgnoreCase(condition.getType()) + && "true".equalsIgnoreCase(condition.getStatus())); + } + + public static boolean isPodStarted(final Pod driver, + final ApplicationSpec spec) { + // Consider the pod 'started' if any of the Spark containers is started and ready + if (driver == null || driver.getStatus() == null + || driver.getStatus().getContainerStatuses() == null + || driver.getStatus().getContainerStatuses().isEmpty()) { + return false; + } + + List<ContainerStatus> containerStatusList = driver.getStatus().getContainerStatuses(); + + // If there's only one container in the given pod, evaluate it. + // Otherwise, use the provided name as filter. 
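+ // For example (hypothetical pod layout): a driver pod running containers + // ["spark-kubernetes-driver", "logging-sidecar"], where the spec marks + // "spark-kubernetes-driver" as the driver main container, is considered started + // once that container reports ready, regardless of the sidecar's state.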
if (containerStatusList.size() == 1) { + return containerStatusList.get(0).getReady(); + } + + return containerStatusList + .stream() + .filter(c -> isDriverMainContainer(spec, c.getName())) + .anyMatch(ContainerStatus::getReady); + } + + public static boolean isContainerExited(final ContainerStatus containerStatus) { + return containerStatus != null + && containerStatus.getState() != null + && containerStatus.getState().getTerminated() != null; + } + + public static boolean isContainerRestarted(final ContainerStatus containerStatus) { + return containerStatus != null + && containerStatus.getRestartCount() > 0; + } + + public static boolean isContainerFailed(final ContainerStatus containerStatus) { + return isContainerExited(containerStatus) + && containerStatus.getState().getTerminated().getExitCode() > 0; + } + +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java new file mode 100644 index 00000000..61e18e5a --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.kubernetes.operator.utils; + +import com.sun.net.httpserver.HttpExchange; +import io.javaoperatorsdk.operator.Operator; +import lombok.extern.slf4j.Slf4j; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Optional; + +@Slf4j +public class ProbeUtil { + public static void sendMessage(HttpExchange httpExchange, int code, String message) + throws IOException { + try (var outputStream = httpExchange.getResponseBody()) { + var bytes = message.getBytes(StandardCharsets.UTF_8); + httpExchange.sendResponseHeaders(code, bytes.length); + outputStream.write(bytes); + outputStream.flush(); + } + } + + public static Optional<Boolean> areOperatorsStarted(List<Operator> operators) { + return operators.stream().map(operator -> { + var runtimeInfo = operator.getRuntimeInfo(); + if (runtimeInfo != null) { + if (!operator.getRuntimeInfo().isStarted()) { + log.error("Operator is not running"); + return false; + } + return true; + } + return false; + }).reduce((a, b) -> a && b); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java new file mode 100644 index 00000000..7ba037de --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import io.fabric8.kubernetes.client.KubernetesClientException; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; + +public class SparkExceptionUtils { + public static boolean isConflictForExistingResource(KubernetesClientException e) { + return e != null && + e.getCode() == 409 && + e.getStatus() != null && + StringUtils.isNotEmpty(e.getStatus().toString()) && + e.getStatus().toString().toLowerCase().contains("alreadyexists"); + } + + public static String buildGeneralErrorMessage(Exception e) { + return ExceptionUtils.getStackTrace(e); + } + +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java new file mode 100644 index 00000000..6fef4752 --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.utils; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientException; +import io.javaoperatorsdk.operator.processing.event.ResourceID; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; + +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchFailureBackoffSeconds; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchMaxRetry; + +/** + *
+ * Note - this is inspired by the Flink Operator Status Recorder. + *
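+ * A typical interaction from a reconcile step looks like the following sketch + * (the context and updatedStatus names are illustrative, assuming the surrounding + * reconciler wiring): + * + * ApplicationStatus updatedStatus = currentStatus.appendNewState(newState); + * statusRecorder.persistStatus(context, updatedStatus); + * + * persistStatus sets the new status on the application, diffs it against the + * in-memory cache, patches the custom resource status with resource-version locking, + * and notifies the registered status listeners. + *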
+ * Enables additional (extendable) observers for Spark App status. + * Cache & version locking might be removed in a future version, as a batch app does not + * expect spec changes after it is submitted. + */ +@Slf4j +public class StatusRecorder { + protected final List<ApplicationStatusListener> appStatusListeners; + protected final ObjectMapper objectMapper = new ObjectMapper(); + protected final ConcurrentHashMap<ResourceID, ObjectNode> statusCache; + + public StatusRecorder(List<ApplicationStatusListener> appStatusListeners) { + this.appStatusListeners = appStatusListeners; + this.statusCache = new ConcurrentHashMap<>(); + } + + /** + * Update the status of the provided kubernetes resource on the k8s cluster. We use patch + * together with null resourceVersion to try to guarantee that the status update succeeds even + * if the underlying resource spec was updated in the meantime. This is necessary for the + * correct operator behavior. + * + * @param resource Resource for which status update should be performed + */ + @SneakyThrows + private void patchAndCacheStatus(SparkApplication resource, KubernetesClient client) { + ObjectNode newStatusNode = + objectMapper.convertValue(resource.getStatus(), ObjectNode.class); + ResourceID resourceId = ResourceID.fromResource(resource); + ObjectNode previousStatusNode = statusCache.get(resourceId); + + if (newStatusNode.equals(previousStatusNode)) { + log.debug("No status change."); + return; + } + + ApplicationStatus prevStatus = + objectMapper.convertValue(previousStatusNode, ApplicationStatus.class); + + Exception err = null; + for (long i = 0; i < StatusPatchMaxRetry.getValue(); i++) { + // Retry the status update up to the configured max retries to ride out + // intermittent connectivity errors; stop as soon as one attempt succeeds + try { + replaceStatus(resource, prevStatus, client); + err = null; + break; + } catch (KubernetesClientException e) { + log.error("Error while patching status, retrying {}/{}...", + (i + 1), StatusPatchMaxRetry.getValue(), e); + Thread.sleep( + TimeUnit.SECONDS.toMillis(StatusPatchFailureBackoffSeconds.getValue())); + err = e; + } + } + + if (err != null) { + throw err; + } + + statusCache.put(resourceId, newStatusNode); + appStatusListeners.forEach(listener -> { + listener.listenStatus(resource, prevStatus, resource.getStatus()); + }); + } + + public void persistStatus(SparkApplicationContext context, + ApplicationStatus newStatus) { + context.getSparkApplication().setStatus(newStatus); + patchAndCacheStatus(context.getSparkApplication(), context.getClient()); + } + + private void replaceStatus(SparkApplication resource, ApplicationStatus prevStatus, + KubernetesClient client) + throws JsonProcessingException { + int retries = 0; + while (true) { + try { + var updated = client.resource(resource).lockResourceVersion().updateStatus(); + + // If we successfully replaced the status, update the resource version so we know + // what to lock next in the same reconciliation loop + resource.getMetadata() + .setResourceVersion(updated.getMetadata().getResourceVersion()); + return; + } catch (KubernetesClientException kce) { + // 409 is the error code for conflicts resulting from the locking + if (kce.getCode() == 409) { + var currentVersion = resource.getMetadata().getResourceVersion(); + log.debug( + "Could not apply status update for resource version {}", + currentVersion); + + var latest = client.resource(resource).get(); + var latestVersion = latest.getMetadata().getResourceVersion(); + + if (latestVersion.equals(currentVersion)) { + // This should not happen as long as the client works consistently + log.error("Unable to fetch latest resource version"); + throw kce; + } + + if (latest.getStatus().equals(prevStatus)) { + if (retries++ < 3) { + log.debug( + "Retrying status update for latest version {}", latestVersion); + resource.getMetadata().setResourceVersion(latestVersion); + } else { + // If we cannot get the latest version in 3 tries we throw the error to + // retry with delay + throw kce; + } + } else { + throw new RuntimeException( + "Status has been modified externally in version " + + latestVersion + + " Previous: " + + objectMapper.writeValueAsString(prevStatus) + + " Latest: " + + objectMapper.writeValueAsString(latest.getStatus()), kce); + } + } else { + // We simply throw non-conflict errors, to trigger retry with delay + throw kce; + } + } + } + } + + /** + * Update the custom resource status based on the in-memory cache to ensure that any status + * updates that we made previously are always visible in the reconciliation loop. This is + * required due to our custom status patching logic. + * + *
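+ * For example (hypothetical sequence): reconcile N patches the status and caches it, + * but the informer may still serve the pre-patch resource to reconcile N+1; the stale + * in-memory status is therefore overwritten from this cache before the operator acts + * on it. + *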

If the cache doesn't have a status stored, we do not update. This happens when the operator + * reconciles a resource for the first time after a restart. + * + * @param resource Resource for which the status should be updated from the cache + */ + public void updateStatusFromCache(SparkApplication resource) { + var key = ResourceID.fromResource(resource); + var cachedStatus = statusCache.get(key); + if (cachedStatus != null) { + resource.setStatus( + objectMapper.convertValue( + cachedStatus, resource.getStatus().getClass())); + } else { + // Initialize cache with current status copy + statusCache.put(key, objectMapper.convertValue(resource.getStatus(), ObjectNode.class)); + } + } + + /** + * Removes the cached status for the given resource. + */ + public void removeCachedStatus(SparkApplication resource) { + statusCache.remove(ResourceID.fromResource(resource)); + } +} diff --git a/spark-operator/src/main/resources/EcsLayout.json b/spark-operator/src/main/resources/EcsLayout.json new file mode 100644 index 00000000..8d215ab5 --- /dev/null +++ b/spark-operator/src/main/resources/EcsLayout.json @@ -0,0 +1,49 @@ +{ + "@timestamp": { + "$resolver": "timestamp", + "pattern": { + "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", + "timeZone": "UTC" + } + }, + "ecs.version": "1.2.0", + "log.level": { + "$resolver": "level", + "field": "name" + }, + "message": { + "$resolver": "message", + "stringified": true + }, + "process.thread.name": { + "$resolver": "thread", + "field": "name" + }, + "log.logger": { + "$resolver": "logger", + "field": "name" + }, + "labels": { + "$resolver": "mdc", + "flatten": true, + "stringified": true + }, + "tags": { + "$resolver": "ndc" + }, + "error.type": { + "$resolver": "exception", + "field": "className" + }, + "error.message": { + "$resolver": "exception", + "field": "message" + }, + "error.stack_trace": { + "$resolver": "exception", + "field": "stackTrace", + "stackTrace": { + "stringified": true + } + } +} diff --git a/spark-operator/src/main/resources/log4j2.properties b/spark-operator/src/main/resources/log4j2.properties new file mode 100644 index 00000000..9285fa00 --- /dev/null +++ b/spark-operator/src/main/resources/log4j2.properties @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +status=debug +strict=true +dest=out +name=PropertiesConfig +property.filename=/tmp/spark-operator +filter.threshold.type=ThresholdFilter +filter.threshold.level=debug +# console +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d %p %X %C{1.} [%t] %m%n +appender.console.filter.threshold.type=ThresholdFilter +appender.console.filter.threshold.level=info +# rolling JSON +appender.rolling.type=RollingFile +appender.rolling.name=RollingFile +appender.rolling.append=true +appender.rolling.fileName=${filename}.log +appender.rolling.filePattern=${filename}-%i.log.gz +appender.rolling.layout.type=JsonTemplateLayout +appender.rolling.layout.eventTemplateUri=classpath:EcsLayout.json +appender.rolling.policies.type=Policies +appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=100MB +appender.rolling.strategy.type=DefaultRolloverStrategy +appender.rolling.strategy.max=20 +appender.rolling.immediateFlush=true +# chatty loggers +rootLogger.level=all +logger.netty.name=io.netty +logger.netty.level=warn +log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.rolling.ref=RollingFile diff --git a/spark-operator/src/main/resources/spark-pi.yaml b/spark-operator/src/main/resources/spark-pi.yaml new file mode 100644 index 00000000..cad6bb83 --- /dev/null +++ b/spark-operator/src/main/resources/spark-pi.yaml @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: spark-pi +spec: + mainClass: "org.apache.spark.examples.SparkPi" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" + sparkConf: + spark.executor.instances: "5" + spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.namespace: "spark-test" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + applicationTolerations: + deleteOnTermination: false + runtimeVersions: + scalaVersion: v2_12 + sparkVersion: v3_4_1 diff --git a/spark-operator/src/main/resources/streaming.yaml b/spark-operator/src/main/resources/streaming.yaml new file mode 100644 index 00000000..5903cce5 --- /dev/null +++ b/spark-operator/src/main/resources/streaming.yaml @@ -0,0 +1,91 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +apiVersion: org.apache.spark/v1alpha1 +kind: SparkApplication +metadata: + name: network-wordcount +spec: + applicationTolerations: + restartConfig: + restartPolicy: ALWAYS + maxRestartAttempts: 3 + restartBackoffMillis: 300000 + applicationTimeoutConfig: + driverStartTimeoutMillis: 600000 + sparkSessionStartTimeoutMillis: 600000 + executorStartTimeoutMillis: 600000 + instanceConfig: + initExecutors: 5 + minExecutors: 5 + maxExecutors: 5 + driverSpec: + podTemplateSpec: + metadata: + labels: + foo: bar + spec: + containers: + - name: main-container + resources: + limits: + cpu: "1" + memory: 512Mi + requests: + cpu: "1" + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + executorSpec: + podTemplateSpec: + metadata: + labels: + foo: bar + annotations: + foo: bar + spec: + containers: + - name: executor + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + mainClass: "org.apache.spark.examples.streaming.NetworkWordCount" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" + driverArgs: + - "localhost" + - "9999" + sparkConf: + spark.executor.instances: "5" + spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.namespace: "spark-test" + spark.kubernetes.authenticate.driver.serviceAccountName: "spark" + spark.kubernetes.driver.podTemplateContainerName: "main-container" + runtimeVersions: + scalaVersion: v2_12 + sparkVersion: v3_4_1 diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java new file mode 100644 index 00000000..09b84c18 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.config; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; + +class ConfigOptionTest { + @Test + void testResolveValueWithoutOverride() { + byte defaultByteValue = 9; + short defaultShortValue = 9; + long defaultLongValue = 9; + int defaultIntValue = 9; + float defaultFloatValue = 9.0f; + double defaultDoubleValue = 9.0; + boolean defaultBooleanValue = false; + String defaultStringValue = "bar"; + ConfigOption<String> testStrConf = ConfigOption.<String>builder() + .key("foo") + .typeParameterClass(String.class) + .description("foo foo.") + .defaultValue(defaultStringValue) + .build(); + ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() + .key("fooint") + .typeParameterClass(Integer.class) + .description("foo foo.") + .defaultValue(defaultIntValue) + .build(); + ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() + .key("fooshort") + .typeParameterClass(Short.class) + .description("foo foo.") + .defaultValue(defaultShortValue) + .build(); + ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() + .key("foolong") + .typeParameterClass(Long.class) + .description("foo foo.") + .defaultValue(defaultLongValue) + .build(); + ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() + .key("foobool") + .typeParameterClass(Boolean.class) + .description("foo foo.") + .defaultValue(defaultBooleanValue) + .build(); + ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() + .key("foofloat") + .typeParameterClass(Float.class) + .description("foo foo.") + .defaultValue(defaultFloatValue) + .build(); + ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() + .key("foodouble") + .typeParameterClass(Double.class) + .description("foo foo.") + .defaultValue(defaultDoubleValue) + .build(); + ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() + .key("foobyte") + .typeParameterClass(Byte.class) + .description("foo foo.") + .defaultValue(defaultByteValue) + .build(); + Assertions.assertEquals(defaultStringValue, testStrConf.getValue()); + Assertions.assertEquals(defaultIntValue, testIntConf.getValue()); + Assertions.assertEquals(defaultLongValue, testLongConf.getValue()); + Assertions.assertEquals(defaultBooleanValue, testBooleanConf.getValue()); + Assertions.assertEquals(defaultFloatValue, testFloatConf.getValue()); + Assertions.assertEquals(defaultByteValue, testByteConf.getValue()); + Assertions.assertEquals(defaultShortValue, testShortConf.getValue()); + Assertions.assertEquals(defaultDoubleValue, testDoubleConf.getValue()); + } + + @Test + void testResolveValueWithOverride() { + byte overrideByteValue = 10; + short overrideShortValue = 10; + long overrideLongValue = 10; + int overrideIntValue = 10; + float overrideFloatValue = 10.0f; + double overrideDoubleValue = 10.0; + boolean overrideBooleanValue = true; + String overrideStringValue = "barbar"; + byte defaultByteValue = 9; + short defaultShortValue = 9; + long defaultLongValue = 9; + int defaultIntValue = 9; + float defaultFloatValue = 9.0f; + double defaultDoubleValue = 9.0; + boolean defaultBooleanValue = false; + String defaultStringValue = "bar"; + Map<String, String> configOverride = new HashMap<>(); + configOverride.put("foobyte", "10"); + configOverride.put("fooshort", "10"); + configOverride.put("foolong", "10"); + configOverride.put("fooint", "10"); + configOverride.put("foofloat", "10.0"); + configOverride.put("foodouble", "10.0"); + configOverride.put("foobool", "true"); + configOverride.put("foo", "barbar"); + SparkOperatorConfManager.INSTANCE.refresh(configOverride); + ConfigOption<String> testStrConf = ConfigOption.<String>builder() + .key("foo") + .typeParameterClass(String.class) + .description("foo foo.") + .defaultValue(defaultStringValue) + .build(); + ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() + .key("fooint") + .typeParameterClass(Integer.class) + .description("foo foo.") + .defaultValue(defaultIntValue) + .build(); + ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() + .key("fooshort") + .typeParameterClass(Short.class) + .description("foo foo.") + .defaultValue(defaultShortValue) + .build(); + ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() + .key("foolong") + .typeParameterClass(Long.class) + .description("foo foo.") + .defaultValue(defaultLongValue) + .build(); + ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() + .key("foobool") + .typeParameterClass(Boolean.class) + .description("foo foo.") + .defaultValue(defaultBooleanValue) + .build(); + ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() + .key("foofloat") + .typeParameterClass(Float.class) + .description("foo foo.") + .defaultValue(defaultFloatValue) + .build(); + ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() + .key("foodouble") + .typeParameterClass(Double.class) + .description("foo foo.") + .defaultValue(defaultDoubleValue) + .build(); + ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() + .key("foobyte") + .typeParameterClass(Byte.class) + .description("foo foo.") + .defaultValue(defaultByteValue) + .build(); + Assertions.assertEquals(overrideStringValue, testStrConf.getValue()); + Assertions.assertEquals(overrideIntValue, testIntConf.getValue()); + Assertions.assertEquals(overrideLongValue, testLongConf.getValue()); + Assertions.assertEquals(overrideBooleanValue, testBooleanConf.getValue()); + Assertions.assertEquals(overrideFloatValue, testFloatConf.getValue()); + Assertions.assertEquals(overrideByteValue, testByteConf.getValue()); + Assertions.assertEquals(overrideShortValue, testShortConf.getValue()); + Assertions.assertEquals(overrideDoubleValue, testDoubleConf.getValue()); + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java new file mode 100644 index 00000000..82859732 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.config; + +import org.apache.commons.lang3.StringUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; + +class SparkOperatorConfManagerTest { + @Test + void testLoadPropertiesFromInitFile() throws IOException { + String propBackUp = System.getProperty("spark.operator.base.property.file.name"); + try { + String propsFilePath = SparkOperatorConfManagerTest.class.getClassLoader() + .getResource("spark-operator.properties").getPath(); + System.setProperty("spark.operator.base.property.file.name", propsFilePath); + SparkOperatorConfManager confManager = new SparkOperatorConfManager(); + Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); + } finally { + if (StringUtils.isNotEmpty(propBackUp)) { + System.setProperty("spark.operator.base.property.file.name", propBackUp); + } else { + System.clearProperty("spark.operator.base.property.file.name"); + } + } + } + + @Test + void testOverrideProperties() { + String propBackUp = System.getProperty("spark.operator.foo"); + System.setProperty("spark.operator.foo", "bar"); + try { + SparkOperatorConfManager confManager = new SparkOperatorConfManager(); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); + + confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbar")); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("barbar", confManager.getValue("spark.operator.foo")); + + confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbarbar")); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("barbarbar", confManager.getValue("spark.operator.foo")); + + } finally { + if (StringUtils.isNotEmpty(propBackUp)) { + System.setProperty("spark.operator.foo", propBackUp); + } else { + System.clearProperty("spark.operator.foo"); + } + } + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java new file mode 100644 index 00000000..0b270d7b --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.kubernetes.operator.health; + +import io.fabric8.kubernetes.api.model.KubernetesResourceList; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; +import io.fabric8.kubernetes.client.dsl.Resource; +import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; +import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer; +import io.javaoperatorsdk.operator.processing.event.ResourceID; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager; +import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.mockito.MockedStatic; + +import javax.validation.constraints.NotNull; +import java.time.Duration; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL; +import static org.apache.spark.kubernetes.operator.Constants.SPARK_CONF_SENTINEL_DUMMY_FIELD; +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.SENTINEL_RESOURCE_RECONCILIATION_DELAY; +import static org.apache.spark.kubernetes.operator.utils.TestUtils.createMockDeployment; +import static org.apache.spark.kubernetes.operator.utils.TestUtils.notTimedOut; +import static org.mockito.Mockito.mockStatic; + +@EnableKubernetesMockClient(crud = true) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +class SentinelManagerTest { + public static final String DEFAULT = "default"; + public static final String SPARK_DEMO = "spark-demo"; + @NotNull + KubernetesClient kubernetesClient; + @NotNull + KubernetesMockServer server; + public static final int SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS = 10; + + @BeforeAll + static void beforeAll() { + Map<String, String> overrideValue = + Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), + Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS) + .toString()); + SparkOperatorConfManager.INSTANCE.refresh(overrideValue); + } + + @Test + @Order(1) + void testIsSentinelResource() { + SparkApplication sparkApplication = new SparkApplication(); + var labelMap = sparkApplication.getMetadata().getLabels(); + labelMap.put(SENTINEL_LABEL, "true"); + Set<String> namespaces = new HashSet<>(); + sparkApplication.getMetadata().setNamespace("spark-test"); + namespaces.add("spark-test"); + try (MockedStatic<SparkReconcilerUtils> mockUtilsKube = + mockStatic(SparkReconcilerUtils.class)) { + mockUtilsKube.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); + Assertions.assertTrue(SentinelManager.isSentinelResource(sparkApplication)); + } + } + + @Test + @Order(3) + void testHandleSentinelResourceReconciliation() throws InterruptedException { + // Shorten the SENTINEL_RESOURCE_RECONCILIATION_DELAY for this test + SparkOperatorConfManager.INSTANCE.refresh( + Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), "10")); + + // Before the Spark reconciler starts + var mockDeployment = createMockDeployment(DEFAULT); + kubernetesClient.resource(SparkReconcilerUtils.clone(mockDeployment)).create(); + var crList = + kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list(); + var sparkApplication = crList.getItems().get(0); + var generation = sparkApplication.getMetadata().getGeneration(); + Assertions.assertEquals(generation, 1L); + + // The reconciler handles the sentinel resource for the first time + SentinelManager sentinelManager = new SentinelManager(); + sentinelManager.handleSentinelResourceReconciliation(sparkApplication, kubernetesClient); + var crList2 = + kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list(); + var sparkApplication2 = crList2.getItems().get(0); + var sparkConf2 = new HashMap<>(sparkApplication2.getSpec().getSparkConf()); + var generation2 = sparkApplication2.getMetadata().getGeneration(); + + Assertions.assertEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD), "1"); + Assertions.assertEquals(generation2, 2L); + SentinelManager.SentinelResourceState state2 = + (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources() + .get(ResourceID.fromResource(mockDeployment)); + var previousGeneration2 = state2.previousGeneration; + Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy()); + Assertions.assertEquals(previousGeneration2, 1L); + + Thread.sleep( + Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS * 2).toMillis()); + var crList3 = kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list() + .getItems(); + var sparkApplication3 = crList3.get(0); + var sparkConf3 = new HashMap<>(sparkApplication3.getSpec().getSparkConf()); + // The sentinel application's k8s generation should change + Assertions.assertNotEquals(sparkApplication3.getMetadata().getGeneration(), generation2); + // The SPARK_CONF_SENTINEL_DUMMY_FIELD value in the spark conf should increase + Assertions.assertNotEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD), + sparkConf3.get(SPARK_CONF_SENTINEL_DUMMY_FIELD)); + SentinelManager.SentinelResourceState state3 = + (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources() + .get(ResourceID.fromResource(mockDeployment)); + Assertions.assertEquals(state3.previousGeneration, previousGeneration2); + // After waiting 2 * SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS without another call + // to handleSentinelResourceReconciliation, the sentinel should be reported unhealthy + Assertions.assertFalse(sentinelManager.allSentinelsAreHealthy()); + + sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient); + sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient); + boolean isHealthy; + long currentTimeInMills = System.currentTimeMillis(); + do { + isHealthy = sentinelManager.allSentinelsAreHealthy(); + } while (!isHealthy && notTimedOut(currentTimeInMills, TimeUnit.MILLISECONDS.convert( + Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS)))); + Assertions.assertTrue(isHealthy); + kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).delete(); + } + + @Test + @Order(2) + void sentinelManagerShouldReportHealthyWhenWatchedNamespaceIsReduced() + throws InterruptedException { + Set<String> namespaces = new HashSet<>(); + namespaces.add(DEFAULT); + namespaces.add(SPARK_DEMO); + + try (MockedStatic<SparkReconcilerUtils> mockUtilsKube = + mockStatic(SparkReconcilerUtils.class)) { + mockUtilsKube.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); + SentinelManager sentinelManager = new SentinelManager(); + NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>, + Resource<SparkApplication>> cr1 = + kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT); + NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>, + Resource<SparkApplication>> cr2 = + kubernetesClient.resources(SparkApplication.class).inNamespace(SPARK_DEMO); + + var mockDeployment1 = createMockDeployment(DEFAULT); + var mockDeployment2 = createMockDeployment(SPARK_DEMO); + cr1.create(mockDeployment1); + cr2.create(mockDeployment2); + + var crList1 = + kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list(); + var crList2 = + kubernetesClient.resources(SparkApplication.class) + .inNamespace(SPARK_DEMO).list(); + var sparkApplication1 = crList1.getItems().get(0); + var sparkApplication2 = crList2.getItems().get(0); + sentinelManager.handleSentinelResourceReconciliation(sparkApplication1, + kubernetesClient); + sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, + kubernetesClient); + Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 2, + "Sentinel Manager should watch resources in two namespaces"); + Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(), + "Sentinel Manager should report healthy"); + namespaces.remove(SPARK_DEMO); + Thread.sleep(Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS) + .toMillis()); + Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(), + "Sentinel Manager should report healthy after one namespace is " + + "removed from the watch"); + Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 1, + "Sentinel Manager should only watch one namespace"); + } + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java new file mode 100644 index 00000000..58b4e151 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Properties; + +import static org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory.parseSinkProperties; +import static org.junit.Assert.assertThrows; + +class MetricsSystemFactoryTest { + + @Test + void testMetricsSystemFailFastWithNoClassFullName() { + Properties properties = new Properties(); + properties.put("sink.mocksink.period", "10"); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink"); + RuntimeException e = + assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); + Assertions.assertEquals( + "mocksink provides properties, but does not provide full class name", + e.getMessage()); + } + + @Test + void testMetricsSystemFailFastWithNotFoundClassName() { + Properties properties = new Properties(); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.FooSink"); + RuntimeException e = + assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); + Assertions.assertEquals("Fail to find class org.apache.spark.metrics.sink.FooSink", + e.getMessage()); + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java new file mode 100644 index 00000000..d9381e11 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics; + +import org.apache.spark.kubernetes.operator.metrics.sink.MockSink; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.apache.spark.metrics.sink.Sink; +import org.apache.spark.metrics.source.Source; + +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +class MetricsSystemTest { + @Test + void testMetricsSystemWithResourcesAdd() { + MetricsSystem metricsSystem = new MetricsSystem(); + List<Source> sourcesList = metricsSystem.getSources(); + List<Sink> sinks = metricsSystem.getSinks(); + metricsSystem.start(); + Assertions.assertEquals(1, sourcesList.size()); + // No sinks are added by default + Assertions.assertEquals(0, sinks.size()); + Assertions.assertFalse(metricsSystem.getRegistry().getMetrics().isEmpty()); + metricsSystem.stop(); + Assertions.assertTrue(metricsSystem.getRegistry().getMetrics().isEmpty()); + } + + @Test + void testMetricsSystemWithCustomizedSink() { + Properties properties = new Properties(); + properties.put("sink.mocksink.class", + "org.apache.spark.kubernetes.operator.metrics.sink.MockSink"); + properties.put("sink.mocksink.period", "10"); + MetricsSystem metricsSystem = new MetricsSystem(properties); + metricsSystem.start(); + Sink mockSink = metricsSystem.getSinks().get(0); + metricsSystem.stop(); + MockSink sink = (MockSink) mockSink; + Assertions.assertEquals(sink.getPollPeriod(), 10); + Assertions.assertEquals(sink.getTimeUnit(), TimeUnit.SECONDS); + } + + @Test + void testMetricsSystemWithTwoSinkConfigurations() { + Properties properties = new Properties(); + properties.put("sink.mocksink.class", + "org.apache.spark.kubernetes.operator.metrics.sink.MockSink"); + properties.put("sink.mocksink.period", "10"); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink"); + MetricsSystem metricsSystem = new MetricsSystem(properties); + metricsSystem.start(); + Assertions.assertEquals(2, metricsSystem.getSinks().size()); + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java new file mode 100644 index 00000000..97e874d0 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics.sink; + +import org.apache.spark.metrics.sink.Sink; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +import com.codahale.metrics.MetricRegistry; + +@SuppressWarnings("PMD") +public class MockSink implements Sink { + private static final Logger logger = LoggerFactory.getLogger(MockSink.class); + private Properties properties; + private MetricRegistry metricRegistry; + public static final String DEFAULT_UNIT = "SECONDS"; + public static final int DEFAULT_PERIOD = 20; + public static final String KEY_PERIOD = "period"; + public static final String KEY_UNIT = "unit"; + + public int getPollPeriod() { + // String.valueOf tolerates the int default; a plain (String) cast would throw a + // ClassCastException whenever DEFAULT_PERIOD is used + return Integer.parseInt( + String.valueOf(properties.getOrDefault(KEY_PERIOD, DEFAULT_PERIOD))); + } + + public TimeUnit getTimeUnit() { + return TimeUnit.valueOf((String) properties.getOrDefault(KEY_UNIT, DEFAULT_UNIT)); + } + + public MockSink(Properties properties, MetricRegistry metricRegistry) { + logger.info("Current properties: {}", properties); + this.properties = properties; + this.metricRegistry = metricRegistry; + } + + @Override + public void start() { + logger.info("Mock sink started"); + } + + @Override + public void stop() { + logger.info("Mock sink stopped"); + } + + @Override + public void report() { + logger.info("Mock sink reported"); + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java new file mode 100644 index 00000000..0f1dae98 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics.source; + +import com.codahale.metrics.Meter; +import com.codahale.metrics.Metric; +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; +import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.client.KubernetesClientFactory; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory; +import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; +import org.apache.spark.metrics.source.Source; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; + +import javax.validation.constraints.NotNull; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertThrows; + +@EnableKubernetesMockClient(crud = true) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +@SuppressWarnings("PMD") +class KubernetesMetricsInterceptorTest { + + @NotNull + KubernetesMockServer mockServer; + @NotNull + KubernetesClient kubernetesClient; + + @AfterEach + void cleanUp() { + mockServer.reset(); + } + + @Test + @Order(1) + void testMetricsEnabled() { + MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); + KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, + kubernetesClient.getConfiguration()); + var sparkApplication = createSparkApplication(); + var configMap = createConfigMap(); + Source source = metricsSystem.getSources().get(0); + Map<String, Metric> metrics = new HashMap<>(source.metricRegistry().getMetrics()); + Assertions.assertEquals(9, metrics.size()); + client.resource(sparkApplication).create(); + client.resource(configMap).get(); + Map<String, Metric> metrics2 = new HashMap<>(source.metricRegistry().getMetrics()); + Assertions.assertEquals(17, metrics2.size()); + List<String> expectedMetricsName = + Arrays.asList("http.response.201", "http.request.post", "sparkapplications.post", + "spark-test.sparkapplications.post", "spark-test.sparkapplications.post", + "configmaps.get", + "spark-system.configmaps.get", "2xx", "4xx"); + expectedMetricsName.stream().forEach(name -> { + Meter metric = (Meter) metrics2.get(name); + Assertions.assertEquals(metric.getCount(), 1); + }); + client.resource(sparkApplication).delete(); + } + + @Test + @Order(2) + void testWhenKubernetesServerNotWorking() { + MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); + KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, + kubernetesClient.getConfiguration()); + int retry = client.getConfiguration().getRequestRetryBackoffLimit(); + mockServer.shutdown(); + var sparkApplication = createSparkApplication(); + assertThrows(Exception.class, () -> { + client.resource(sparkApplication).create(); + }); + Source source = metricsSystem.getSources().get(0); + Map<String, Metric> map = source.metricRegistry().getMetrics(); + Assertions.assertEquals(21, map.size()); + Meter metric = (Meter) map.get("failed"); + Assertions.assertEquals(metric.getCount(), retry + 1); + } + +
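// The helpers below build minimal fixtures for the tests above: a SparkApplication + // custom resource in the spark-test namespace and a ConfigMap in the spark-system + // namespace, so both namespaced and non-namespaced metric names get exercised. +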
private static SparkApplication createSparkApplication() { + ObjectMeta meta = new ObjectMeta(); + meta.setName("sample-spark-application"); + meta.setNamespace("spark-test"); + var sparkApplication = new SparkApplication(); + sparkApplication.setMetadata(meta); + ApplicationSpec applicationSpec = new ApplicationSpec(); + applicationSpec.setMainClass("org.apache.spark.examples.SparkPi"); + applicationSpec.setJars("local:///opt/spark/examples/jars/spark-examples.jar"); + applicationSpec.setSparkConf(Map.of( + "spark.executor.instances", "5", + "spark.kubernetes.container.image", "spark", + "spark.kubernetes.namespace", "spark-test", + "spark.kubernetes.authenticate.driver.serviceAccountName", "spark" + )); + sparkApplication.setSpec(applicationSpec); + return sparkApplication; + } + + private static ConfigMap createConfigMap() { + ObjectMeta meta = new ObjectMeta(); + meta.setName("spark-job-operator-configuration"); + meta.setNamespace("spark-system"); + var configMap = new ConfigMap(); + configMap.setMetadata(meta); + return configMap; + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java new file mode 100644 index 00000000..eb3ab541 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator.metrics.source; + +import com.codahale.metrics.Metric; +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; +import io.javaoperatorsdk.operator.api.monitoring.Metrics; +import io.javaoperatorsdk.operator.api.reconciler.Constants; +import io.javaoperatorsdk.operator.processing.GroupVersionKind; +import io.javaoperatorsdk.operator.processing.event.Event; +import io.javaoperatorsdk.operator.processing.event.ResourceID; +import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceAction; +import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceEvent; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +class OperatorJosdkMetricsTest { + public static final String DEFAULT_NAMESPACE = "default"; + public static final String TEST_RESOURCE_NAME = "test1"; + private static final ResourceID resourceId = new ResourceID("spark-pi", "testns"); + + private static final Map<String, Object> metadata = + Map.of(Constants.RESOURCE_GVK_KEY, GroupVersionKind.gvkFor(SparkApplication.class), + Constants.CONTROLLER_NAME, "test-controller-name"); + private static final String controllerName = SparkApplicationReconciler.class.getSimpleName(); + + private OperatorJosdkMetrics operatorMetrics; + + @BeforeEach + public void setup() { + operatorMetrics = + new OperatorJosdkMetrics(); + } + + @Test + void testTimeControllerExecution() throws Exception { + var successExecution = new TestingExecutionBase<>(); + operatorMetrics.timeControllerExecution(successExecution); + Map<String, Metric> metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(4, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("sparkapplication.sparkapplicationreconciler.reconcile.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.success.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.success.both")); + + var failedExecution = new FooTestingExecutionBase<>(); + try { + operatorMetrics.timeControllerExecution(failedExecution); + } catch (Exception e) { + Assertions.assertEquals(e.getMessage(), "Foo exception"); + Assertions.assertEquals(8, metrics.size()); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.failure")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.failure.exception" + + ".nosuchfieldexception")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure." + + "exception.nosuchfieldexception")); + } + } + + @Test + void testReconciliationFinished() { + operatorMetrics.finishedReconciliation(buildNamespacedResource(), metadata); + Map<String, Metric> metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue(metrics.containsKey("configmap.default.reconciliation.finished")); + Assertions.assertTrue(metrics.containsKey("configmap.reconciliation.finished")); + } + + @Test + void testReconciliationExecutionStartedAndFinished() { + operatorMetrics.reconciliationExecutionStarted(buildNamespacedResource(), metadata); + Map<String, Metric> metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("configmap.test-controller-name.reconciliations.executions")); + Assertions.assertTrue(metrics.containsKey( + "configmap.default.test-controller-name.reconciliations.executions")); + operatorMetrics.reconciliationExecutionFinished(buildNamespacedResource(), metadata); + Assertions.assertEquals(3, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("configmap.test-controller-name.reconciliations.queue.size")); + } + + @Test + void testReceivedEvent() { + Event event = + new ResourceEvent(ResourceAction.ADDED, resourceId, buildNamespacedResource()); + operatorMetrics.receivedEvent(event, metadata); + Map<String, Metric> metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue(metrics.containsKey("sparkapplication.added.resource.event")); + Assertions.assertTrue(metrics.containsKey("sparkapplication.testns.added.resource.event")); + } + + private static class TestingExecutionBase<T> implements Metrics.ControllerExecution<T> { + @Override + public String controllerName() { + return controllerName; + } + + @Override + public String successTypeName(T o) { + return "both"; + } + + @Override + public ResourceID resourceID() { + return resourceId; + } + + @Override + public Map<String, Object> metadata() { + return metadata; + } + + @Override + public String name() { + return "reconcile"; + } + + @Override + public T execute() throws Exception { + Thread.sleep(1000); + return null; + } + } + + private static class FooTestingExecutionBase<T> implements Metrics.ControllerExecution<T> { + @Override + public String controllerName() { + return controllerName; + } + + @Override + public String successTypeName(T o) { + return "resource"; + } + + @Override + public ResourceID resourceID() { + return resourceId; + } + + @Override + public Map<String, Object> metadata() { + return metadata; + } + + @Override + public String name() { + return "reconcile"; + } + + @Override + public T execute() throws Exception { + throw new NoSuchFieldException("Foo exception"); + } + } + + private HasMetadata buildNamespacedResource() { + var cm = new ConfigMap(); + cm.setMetadata(new ObjectMetaBuilder() + .withName(TEST_RESOURCE_NAME) + .withNamespace(DEFAULT_NAMESPACE) + .build()); + return cm; + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java new file mode 100644 index 00000000..5c517634 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java @@ -0,0 +1,197 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.kubernetes.operator.probe; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; +import io.javaoperatorsdk.operator.Operator; +import io.javaoperatorsdk.operator.RuntimeInfo; +import io.javaoperatorsdk.operator.api.config.ResourceConfiguration; +import io.javaoperatorsdk.operator.health.InformerHealthIndicator; +import io.javaoperatorsdk.operator.health.InformerWrappingEventSourceHealthIndicator; +import io.javaoperatorsdk.operator.health.Status; +import org.apache.spark.kubernetes.operator.health.SentinelManager; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.BeforeEach; + +import javax.validation.constraints.NotNull; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@EnableKubernetesMockClient(crud = true) +class HealthProbeTest { + public static Operator operator; + public static Operator sparkConfMonitor; + public static List<Operator> operators; + @NotNull + KubernetesClient kubernetesClient; + private AtomicBoolean isRunning; + private AtomicBoolean isRunning2; + private Map<String, Map<String, InformerWrappingEventSourceHealthIndicator>> + unhealthyEventSources = new HashMap<>(); + private Map<String, Map<String, InformerWrappingEventSourceHealthIndicator>> + unhealthyEventSources2 = new HashMap<>(); + + @BeforeAll + public static void beforeAll() { + operator = mock(Operator.class); + sparkConfMonitor = mock(Operator.class); + operators = Arrays.asList(operator, sparkConfMonitor); + } + + @BeforeEach + public void beforeEach() { + isRunning = new AtomicBoolean(false); + isRunning2 = new AtomicBoolean(false); + var runtimeInfo = + new RuntimeInfo(new Operator( + overrider -> overrider.withKubernetesClient(kubernetesClient))) { + @Override + public boolean isStarted() { + return isRunning.get(); + } + + @Override + public Map<String, Map<String, InformerWrappingEventSourceHealthIndicator>> + unhealthyInformerWrappingEventSourceHealthIndicator() { + return unhealthyEventSources; + } + }; + + var runtimeInfo2 = + new RuntimeInfo(new Operator( + overrider -> overrider.withKubernetesClient(kubernetesClient))) { + @Override + public boolean isStarted() { + return isRunning2.get(); + } + + @Override + public Map<String, Map<String, InformerWrappingEventSourceHealthIndicator>> + unhealthyInformerWrappingEventSourceHealthIndicator() { + return unhealthyEventSources2; + } + }; + + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(sparkConfMonitor.getRuntimeInfo()).thenReturn(runtimeInfo2); + } + + @Test + void testHealthProbeWithInformerHealthWithMultiOperators() { + var healthyProbe = new HealthProbe(operators); + isRunning.set(true); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when the spark conf monitor operator is not running"); + isRunning2.set(true); + assertTrue(healthyProbe.isHealthy(), + "Healthy Probe should pass when both operators are running"); + + unhealthyEventSources2.put( + "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when monitor's informer health is not healthy"); + unhealthyEventSources2.clear(); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + @Test + void testHealthProbeWithInformerHealthWithSingleOperator() { + var healthyProbe = new HealthProbe(Arrays.asList(operator)); + assertFalse(healthyProbe.isHealthy(), + "Health Probe should fail when operator is not running"); + isRunning.set(true); + unhealthyEventSources.put( + "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when informer health is not healthy"); + unhealthyEventSources.clear(); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + @Test + void testHealthProbeWithSentinelHealthWithMultiOperators() { + var healthyProbe = new HealthProbe(operators); + SentinelManager sentinelManager = mock(SentinelManager.class); + healthyProbe.registerSentinelResourceManager(sentinelManager); + isRunning.set(true); + isRunning2.set(true); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(false); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when sentinels report failures"); + + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + private static InformerWrappingEventSourceHealthIndicator informerHealthIndicator( + Map<String, Status> informerStatuses) { + Map<String, InformerHealthIndicator> informers = new HashMap<>(); + informerStatuses.forEach( + (n, s) -> + informers.put( + n, + new InformerHealthIndicator() { + @Override + public boolean hasSynced() { + return false; + } + + @Override + public boolean isWatching() { + return false; + } + + @Override + public boolean isRunning() { + return false; + } + + @Override + public Status getStatus() { + return s; + } + + @Override + public String getTargetNamespace() { + return null; + } + })); + + return new InformerWrappingEventSourceHealthIndicator() { + @Override + public Map<String, InformerHealthIndicator> informerHealthIndicators() { + return informers; + } + + @Override + public ResourceConfiguration getInformerConfiguration() { + return null; + } + }; + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java new file mode 100644 index 00000000..c1c3e30b --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.kubernetes.operator.probe; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; +import io.javaoperatorsdk.operator.Operator; +import io.javaoperatorsdk.operator.RuntimeInfo; +import org.apache.spark.kubernetes.operator.health.SentinelManager; +import org.junit.jupiter.api.Test; + +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Arrays; +import java.util.HashMap; + +import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorProbePort; +import static org.apache.spark.kubernetes.operator.probe.ProbeService.HEALTHZ; +import static org.apache.spark.kubernetes.operator.probe.ProbeService.STARTUP; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@SuppressWarnings("PMD.JUnitTestsShouldIncludeAssert") +@EnableKubernetesMockClient +class ProbeServiceTest { + @Test + void testHealthProbeEndpointWithStaticProperties() throws Exception { + Operator operator = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); + SentinelManager sentinelManager = mock(SentinelManager.class); + when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + ProbeService probeService = new ProbeService(Arrays.asList(operator), sentinelManager); + probeService.start(); + hitHealthyEndpoint(); + probeService.stop(); + } + + @Test + void testHealthProbeEndpointWithDynamicProperties() throws Exception { + Operator operator = mock(Operator.class); + Operator operator1 = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1); + + when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); + when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true); + + SentinelManager sentinelManager = mock(SentinelManager.class); + when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + ProbeService probeService = + new ProbeService(Arrays.asList(operator, operator1), sentinelManager); + probeService.start(); + hitHealthyEndpoint(); + probeService.stop(); + } + + @Test + void testReadinessProbeEndpointWithDynamicProperties() throws Exception { + Operator operator = mock(Operator.class); + Operator operator1 = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + 
when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1);
+
+        when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true);
+        when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true);
+
+        SentinelManager sentinelManager = mock(SentinelManager.class);
+        KubernetesClient client = mock(KubernetesClient.class);
+        when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn(
+                new HashMap<>());
+        when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn(
+                new HashMap<>());
+        when(operator1.getKubernetesClient()).thenReturn(client);
+        ProbeService probeService =
+                new ProbeService(Arrays.asList(operator, operator1), sentinelManager);
+        probeService.start();
+        hitStartedUpEndpoint();
+        probeService.stop();
+    }
+
+    private void hitHealthyEndpoint() throws Exception {
+        URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + HEALTHZ);
+        HttpURLConnection connection = (HttpURLConnection) u.openConnection();
+        connection.setConnectTimeout(100000);
+        connection.connect();
+        assertEquals(200, connection.getResponseCode(), "Health Probe should return 200");
+    }
+
+    private void hitStartedUpEndpoint() throws Exception {
+        URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + STARTUP);
+        HttpURLConnection connection = (HttpURLConnection) u.openConnection();
+        connection.setConnectTimeout(100000);
+        connection.connect();
+        assertEquals(200, connection.getResponseCode(),
+                "Startup probe should return 200 when operators are ready");
+    }
+}
diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java
new file mode 100644
index 00000000..c03d9e2e
--- /dev/null
+++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.spark.kubernetes.operator.probe; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.javaoperatorsdk.operator.Operator; +import io.javaoperatorsdk.operator.RuntimeInfo; +import org.apache.spark.kubernetes.operator.utils.ProbeUtil; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.sun.net.httpserver.HttpExchange; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; + +@SuppressWarnings("PMD.JUnitTestsShouldIncludeAssert") +class ReadinessProbeTest { + KubernetesClient client; + HttpExchange httpExchange; + + @BeforeEach + public void beforeEach() { + OutputStream outputStream = mock(OutputStream.class); + httpExchange = mock(HttpExchange.class); + client = mock(KubernetesClient.class); + when(httpExchange.getResponseBody()).thenReturn(outputStream); + } + + @Test + void testHandleSucceed() throws IOException { + Operator operator = mock(Operator.class); + Operator sparkConfMonitor = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo sparkConfMonitorRuntimeInfo = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(runtimeInfo.isStarted()).thenReturn(true); + when(sparkConfMonitor.getRuntimeInfo()).thenReturn(sparkConfMonitorRuntimeInfo); + when(sparkConfMonitorRuntimeInfo.isStarted()).thenReturn(true); + when(sparkConfMonitor.getKubernetesClient()).thenReturn(client); + ReadinessProbe readinessProbe = new ReadinessProbe(Arrays.asList(operator)); + try (MockedStatic mockedStatic = Mockito.mockStatic(ProbeUtil.class)) { + readinessProbe.handle(httpExchange); + mockedStatic.verify(() -> ProbeUtil.sendMessage(httpExchange, 200, "started")); + } + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java new file mode 100644 index 00000000..2426df1d --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
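The mocked-static verification above implies the shape of the readiness handler. A hedged sketch, assuming ReadinessProbe keeps the operator list and delegates responses to the ProbeUtil.sendMessage helper mocked in the test; the failure branch is an assumption, since only the success path is asserted:

    public void handle(HttpExchange exchange) throws IOException {
        boolean started = operators.stream().allMatch(
                op -> op.getRuntimeInfo() != null && op.getRuntimeInfo().isStarted());
        if (started) {
            ProbeUtil.sendMessage(exchange, 200, "started"); // matches the verified call
        } else {
            ProbeUtil.sendMessage(exchange, 500, "operators not started"); // assumed failure path
        }
    }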
+ *
+ */
+
+package org.apache.spark.kubernetes.operator.reconciler;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import org.apache.spark.kubernetes.operator.ApplicationClientWorker;
+import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+
+import java.time.Instant;
+import java.util.Map;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+class SparkApplicationReconcileUtilsTest {
+
+    @Test
+    void testForceDeleteEnabled() {
+        SparkApplication app = new SparkApplication();
+        app.getStatus().getCurrentState().setLastTransitionTime(
+                Instant.now().minusSeconds(5).toString());
+        app.getSpec().getApplicationTolerations().getApplicationTimeoutConfig()
+                .setForceTerminationGracePeriodMillis(3000L);
+        Assertions.assertTrue(SparkApplicationReconcileUtils.enableForceDelete(app));
+    }
+
+    @Test
+    void testBuildResourceSpecCoversBasicOverride() {
+        SparkApplication app = new SparkApplication();
+        app.setMetadata(new ObjectMetaBuilder()
+                .withNamespace("foo")
+                .withName("bar-app")
+                .withUid("uid")
+                .build());
+        KubernetesClient mockClient = mock(KubernetesClient.class);
+        Pod mockDriver = mock(Pod.class);
+        when(mockDriver.getMetadata()).thenReturn(new ObjectMeta());
+        try (MockedStatic<ApplicationClientWorker> worker =
+                     Mockito.mockStatic(ApplicationClientWorker.class)) {
+            ApplicationResourceSpec mockSpec = mock(ApplicationResourceSpec.class);
+            when(mockSpec.getConfiguredPod()).thenReturn(mockDriver);
+            ArgumentCaptor<Map<String, String>> captor = ArgumentCaptor.forClass(Map.class);
+            worker.when(() -> ApplicationClientWorker.getResourceSpec(
+                    any(), any(), captor.capture())).thenReturn(mockSpec);
+            ApplicationResourceSpec spec = SparkApplicationReconcileUtils.buildResourceSpec(app,
+                    mockClient);
+            worker.verify(() -> ApplicationClientWorker.getResourceSpec(eq(app),
+                    eq(mockClient), any()));
+            Map<String, String> props = captor.getValue();
+            Assertions.assertTrue(props.containsKey("spark.kubernetes.namespace"));
+            Assertions.assertEquals("foo", props.get("spark.kubernetes.namespace"));
+            ArgumentCaptor<ObjectMeta> metaArgumentCaptor =
+                    ArgumentCaptor.forClass(ObjectMeta.class);
+            verify(mockDriver).setMetadata(metaArgumentCaptor.capture());
+            Assertions.assertEquals(mockSpec, spec);
+            ObjectMeta metaOverride = metaArgumentCaptor.getValue();
+            Assertions.assertEquals(1, metaOverride.getOwnerReferences().size());
+            Assertions.assertEquals("bar-app",
+                    metaOverride.getOwnerReferences().get(0).getName());
+            Assertions.assertEquals("uid",
+                    metaOverride.getOwnerReferences().get(0).getUid());
+            Assertions.assertEquals(app.getKind(),
+                    metaOverride.getOwnerReferences().get(0).getKind());
+        }
+    }
+}
diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java
new file mode 100644
index 00000000..a22a06d8
--- /dev/null
+++ 
b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator.reconciler; + +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.javaoperatorsdk.operator.api.reconciler.Context; +import io.javaoperatorsdk.operator.api.reconciler.DeleteControl; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.health.SentinelManager; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.Optional; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.when; + +class SparkApplicationReconcilerTest { + private StatusRecorder mockRecorder = mock(StatusRecorder.class); + private SentinelManager mockSentinelManager = mock(SentinelManager.class); + private KubernetesClient mockClient = mock(KubernetesClient.class); + private Context mockContext = mock(Context.class); + private Pod mockDriver = mock(Pod.class); + SparkApplication app = new SparkApplication(); + SparkApplicationReconciler reconciler = new SparkApplicationReconciler(mockRecorder, + mockSentinelManager); + + @BeforeEach + public void beforeEach() { + when(mockContext.getClient()).thenReturn(mockClient); + doNothing().when(mockRecorder).removeCachedStatus(any(SparkApplication.class)); + doAnswer(invocation -> { + app.setStatus(invocation.getArgument(1)); + return null; + }).when(mockRecorder).persistStatus(any(SparkApplicationContext.class), + any(ApplicationStatus.class)); + } + + @Test + void testCleanupRunningApp() { + try (MockedConstruction mockAppContext = mockConstruction( + SparkApplicationContext.class, (mock, context) -> { + when(mock.getSparkApplication()).thenReturn(app); + when(mock.getClient()).thenReturn(mockClient); + when(mock.getDriverPod()).thenReturn(Optional.of(mockDriver)); + 
when(mock.getDriverPodSpec()).thenReturn(mockDriver); + when(mock.getDriverPreResourcesSpec()).thenReturn(Collections.emptyList()); + when(mock.getDriverResourcesSpec()).thenReturn(Collections.emptyList()); + }); MockedStatic utils = + Mockito.mockStatic(SparkReconcilerUtils.class)) { + // delete running app + app.setStatus(app.getStatus().appendNewState(new ApplicationState( + ApplicationStateSummary.RUNNING_HEALTHY, ""))); + DeleteControl deleteControl = reconciler.cleanup(app, mockContext); + Assertions.assertFalse(deleteControl.isRemoveFinalizer()); + utils.verify(() -> SparkReconcilerUtils.deleteResourceIfExists(mockClient, + mockDriver, false)); + Assertions.assertEquals(ApplicationStateSummary.RESOURCE_RELEASED, + app.getStatus().getCurrentState().getCurrentStateSummary()); + + // proceed delete for terminated app + deleteControl = reconciler.cleanup(app, mockContext); + Assertions.assertTrue(deleteControl.isRemoveFinalizer()); + } + } +} diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java new file mode 100644 index 00000000..f0076708 --- /dev/null +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
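The cleanup test above encodes a two-phase teardown: the first reconcile pass releases resources and keeps the finalizer, the second pass removes it. A hypothetical outline of that flow, using the josdk DeleteControl factories together with the status calls the test asserts; the actual reconciler may order these steps differently:

    DeleteControl cleanup(SparkApplication app, KubernetesClient client, Pod driver) {
        ApplicationStateSummary summary =
                app.getStatus().getCurrentState().getCurrentStateSummary();
        if (!ApplicationStateSummary.RESOURCE_RELEASED.equals(summary)) {
            // Phase 1: delete the driver (non-forced) and record the transition
            SparkReconcilerUtils.deleteResourceIfExists(client, driver, false);
            app.setStatus(app.getStatus().appendNewState(new ApplicationState(
                    ApplicationStateSummary.RESOURCE_RELEASED, "resources released")));
            return DeleteControl.noFinalizerRemoval();
        }
        // Phase 2: everything released, allow the finalizer to be removed
        return DeleteControl.defaultDelete();
    }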
+ */
+
+package org.apache.spark.kubernetes.operator.utils;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+
+import java.io.File;
+import java.util.Map;
+
+import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL;
+
+public class TestUtils {
+    public static SparkApplication createMockDeployment(String namespace) {
+        var cr = new SparkApplication();
+        cr.setKind("SparkApplication");
+        cr.setApiVersion("org.apache.spark/v1alpha1");
+        cr.setSpec(cr.initSpec());
+        var meta = new ObjectMeta();
+        meta.setGeneration(0L);
+        meta.setLabels(Map.of(SENTINEL_LABEL, "true"));
+        meta.setName("sentinel");
+        meta.setNamespace(namespace);
+        cr.setMetadata(meta);
+        return cr;
+    }
+
+    public static void cleanPropertiesFile(String filePath) {
+        File myObj = new File(filePath);
+        if (!myObj.delete()) {
+            throw new RuntimeException("Failed to clean properties file: " + filePath);
+        }
+    }
+
+    public static boolean notTimedOut(long startTime, long maxWaitTimeInMills) {
+        return calculateElapsedTimeInMills(startTime) < maxWaitTimeInMills;
+    }
+
+    public static long calculateElapsedTimeInMills(long startTime) {
+        return System.currentTimeMillis() - startTime;
+    }
+}
diff --git a/spark-operator/src/test/resources/spark-operator.properties b/spark-operator/src/test/resources/spark-operator.properties
new file mode 100644
index 00000000..f7456f58
--- /dev/null
+++ b/spark-operator/src/test/resources/spark-operator.properties
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
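As a usage sketch, these helpers compose into the polling loops the operator tests rely on; checkSentinel below is a hypothetical stand-in for whatever condition a given test observes:

    SparkApplication sentinel = TestUtils.createMockDeployment("spark-demo");
    long start = System.currentTimeMillis();
    boolean reconciled = false;
    while (!reconciled && TestUtils.notTimedOut(start, 10_000L)) {
        reconciled = checkSentinel(sentinel); // hypothetical probe of the sentinel's state
    }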
+# +# +spark.operator.foo=bar \ No newline at end of file diff --git a/spark-submission-worker/.gitignore b/spark-submission-worker/.gitignore new file mode 100644 index 00000000..b63da455 --- /dev/null +++ b/spark-submission-worker/.gitignore @@ -0,0 +1,42 @@ +.gradle +build/ +!gradle/wrapper/gradle-wrapper.jar +!**/src/main/**/build/ +!**/src/test/**/build/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr +out/ +!**/src/main/**/out/ +!**/src/test/**/out/ + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache +bin/ +!**/src/main/**/bin/ +!**/src/test/**/bin/ + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store \ No newline at end of file diff --git a/spark-submission-worker/build.gradle b/spark-submission-worker/build.gradle new file mode 100644 index 00000000..d3860117 --- /dev/null +++ b/spark-submission-worker/build.gradle @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +dependencies { + implementation project(":spark-operator-api") + + implementation("org.apache.spark:spark-kubernetes_$sparkScalaVersion:$sparkVersion") { + exclude group: "org.apache.logging.log4j" + exclude group: "org.slf4j" + } + + compileOnly("org.projectlombok:lombok:$lombokVersion") + annotationProcessor("org.projectlombok:lombok:$lombokVersion") + + // logging + implementation("org.apache.logging.log4j:log4j-layout-template-json:$log4jLayoutVersion") + implementation("org.apache.logging.log4j:log4j-api:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-core:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-slf4j-impl:$log4jVersion") + implementation("org.apache.logging.log4j:log4j-1.2-api:$log4jVersion") + + testImplementation platform("org.junit:junit-bom:$junitVersion") + testImplementation "org.mockito:mockito-core:$mockitoVersion" + testImplementation "org.junit.jupiter:junit-jupiter:$junitVersion" + testImplementation "io.fabric8:kubernetes-server-mock:$fabric8Version" +} + +test { + useJUnitPlatform() +} diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java new file mode 100644 index 00000000..4d89f439 --- /dev/null +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.spark.kubernetes.operator; + +import io.fabric8.kubernetes.client.KubernetesClient; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.spark.SparkConf; +import org.apache.spark.deploy.k8s.KubernetesDriverSpec; +import org.apache.spark.deploy.k8s.submit.JavaMainAppResource; +import org.apache.spark.deploy.k8s.submit.KubernetesDriverBuilder; +import org.apache.spark.deploy.k8s.submit.MainAppResource; +import org.apache.spark.deploy.k8s.submit.PythonMainAppResource; +import org.apache.spark.deploy.k8s.submit.RMainAppResource; +import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; +import scala.Option; + +import java.util.Map; + +/** + * Similar to org.apache.spark.deploy.k8s.submit.KubernetesClientApplication + * this reads args from SparkApplication instead of starting separate spark-submit process + */ +public class ApplicationClientWorker { + + public static ApplicationResourceSpec getResourceSpec( + org.apache.spark.kubernetes.operator.SparkApplication app, + KubernetesClient client, + Map confOverrides) { + ApplicationDriverConf applicationDriverConf = buildDriverConf(app, confOverrides); + return buildResourceSpec(applicationDriverConf, client); + } + + protected static ApplicationDriverConf buildDriverConf( + org.apache.spark.kubernetes.operator.SparkApplication app, + Map confOverrides) { + ApplicationSpec applicationSpec = app.getSpec(); + SparkConf effectiveSparkConf = new SparkConf(); + if (MapUtils.isNotEmpty(applicationSpec.getSparkConf())) { + for (String confKey : applicationSpec.getSparkConf().keySet()) { + effectiveSparkConf.set(confKey, applicationSpec.getSparkConf().get(confKey)); + } + } + if (MapUtils.isNotEmpty(confOverrides)) { + for (Map.Entry entry : confOverrides.entrySet()) { + effectiveSparkConf.set(entry.getKey(), entry.getValue()); + } + } + effectiveSparkConf.set("spark.kubernetes.namespace", app.getMetadata().getNamespace()); + MainAppResource primaryResource = new JavaMainAppResource(Option.empty()); + if (StringUtils.isNotEmpty(applicationSpec.getJars())) { + primaryResource = new JavaMainAppResource(Option.apply(applicationSpec.getJars())); + effectiveSparkConf.setIfMissing("spark.jars", applicationSpec.getJars()); + } else if (StringUtils.isNotEmpty(applicationSpec.getPyFiles())) { + primaryResource = new PythonMainAppResource(applicationSpec.getPyFiles()); + effectiveSparkConf.setIfMissing("spark.submit.pyFiles", applicationSpec.getPyFiles()); + } else if (StringUtils.isNotEmpty(applicationSpec.getSparkRFiles())) { + primaryResource = new RMainAppResource(applicationSpec.getSparkRFiles()); + } + effectiveSparkConf.setMaster( + "k8s://https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"); + return ApplicationDriverConf.create(effectiveSparkConf, + 
createSparkAppId(app),
+                primaryResource,
+                applicationSpec.getMainClass(),
+                applicationSpec.getDriverArgs().toArray(new String[0]),
+                Option.apply(applicationSpec.getProxyUser()));
+    }
+
+    protected static ApplicationResourceSpec buildResourceSpec(
+            ApplicationDriverConf kubernetesDriverConf,
+            KubernetesClient client) {
+        KubernetesDriverBuilder builder = new KubernetesDriverBuilder();
+        KubernetesDriverSpec kubernetesDriverSpec =
+                builder.buildFromFeatures(kubernetesDriverConf, client);
+        return new ApplicationResourceSpec(kubernetesDriverConf, kubernetesDriverSpec);
+    }
+
+    /**
+     * The Spark application id needs to be deterministic per attempt per Spark application,
+     * to ensure operator reconciliation idempotency.
+     */
+    protected static String createSparkAppId(
+            final org.apache.spark.kubernetes.operator.SparkApplication app) {
+        long attemptId = 0L;
+        if (app.getStatus() != null && app.getStatus().getCurrentAttemptSummary() != null) {
+            attemptId = app.getStatus().getCurrentAttemptSummary().getAttemptInfo().getId();
+        }
+        return String.format("%s-%d", app.getMetadata().getName(), attemptId);
+    }
+}
diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java
new file mode 100644
index 00000000..92f5ea3f
--- /dev/null
+++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
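As a quick worked illustration of the id format in createSparkAppId (the application name and attempt id here are made up): an app named "my-app" with no recorded attempt yields "my-app-0"; once the current attempt id is 2, the same call deterministically yields "my-app-2":

    String appId = String.format("%s-%d", "my-app", 2L); // -> "my-app-2", stable across reconcile loops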
+ * + */ + +package org.apache.spark.kubernetes.operator; + +import org.apache.spark.SparkConf; +import org.apache.spark.deploy.k8s.Config; +import org.apache.spark.deploy.k8s.KubernetesDriverConf; +import org.apache.spark.deploy.k8s.KubernetesVolumeUtils; +import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils; +import org.apache.spark.deploy.k8s.submit.MainAppResource; +import scala.Option; + +public class ApplicationDriverConf extends KubernetesDriverConf { + private ApplicationDriverConf(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { + super(sparkConf, appId, mainAppResource, mainClass, appArgs, proxyUser); + } + + public static ApplicationDriverConf create(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { + // pre-create check only + KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, + Config.KUBERNETES_EXECUTOR_VOLUMES_PREFIX()); + return new ApplicationDriverConf(sparkConf, appId, mainAppResource, mainClass, appArgs, + proxyUser); + } + + /** + * Application managed by operator has a deterministic prefix + */ + @Override + public String resourceNamePrefix() { + return sparkConf().getOption(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()).isEmpty() + ? appId() : sparkConf().get(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()); + } + + public String configMapNameDriver() { + return KubernetesClientUtils.configMapName( + String.format("spark-drv-%s", resourceNamePrefix())); + } +} diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java new file mode 100644 index 00000000..7c2a6a93 --- /dev/null +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
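Following the same determinism, the driver ConfigMap name in ApplicationDriverConf is derived from the resource-name prefix. The values below are illustrative only, and subject to whatever name-length handling KubernetesClientUtils.configMapName applies:

    // With no explicit driver pod-name prefix configured:
    // resourceNamePrefix()  -> "my-app-0"            (falls back to appId())
    // configMapNameDriver() -> "spark-drv-my-app-0"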
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerBuilder;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.api.model.PodBuilder;
+import lombok.Getter;
+import org.apache.spark.deploy.k8s.Config;
+import org.apache.spark.deploy.k8s.Constants;
+import org.apache.spark.deploy.k8s.KubernetesDriverSpec;
+import org.apache.spark.deploy.k8s.SparkPod;
+import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils;
+import scala.Tuple2;
+import scala.collection.JavaConverters;
+import scala.collection.immutable.HashMap;
+import scala.collection.immutable.Map;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Resembles the resources that would be directly launched by the operator; the operator
+ * creates them later during reconciliation. This includes the tasks below:
+ *   + Add ConfigMap as a pre-resource for the driver
+ *   + Convert Scala types to Java for easier reference
+ *

+ * This is not thread safe + */ +public class ApplicationResourceSpec { + @Getter + private final Pod configuredPod; + @Getter + private final List driverPreResources; + @Getter + private final List driverResources; + private final ApplicationDriverConf kubernetesDriverConf; + + public ApplicationResourceSpec(ApplicationDriverConf kubernetesDriverConf, + KubernetesDriverSpec kubernetesDriverSpec) { + this.kubernetesDriverConf = kubernetesDriverConf; + String namespace = + kubernetesDriverConf.sparkConf().get(Config.KUBERNETES_NAMESPACE().key()); + Map confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap( + kubernetesDriverConf.configMapNameDriver(), + kubernetesDriverConf.sparkConf(), kubernetesDriverSpec.systemProperties()) + .$plus(new Tuple2<>(Config.KUBERNETES_NAMESPACE().key(), namespace)); + SparkPod sparkPod = addConfigMap(kubernetesDriverSpec.pod(), confFilesMap); + this.configuredPod = new PodBuilder(sparkPod.pod()) + .editSpec() + .addToContainers(sparkPod.container()) + .endSpec() + .build(); + this.driverPreResources = new ArrayList<>( + JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverPreKubernetesResources())); + this.driverResources = new ArrayList<>( + JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverKubernetesResources())); + this.driverResources.add( + KubernetesClientUtils.buildConfigMap(kubernetesDriverConf.configMapNameDriver(), + confFilesMap, new HashMap<>())); + this.driverPreResources.forEach(r -> r.getMetadata().setNamespace(namespace)); + this.driverResources.forEach(r -> r.getMetadata().setNamespace(namespace)); + } + + private SparkPod addConfigMap(SparkPod pod, Map confFilesMap) { + Container containerWithVolume = new ContainerBuilder(pod.container()) + .addNewEnv() + .withName(org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR()) + .withValue(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) + .endEnv() + .addNewVolumeMount() + .withName(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_VOLUME_DRIVER()) + .withMountPath(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) + .endVolumeMount() + .build(); + Pod podWithVolume = new PodBuilder(pod.pod()) + .editSpec() + .addNewVolume() + .withName(Constants.SPARK_CONF_VOLUME_DRIVER()) + .withNewConfigMap() + .withItems(JavaConverters.seqAsJavaList( + KubernetesClientUtils.buildKeyToPathObjects(confFilesMap))) + .withName(kubernetesDriverConf.configMapNameDriver()) + .endConfigMap() + .endVolume() + .endSpec() + .build(); + return new SparkPod(podWithVolume, containerWithVolume); + } +} diff --git a/spark-submission-worker/src/main/resources/EcsLayout.json b/spark-submission-worker/src/main/resources/EcsLayout.json new file mode 100644 index 00000000..8d215ab5 --- /dev/null +++ b/spark-submission-worker/src/main/resources/EcsLayout.json @@ -0,0 +1,49 @@ +{ + "@timestamp": { + "$resolver": "timestamp", + "pattern": { + "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", + "timeZone": "UTC" + } + }, + "ecs.version": "1.2.0", + "log.level": { + "$resolver": "level", + "field": "name" + }, + "message": { + "$resolver": "message", + "stringified": true + }, + "process.thread.name": { + "$resolver": "thread", + "field": "name" + }, + "log.logger": { + "$resolver": "logger", + "field": "name" + }, + "labels": { + "$resolver": "mdc", + "flatten": true, + "stringified": true + }, + "tags": { + "$resolver": "ndc" + }, + "error.type": { + "$resolver": "exception", + "field": "className" + }, + "error.message": { + "$resolver": "exception", + "field": "message" + }, + 
"error.stack_trace": { + "$resolver": "exception", + "field": "stackTrace", + "stackTrace": { + "stringified": true + } + } +} diff --git a/spark-submission-worker/src/main/resources/log4j2.properties b/spark-submission-worker/src/main/resources/log4j2.properties new file mode 100644 index 00000000..9285fa00 --- /dev/null +++ b/spark-submission-worker/src/main/resources/log4j2.properties @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +status=debug +strict=true +dest=out +name=PropertiesConfig +property.filename=/tmp/spark-operator +filter.threshold.type=ThresholdFilter +filter.threshold.level=debug +# console +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d %p %X %C{1.} [%t] %m%n +appender.console.filter.threshold.type=ThresholdFilter +appender.console.filter.threshold.level=info +# rolling JSON +appender.rolling.type=RollingFile +appender.rolling.name=RollingFile +appender.rolling.append=true +appender.rolling.fileName=${filename}.log +appender.rolling.filePattern=${filename}-%i.log.gz +appender.rolling.layout.type=JsonTemplateLayout +appender.rolling.layout.eventTemplateUri=classpath:EcsLayout.json +appender.rolling.policies.type=Policies +appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=100MB +appender.rolling.strategy.type=DefaultRolloverStrategy +appender.rolling.strategy.max=20 +appender.rolling.immediateFlush=true +# chatty loggers +rootLogger.level=all +logger.netty.name=io.netty +logger.netty.level=warn +log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.rolling.ref=RollingFile diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java new file mode 100644 index 00000000..95137fc5 --- /dev/null +++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.spark.kubernetes.operator;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import org.apache.spark.SparkConf;
+import org.apache.spark.deploy.k8s.submit.JavaMainAppResource;
+import org.apache.spark.deploy.k8s.submit.PythonMainAppResource;
+import org.apache.spark.deploy.k8s.submit.RMainAppResource;
+import org.apache.spark.kubernetes.operator.spec.ApplicationSpec;
+import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary;
+import org.apache.spark.kubernetes.operator.status.ApplicationStatus;
+import org.apache.spark.kubernetes.operator.status.AttemptInfo;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.mockito.MockedConstruction;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockConstruction;
+import static org.mockito.Mockito.when;
+
+class ApplicationClientWorkerTest {
+    @Test
+    void buildDriverConfShouldApplySpecAndPropertiesOverride() {
+        Map<Object, List<Object>> constructorArgs = new HashMap<>();
+        try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
+                ApplicationDriverConf.class,
+                (mock, context) -> constructorArgs.put(mock,
+                        new ArrayList<>(context.arguments())))) {
+            SparkApplication mockApp = mock(SparkApplication.class);
+            ApplicationSpec mockSpec = mock(ApplicationSpec.class);
+            ObjectMeta appMeta = new ObjectMetaBuilder()
+                    .withName("app1")
+                    .withNamespace("ns1")
+                    .build();
+            Map<String, String> appProps = new HashMap<>();
+            appProps.put("foo", "bar");
+            appProps.put("spark.executor.instances", "1");
+            appProps.put("spark.kubernetes.namespace", "ns2");
+            Map<String, String> overrides = new HashMap<>();
+            overrides.put("spark.executor.instances", "5");
+            overrides.put("spark.kubernetes.namespace", "ns3");
+            when(mockSpec.getSparkConf()).thenReturn(appProps);
+            when(mockApp.getSpec()).thenReturn(mockSpec);
+            when(mockApp.getMetadata()).thenReturn(appMeta);
+            when(mockSpec.getProxyUser()).thenReturn("foo-user");
+            when(mockSpec.getMainClass()).thenReturn("foo-class");
+            when(mockSpec.getDriverArgs()).thenReturn(List.of("a", "b"));
+
+            ApplicationDriverConf conf =
+                    ApplicationClientWorker.buildDriverConf(mockApp, overrides);
+            Assertions.assertEquals(6, constructorArgs.get(conf).size());
+
+            // validate SparkConf with override
+            Assertions.assertTrue(constructorArgs.get(conf).get(0) instanceof SparkConf);
+            SparkConf createdConf = (SparkConf) constructorArgs.get(conf).get(0);
+            Assertions.assertEquals("bar", createdConf.get("foo"));
+            Assertions.assertEquals("5", createdConf.get("spark.executor.instances"));
+
+            // namespace from CR takes highest precedence
+            Assertions.assertEquals("ns1", createdConf.get("spark.kubernetes.namespace"));
+
+            // validate main resources
+            Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof JavaMainAppResource);
+            JavaMainAppResource mainResource =
+                    (JavaMainAppResource) constructorArgs.get(conf).get(2);
Assertions.assertTrue(mainResource.primaryResource().isEmpty());
+
+            Assertions.assertEquals("foo-class", constructorArgs.get(conf).get(3));
+
+            Assertions.assertTrue(constructorArgs.get(conf).get(4) instanceof String[]);
+            String[] capturedArgs = (String[]) constructorArgs.get(conf).get(4);
+            Assertions.assertEquals(2, capturedArgs.length);
+            Assertions.assertEquals("a", capturedArgs[0]);
+            Assertions.assertEquals("b", capturedArgs[1]);
+        }
+    }
+
+    @Test
+    void buildDriverConfForPythonApp() {
+        Map<Object, List<Object>> constructorArgs = new HashMap<>();
+        try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
+                ApplicationDriverConf.class,
+                (mock, context) -> constructorArgs.put(mock,
+                        new ArrayList<>(context.arguments())))) {
+            SparkApplication mockApp = mock(SparkApplication.class);
+            ApplicationSpec mockSpec = mock(ApplicationSpec.class);
+            ObjectMeta appMeta = new ObjectMetaBuilder()
+                    .withName("app1")
+                    .withNamespace("ns1")
+                    .build();
+            when(mockApp.getSpec()).thenReturn(mockSpec);
+            when(mockApp.getMetadata()).thenReturn(appMeta);
+            when(mockSpec.getPyFiles()).thenReturn("foo");
+
+            ApplicationDriverConf conf =
+                    ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap());
+            Assertions.assertEquals(6, constructorArgs.get(conf).size());
+
+            // validate main resources
+            Assertions.assertTrue(
+                    constructorArgs.get(conf).get(2) instanceof PythonMainAppResource);
+            PythonMainAppResource mainResource =
+                    (PythonMainAppResource) constructorArgs.get(conf).get(2);
+            Assertions.assertEquals("foo", mainResource.primaryResource());
+        }
+    }
+
+    @Test
+    void buildDriverConfForRApp() {
+        Map<Object, List<Object>> constructorArgs = new HashMap<>();
+        try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
+                ApplicationDriverConf.class,
+                (mock, context) -> constructorArgs.put(mock,
+                        new ArrayList<>(context.arguments())))) {
+            SparkApplication mockApp = mock(SparkApplication.class);
+            ApplicationSpec mockSpec = mock(ApplicationSpec.class);
+            ObjectMeta appMeta = new ObjectMetaBuilder()
+                    .withName("app1")
+                    .withNamespace("ns1")
+                    .build();
+            when(mockApp.getSpec()).thenReturn(mockSpec);
+            when(mockApp.getMetadata()).thenReturn(appMeta);
+            when(mockSpec.getSparkRFiles()).thenReturn("foo");
+
+            ApplicationDriverConf conf =
+                    ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap());
+            Assertions.assertEquals(6, constructorArgs.get(conf).size());
+
+            // validate main resources
+            Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof RMainAppResource);
+            RMainAppResource mainResource = (RMainAppResource) constructorArgs.get(conf).get(2);
+            Assertions.assertEquals("foo", mainResource.primaryResource());
+        }
+    }
+
+    @Test
+    void sparkAppIdShouldBeDeterministicPerAppPerAttempt() {
+        SparkApplication mockApp1 = mock(SparkApplication.class);
+        SparkApplication mockApp2 = mock(SparkApplication.class);
+        ApplicationStatus mockStatus1 = mock(ApplicationStatus.class);
+        ApplicationStatus mockStatus2 = mock(ApplicationStatus.class);
+        String appName1 = "app1";
+        String appName2 = "app2";
+        ObjectMeta appMeta1 = new ObjectMetaBuilder()
+                .withName(appName1)
+                .withNamespace("ns")
+                .build();
+        ObjectMeta appMeta2 = new ObjectMetaBuilder()
+                .withName(appName2)
+                .withNamespace("ns")
+                .build();
+        when(mockApp1.getMetadata()).thenReturn(appMeta1);
+        when(mockApp2.getMetadata()).thenReturn(appMeta2);
+        when(mockApp1.getStatus()).thenReturn(mockStatus1);
+        when(mockApp2.getStatus()).thenReturn(mockStatus2);
+
+        String appId1 = ApplicationClientWorker.createSparkAppId(mockApp1);
+        String appId2 =
ApplicationClientWorker.createSparkAppId(mockApp2); + + Assertions.assertNotEquals(appId1, appId2); + Assertions.assertTrue(appId1.contains(appName1)); + // multiple invoke shall give same result + Assertions.assertEquals(appId1, ApplicationClientWorker.createSparkAppId(mockApp1)); + + ApplicationAttemptSummary mockAttempt = mock(ApplicationAttemptSummary.class); + AttemptInfo mockAttemptInfo = mock(AttemptInfo.class); + when(mockAttempt.getAttemptInfo()).thenReturn(mockAttemptInfo); + when(mockAttemptInfo.getId()).thenReturn(2L); + when(mockStatus1.getCurrentAttemptSummary()).thenReturn(mockAttempt); + + String appId1Attempt2 = ApplicationClientWorker.createSparkAppId(mockApp1); + Assertions.assertTrue(appId1Attempt2.contains(appName1)); + Assertions.assertNotEquals(appId1, appId1Attempt2); + + Assertions.assertEquals(appId1Attempt2, ApplicationClientWorker.createSparkAppId(mockApp1)); + } +} diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java new file mode 100644 index 00000000..9f744d93 --- /dev/null +++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.spark.kubernetes.operator; + +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.Container; +import io.fabric8.kubernetes.api.model.ContainerBuilder; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.api.model.PodBuilder; +import io.fabric8.kubernetes.api.model.Volume; +import io.fabric8.kubernetes.api.model.VolumeMount; +import org.apache.spark.SparkConf; +import org.apache.spark.deploy.k8s.KubernetesDriverSpec; +import org.apache.spark.deploy.k8s.SparkPod; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import scala.collection.immutable.HashMap; + +import java.util.Collections; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class ApplicationResourceSpecTest { + + @Test + void testDriverResourceIncludesConfigMap() { + ApplicationDriverConf mockConf = mock(ApplicationDriverConf.class); + when(mockConf.configMapNameDriver()).thenReturn("foo-configmap"); + when(mockConf.sparkConf()).thenReturn( + new SparkConf().set("spark.kubernetes.namespace", "foo-namespace")); + + KubernetesDriverSpec mockSpec = mock(KubernetesDriverSpec.class); + Container container = new ContainerBuilder() + .withName("foo-container") + .addNewVolumeMount() + .withName("placeholder") + .endVolumeMount() + .build(); + Pod pod = new PodBuilder() + .withNewMetadata() + .endMetadata() + .withNewSpec() + .addNewContainer() + .withName("placeholder") + .endContainer() + .addNewVolume() + .withName("placeholder") + .endVolume() + .endSpec() + .build(); + SparkPod sparkPod = new SparkPod(pod, container); + when(mockSpec.driverKubernetesResources()).thenReturn( + scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); + when(mockSpec.driverPreKubernetesResources()).thenReturn( + scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); + when(mockSpec.pod()).thenReturn(sparkPod); + when(mockSpec.systemProperties()).thenReturn(new HashMap<>()); + + ApplicationResourceSpec applicationResourceSpec = + new ApplicationResourceSpec(mockConf, mockSpec); + + Assertions.assertEquals(1, applicationResourceSpec.getDriverResources().size()); + Assertions.assertEquals(ConfigMap.class, + applicationResourceSpec.getDriverResources().get(0).getClass()); + + ConfigMap proposedConfigMap = + (ConfigMap) applicationResourceSpec.getDriverResources().get(0); + Assertions.assertEquals("foo-configmap", proposedConfigMap.getMetadata().getName()); + Assertions.assertEquals("foo-namespace", + proposedConfigMap.getData().get("spark.kubernetes.namespace")); + Assertions.assertEquals("foo-namespace", proposedConfigMap.getMetadata().getNamespace()); + + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().size()); + Volume proposedConfigVolume = + applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().get(1); + Assertions.assertEquals("foo-configmap", proposedConfigVolume.getConfigMap().getName()); + + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().size()); + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) + .getVolumeMounts().size()); + VolumeMount proposedConfigVolumeMount = + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) + .getVolumeMounts().get(1); + Assertions.assertEquals(proposedConfigVolume.getName(), + proposedConfigVolumeMount.getName()); + } + +} From 
89d52c76696a0563555658284af60494def12108 Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Thu, 4 Apr 2024 16:21:18 -0700
Subject: [PATCH 02/14] Update supported Spark and JDK versions, with multiple
 style fixes and document updates.

* Update supported Spark versions to remove EOL releases (3.4.2, which does not
  have an official image yet, is added as an interim example)
* Update restartPolicy keys to align with the k8s naming pattern
* Rename the /startup probe path to /readyz
* Update deploymentMode keys
* Remove unused exclude filter for SpotBugs
* Remove unused checkstyle suppression config
* Support overriding the dynamic config name from the chart deployment
* Update operations doc for default chart values
* Fix typos in docs
* Restore an unexpectedly removed entry in .gitignore
---
 .gitattributes                                |  3 -
 .gitignore                                    | 13 ++-
 .../sparkapplications.org.apache.spark-v1.yml | 19 ++--
 .../templates/_helpers.tpl                    | 40 +++++----
 .../templates/spark-operator.yaml             |  4 +-
 .../spark-kubernetes-operator/values.yaml     |  8 +-
 build.gradle                                  |  4 -
 config/checkstyle/checkstyle-suppressions.xml | 63 -------------
 config/spotbugs/exclude.xml                   | 25 ------
 .../operator/spec/ApplicationSpec.java        |  2 +-
 .../operator/spec/ApplicationTolerations.java |  2 +-
 .../operator/spec/DeploymentMode.java         |  4 +-
 .../kubernetes/operator/spec/JDKVersion.java  |  3 +-
 .../operator/spec/RestartConfig.java          |  2 +-
 .../operator/spec/RestartPolicy.java          | 18 ++--
 .../operator/spec/SparkVersion.java           |  6 --
 spark-operator-docs/architecture.md           |  2 +-
 spark-operator-docs/configuration.md          |  2 +-
 spark-operator-docs/getting_started.md        |  4 +-
 spark-operator-docs/metrics_logging.md        |  7 +-
 spark-operator-docs/operations.md             | 88 ++++++++++---------
 spark-operator-docs/operator_probes.md        |  2 +-
 spark-operator-docs/spark_application.md      | 10 +--
 .../operator/config/SparkOperatorConf.java    |  2 +-
 .../operator/probe/ProbeService.java          |  4 +-
 .../reconcilesteps/AppCleanUpStep.java        |  2 +-
 .../reconcilesteps/AppValidateStep.java       |  2 +-
 .../src/main/resources/streaming.yaml         |  2 +-
 .../operator/health/SentinelManagerTest.java  |  8 +-
 .../operator/probe/ProbeServiceTest.java      |  4 +-
 .../kubernetes/operator/utils/TestUtils.java  |  5 +-
 31 files changed, 134 insertions(+), 226 deletions(-)
 delete mode 100644 config/checkstyle/checkstyle-suppressions.xml
 delete mode 100644 config/spotbugs/exclude.xml

diff --git a/.gitattributes b/.gitattributes
index afd59d8f..617550d6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -3,6 +3,3 @@
 #
 # Linux start script should use lf
 /gradlew text eol=lf
-
-# These are Windows script files and should use crlf
-*.bat text eol=crlf
diff --git a/.gitignore b/.gitignore
index 6334619e..96bb0346 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,24 +38,29 @@ Thumbs.db
 .gradle
 .m2
 
-# IntelliJ specific files/directories
+# IntelliJ specific files/directories #
+#######################################
 out
 .idea
 *.ipr
 *.iws
 *.iml
 
-# Eclipse specific files/directories
+# Eclipse specific files/directories #
+######################################
 .classpath
 .project
 .settings
 .metadata
 **/.cache-main
 
-# build
-**/generated
+# Other build and generated files #
+###################################
 .out
+.java-version
+.vscode
 build/
 **/build/
+**/generated
 lib
 target
diff --git a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
index ec02b5c3..4bf760cc 100644
--- a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
+++ b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml @@ -29,12 +29,6 @@ spec: - v3_5_0 - v3_4_2 - v3_4_1 - - v3_4_0 - - v3_3_3 - - v3_3_2 - - v3_3_1 - - v3_3_0 - - v3_2_0 type: string scalaVersion: enum: @@ -45,6 +39,7 @@ spec: enum: - Java11 - Java17 + - Java23 type: string required: - sparkVersion @@ -59,8 +54,8 @@ spec: type: string deploymentMode: enum: - - CLUSTER_MODE - - CLIENT_MODE + - ClusterMode + - ClientMode type: string proxyUser: type: string @@ -74,10 +69,10 @@ spec: properties: restartPolicy: enum: - - ALWAYS - - NEVER - - ON_FAILURE - - ON_INFRASTRUCTURE_FAILURE + - Always + - Never + - OnFailure + - OnInfrastructureFailure type: string maxRestartAttempts: type: integer diff --git a/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl b/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl index 34ad34d8..cb442b18 100644 --- a/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl +++ b/build-tools/helm/spark-kubernetes-operator/templates/_helpers.tpl @@ -1,19 +1,21 @@ -{{/*Licensed to the Apache Software Foundation (ASF) under one*/}} -{{/*or more contributor license agreements. See the NOTICE file*/}} -{{/*distributed with this work for additional information*/}} -{{/*regarding copyright ownership. The ASF licenses this file*/}} -{{/*to you under the Apache License, Version 2.0 (the*/}} -{{/*"License"); you may not use this file except in compliance*/}} -{{/*with the License. You may obtain a copy of the License at*/}} - -{{/* http://www.apache.org/licenses/LICENSE-2.0*/}} - -{{/*Unless required by applicable law or agreed to in writing,*/}} -{{/*software distributed under the License is distributed on an*/}} -{{/*"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY*/}} -{{/*KIND, either express or implied. See the License for the*/}} -{{/*specific language governing permissions and limitations*/}} -{{/*under the License.*/}} +{{/* +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +*/}} {{/* Expand the name of the chart. @@ -106,6 +108,12 @@ Create the name of the operator service account to use {{- end }} {{- end }} +{{/* +Create the name of the operator dynamic configmap to use +*/}} +{{- define "spark-operator.dynamicConfigMapName" -}} +{{- default (include "spark-operator.fullname" .) 
.Values.operatorConfiguration.dynamicConfig.name }} +{{- end }} {{/* Default property overrides diff --git a/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml b/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml index b8de2100..91f64da0 100644 --- a/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml +++ b/build-tools/helm/spark-kubernetes-operator/templates/spark-operator.yaml @@ -122,7 +122,7 @@ spec: readinessProbe: httpGet: port: probe-port - path: /startup + path: /readyz failureThreshold: {{ include "spark-operator.readinessProbe.failureThreshold" . }} periodSeconds: {{ include "spark-operator.readinessProbe.periodSeconds" . }} livenessProbe: @@ -198,7 +198,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: spark-kubernetes-operator-dynamic-configuration + name: {{ include "spark-operator.dynamicConfigMapName" . }} namespace: {{ .Release.Namespace }} labels: {{- include "spark-operator.dynamicConfigLabels" . | nindent 4 }} diff --git a/build-tools/helm/spark-kubernetes-operator/values.yaml b/build-tools/helm/spark-kubernetes-operator/values.yaml index 8a4af554..567d39bf 100644 --- a/build-tools/helm/spark-kubernetes-operator/values.yaml +++ b/build-tools/helm/spark-kubernetes-operator/values.yaml @@ -52,11 +52,11 @@ operatorDeployment: limits: cpu: "1" ephemeral-storage: 2Gi - memory: 2Gi + memory: 4Gi requests: cpu: "1" ephemeral-storage: 2Gi - memory: 2Gi + memory: 4Gi probes: port: 18080 livenessProbe: @@ -115,6 +115,7 @@ appResources: watchGivenNamespacesOnly: false data: # - "spark-demo" + # - "spark-demo" serviceAccounts: create: true name: "spark" @@ -137,7 +138,7 @@ appResources: data: # When enabled, sentinel resources will be deployed to namespace(s) provided in data field. # Note that sentinelNamespaces list shall be a subset of appResources.namespaces.data. -# - "spark-demo" + # - "spark-demo" # App resources are by default annotated to avoid app abort due to operator upgrade annotations: # "helm.sh/resource-policy": keep @@ -170,6 +171,7 @@ operatorConfiguration: # If set to true, a config map would be created & watched by operator as source of truth # for hot properties loading. 
create: false + name: spark-kubernetes-operator-dynamic-configuration annotations: # "helm.sh/resource-policy": keep data: diff --git a/build.gradle b/build.gradle index 2a576b80..2e3655bb 100644 --- a/build.gradle +++ b/build.gradle @@ -51,9 +51,6 @@ subprojects { checkstyle { toolVersion = checkstyleVersion configFile = file("$rootDir/config/checkstyle/checkstyle.xml") - configFile = file("$rootDir/config/checkstyle/checkstyle.xml") - configProperties = [ - "org.checkstyle.google.suppressionfilter.config": rootProject.file("config/checkstyle/checkstyle-suppressions.xml")] ignoreFailures = false showViolations = true } @@ -73,7 +70,6 @@ subprojects { afterEvaluate { reportsDir = file("${project.reporting.baseDir}/findbugs") } - excludeFilter = file("$rootDir/config/spotbugs/exclude.xml") ignoreFailures = false } diff --git a/config/checkstyle/checkstyle-suppressions.xml b/config/checkstyle/checkstyle-suppressions.xml deleted file mode 100644 index 86098403..00000000 --- a/config/checkstyle/checkstyle-suppressions.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml deleted file mode 100644 index c7d2498e..00000000 --- a/config/spotbugs/exclude.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java index 9d07d4b7..1dab3642 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java @@ -46,7 +46,7 @@ public class ApplicationSpec extends BaseSpec { protected String sparkRFiles; protected String files; @Builder.Default - protected DeploymentMode deploymentMode = DeploymentMode.CLUSTER_MODE; + protected DeploymentMode deploymentMode = DeploymentMode.ClusterMode; protected String proxyUser; @Builder.Default protected List driverArgs = new ArrayList<>(); diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java index b9254c1b..a8f36724 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java @@ -45,7 +45,7 @@ public class ApplicationTolerations { * If disabled, operator would not attempt to delete resources after app terminates. * While this can be helpful in dev phase, it shall not be enabled for prod use cases. * Caution: in order to avoid resource conflicts among multiple attempts, this can be disabled - * iff restart policy is set to NEVER. + * iff restart policy is set to Never. 
*/ @Builder.Default protected Boolean deleteOnTermination = true;
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java index d13cddfe..cd21b5ff 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java @@ -19,6 +19,6 @@ package org.apache.spark.kubernetes.operator.spec; public enum DeploymentMode { - CLUSTER_MODE, - CLIENT_MODE + ClusterMode, + ClientMode }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java index 26394ca5..3bdac31b 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java @@ -20,5 +20,6 @@ public enum JDKVersion { - Java11, - Java17 + Java17, + Java21 }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java index c25b1b1e..fefc033a 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartConfig.java @@ -33,7 +33,7 @@ @JsonIgnoreProperties(ignoreUnknown = true) public class RestartConfig { @Builder.Default - protected RestartPolicy restartPolicy = RestartPolicy.NEVER; + protected RestartPolicy restartPolicy = RestartPolicy.Never; @Builder.Default protected Long maxRestartAttempts = 3L; @Builder.Default
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java index 856c8fe5..d87e87cb 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java @@ -21,21 +21,21 @@ import org.apache.spark.kubernetes.operator.status.BaseStateSummary; public enum RestartPolicy { - ALWAYS, - NEVER, - ON_FAILURE, - ON_INFRASTRUCTURE_FAILURE; + Always, + Never, + OnFailure, + OnInfrastructureFailure; public static boolean attemptRestartOnState(final RestartPolicy policy, final BaseStateSummary stateSummary) { switch (policy) { - case NEVER: - return false; - case ALWAYS: + case Always: return true; - case ON_FAILURE: + case Never: + return false; + case OnFailure: return stateSummary.isFailure(); - case ON_INFRASTRUCTURE_FAILURE: + case OnInfrastructureFailure: return stateSummary.isInfrastructureFailure(); } return false;
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java index df21d398..991bc83b 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java @@ -27,10 +27,4 @@ public enum SparkVersion { v3_5_1, v3_5_0, v3_4_2, v3_4_1, - v3_4_0, - v3_3_3, - v3_3_2, - v3_3_1,
- v3_3_0, - v3_2_0 }
diff --git a/spark-operator-docs/architecture.md b/spark-operator-docs/architecture.md index a37000ad..69347448 100644 --- a/spark-operator-docs/architecture.md +++ b/spark-operator-docs/architecture.md @@ -58,7 +58,7 @@ Kubernetes API Server. * Spark applications are expected to run from submitted to succeeded before releasing resources * User may configure the app CR to time-out after a given threshold of time -* In User may configure the app CR to skip releasing resources after terminated. This is +* In addition, user may configure the app CR to skip releasing resources after termination. This is typically used at dev phase: pods / configmaps, etc. would be kept for debugging. They have an ownerReference to the Application CR and therefore can still be cleaned up when the owner SparkApplication CR is deleted.
diff --git a/spark-operator-docs/configuration.md b/spark-operator-docs/configuration.md index cb6428d6..db256fac 100644 --- a/spark-operator-docs/configuration.md +++ b/spark-operator-docs/configuration.md @@ -72,7 +72,7 @@ operatorConfiguration: | spark.operator.retry.max.interval.seconds | integer | -1 | false | Max interval(in seconds) of retries on unhandled controller errors. Set to -1 for unlimited. | | spark.operator.retry.max.attempts | integer | 15 | false | Max attempts of retries on unhandled controller errors. | | spark.operator.driver.create.max.attempts | integer | 3 | true | Maximal number of retry attempts of requesting driver for Spark application. | -| spark.operator.max.retry.attempts.on.kube.failure | long | 3 | true | Maximal number of retry attempts of requests to k8s server upon response 429 and 5xx. | +| spark.operator.max.retry.attempts.on.k8s.failure | long | 3 | true | Maximal number of retry attempts of requests to k8s server upon response 429 and 5xx. | | spark.operator.retry.attempt.after.seconds | long | 1 | true | Default time (in seconds) to wait till next request. This would be used if server does not set Retry-After in response. | | spark.operator.max.retry.attempt.after.seconds | long | 15 | true | Maximal time (in seconds) to wait till next request. | | spark.operator.status.patch.max.retry | long | 3 | true | Maximal number of retry attempts of requests to k8s server for resource status update. |
diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md index 60696155..28cdce7f 100644 --- a/spark-operator-docs/getting_started.md +++ b/spark-operator-docs/getting_started.md @@ -33,9 +33,9 @@ cluster. ### Compatibility -- JDK11 or later +- JDK11, 17, or 21 - Operator uses fabric8, which is assumed to be compatible with available k8s versions. -- Spark versions 3.2 and above +- Spark versions 3.4 and above ### Start minikube
diff --git a/spark-operator-docs/metrics_logging.md b/spark-operator-docs/metrics_logging.md index 5faff025..d943dbec 100644 --- a/spark-operator-docs/metrics_logging.md +++ b/spark-operator-docs/metrics_logging.md @@ -22,7 +22,7 @@ under the License. Spark operator, following [Apache Spark](https://spark.apache.org/docs/latest/monitoring.html#metrics), has a configurable metrics system based on -the [Dropwizard Metrics Library](https://metrics.dropwizard.io/4.2.0/). Note that Spark Operator +the [Dropwizard Metrics Library](https://metrics.dropwizard.io/4.2.25/). Note that Spark Operator does not have a Spark UI; MetricsServlet and PrometheusServlet from the org.apache.spark.metrics.sink package are not supported.
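For orientation, here is a minimal sketch of how a metrics sink could be wired through the chart's `operatorConfiguration` values block. The block shape below is an assumption based on the chart's values.yaml; the `prometheus` sink name is just an illustrative label, while the sink class is the one shipped with the operator.

```yaml
# Sketch of a values.yaml fragment (assumed chart layout): the embedded
# metrics.properties follows Spark's
# spark.metrics.conf.<instance>.sink.<name>.class convention.
operatorConfiguration:
  metrics.properties: |+
    # "operator" is the metrics instance; "prometheus" is an arbitrary sink name.
    spark.metrics.conf.operator.sink.prometheus.class=org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink
```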
If you are interested in Prometheus metrics exporting, please take a look at the section `Forward Metrics to Prometheus` below @@ -45,8 +45,6 @@ via [Codahale JVM Metrics](https://javadoc.io/doc/com.codahale.metrics/metrics-j | kubernetes.client.http.request | Meter | Tracking the rates of HTTP request sent to the Kubernetes API Server | | kubernetes.client.http.response | Meter | Tracking the rates of HTTP response from the Kubernetes API Server | | kubernetes.client.http.response.failed | Meter | Tracking the rates of HTTP requests which have no response from the Kubernetes API Server | -| kubernetes.client.http.response.failed | Meter | Tracking the rates of HTTP requests which have no response from the Kubernetes API Server | -| kubernetes.client.http.response.failed | Meter | Tracking the rates of HTTP requests which have no response from the Kubernetes API Server | | kubernetes.client.http.response.latency.nanos | Histograms | Measures the statistical distribution of HTTP response latency from the Kubernetes API Server | | kubernetes.client.http.response. | Meter | Tracking the rates of HTTP response based on response code from the Kubernetes API Server | | kubernetes.client.http.request. | Meter | Tracking the rates of HTTP request based on the type of method sent to the Kubernetes API Server | @@ -68,7 +66,8 @@ to [Prometheus](https://prometheus.io). ```properties metrics.properties:|+ - spark.metrics.conf.operator.sink.mosaic.class=org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink + spark.metrics.conf.operator.sink.prometheus.class=org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink ``` * Install the Spark Operator
diff --git a/spark-operator-docs/operations.md b/spark-operator-docs/operations.md index c205fb76..2002de23 100644 --- a/spark-operator-docs/operations.md +++ b/spark-operator-docs/operations.md @@ -70,49 +70,51 @@ helm install spark-kubernetes-operator \ The configurable parameters of the Helm chart and their default values are detailed in the following table: -| Parameters | Description | -|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| image.repository | The image repository of spark-kubernetes-operator. | -| image.pullPolicy | The image pull policy of spark-kubernetes-operator. | -| image.tag | The image tag of spark-kubernetes-operator. | -| image.digest | The image tag of spark-kubernetes-operator. If set then it takes precedence and the image tag will be ignored. | -| imagePullSecrets | The image pull secrets of spark-kubernetes-operator. | -| operatorDeployment.replica | Operator replica count. Must be 1 unless leader election is configured. | -| operatorDeployment.strategy.type | Operator pod upgrade strategy. Must be Recreate unless leader election is configured. | -| operatorDeployment.operatorPod.annotations | Custom annotations to be added to the operator pod | -| operatorDeployment.operatorPod.labels | Custom labels to be added to the operator pod | -| operatorDeployment.operatorPod.nodeSelector | Custom nodeSelector to be added to the operator pod. | -| operatorDeployment.operatorPod.topologySpreadConstraints | Custom topologySpreadConstraints to be added to the operator pod. | -| operatorDeployment.operatorPod.dnsConfig | DNS configuration to be used by the operator pod. | -| operatorDeployment.operatorPod.volumes | Additional volumes to be added to the operator pod. | -| operatorDeployment.operatorPod.priorityClassName | Priority class name to be used for the operator pod | -| operatorDeployment.operatorPod.securityContext | Security context overrides for the operator pod | -| operatorDeployment.operatorContainer.jvmArgs | JVM arg override for the operator container. | -| operatorDeployment.operatorContainer.env | Custom env to be added to the operator container. | -| operatorDeployment.operatorContainer.envFrom | Custom envFrom to be added to the operator container, e.g. for downward API. | -| operatorDeployment.operatorContainer.probes | Probe config for the operator container. | -| operatorDeployment.operatorContainer.securityContext | Security context overrides for the operator container. | -| operatorDeployment.operatorContainer.resources | Resources for the operator container. | -| operatorDeployment.additionalContainers | Additional containers to be added to the operator pod, e.g. sidecar. | -| operatorRbac.serviceAccount.create | Whether to crete service account for operator to use. | -| operatorRbac.clusterRole.create | Whether to crete ClusterRole for operator to use. If disabled, a role would be created in operator & app namespaces | -| operatorRbac.clusterRoleBinding.create | Whether to crete ClusterRoleBinding for operator to use. If disabled, a rolebinding would be created in operator & app namespaces | -| operatorRbac.clusterRole.configManagement.roleName | Role name for operator configuration management (hot property loading and leader election) | -| appResources.namespaces.create | Whether to create dedicated namespaces for Spark apps. | -| appResources.namespaces.watchGivenNamespacesOnly | When enabled, operator would by default only watch namespace(s) provided in data field | -| appResources.namespaces.data | list of namespaces to create for apps | -| appResources.clusterRole.create | if enabled, a clusterrole would be created for Spark app service accounts to use | -| appResources.role.create | if enabled, a role would be created in each app namespace for Spark apps | -| appResources.serviceAccounts.data | list of namespaces to create for apps | -| appResources.labels | Labels to be applied for all app resources | -| appResources.annotations | Annotaions to be applied for all app resources | -| appResources.sparkApplicationSentinel.create | If enabled, sentinel resources will be created for operator to watch and reconcile for the health probe purpose. | -| appResources.sparkApplicationSentinel.sentinelNamespaces | A list of namespaces where sentinel resources will be created in. Note that these namespaces have to be a subset of appResources.namespaces.watchGivenNamespacesOnly | -| operatorConfiguration.append | If set to true, below conf file & properties would be appended to default conf. Otherwise, they would override default properties | -| operatorConfiguration.log4j2.properties | The default log4j2 configuration | -| operatorConfiguration.spark-operator.properties | The default operator configuration | -| operatorConfiguration.metrics.properties | The default operator metrics (sink) configuration | -| operatorConfiguration.dynamicConfig.create | If set to true, a config map would be created & watched by operator as source of truth for hot properties loading. | | +| Parameters | Description | Default value | +|----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------| +| image.repository | The image repository of spark-kubernetes-operator. | spark-kubernetes-operator | +| image.pullPolicy | The image pull policy of spark-kubernetes-operator. | IfNotPresent | +| image.tag | The image tag of spark-kubernetes-operator. | | +| image.digest | The image digest of spark-kubernetes-operator. If set then it takes precedence and the image tag will be ignored. | | +| imagePullSecrets | The image pull secrets of spark-kubernetes-operator. | | +| operatorDeployment.replica | Operator replica count. Must be 1 unless leader election is configured. | 1 | +| operatorDeployment.strategy.type | Operator pod upgrade strategy. Must be Recreate unless leader election is configured. | Recreate | +| operatorDeployment.operatorPod.annotations | Custom annotations to be added to the operator pod | | +| operatorDeployment.operatorPod.labels | Custom labels to be added to the operator pod | | +| operatorDeployment.operatorPod.nodeSelector | Custom nodeSelector to be added to the operator pod. | | +| operatorDeployment.operatorPod.topologySpreadConstraints | Custom topologySpreadConstraints to be added to the operator pod. | | +| operatorDeployment.operatorPod.dnsConfig | DNS configuration to be used by the operator pod. | | +| operatorDeployment.operatorPod.volumes | Additional volumes to be added to the operator pod. | | +| operatorDeployment.operatorPod.priorityClassName | Priority class name to be used for the operator pod | | +| operatorDeployment.operatorPod.securityContext | Security context overrides for the operator pod | | +| operatorDeployment.operatorContainer.jvmArgs | JVM arg override for the operator container. | `-XX:+UseG1GC -Xms3G -Xmx3G -Dfile.encoding=UTF8` | +| operatorDeployment.operatorContainer.env | Custom env to be added to the operator container. | | +| operatorDeployment.operatorContainer.envFrom | Custom envFrom to be added to the operator container, e.g. for downward API. | | +| operatorDeployment.operatorContainer.probes | Probe config for the operator container. | | +| operatorDeployment.operatorContainer.securityContext | Security context overrides for the operator container. | run as non root for baseline security standard compliance | +| operatorDeployment.operatorContainer.resources | Resources for the operator container. | memory 4Gi, ephemeral storage 2Gi and 1 cpu | +| operatorDeployment.additionalContainers | Additional containers to be added to the operator pod, e.g. sidecar. | | +| operatorRbac.serviceAccount.create | Whether to create service account for operator to use. | | +| operatorRbac.clusterRole.create | Whether to create ClusterRole for operator to use. If disabled, a role would be created in operator & app namespaces | true | +| operatorRbac.clusterRoleBinding.create | Whether to create ClusterRoleBinding for operator to use. If disabled, a rolebinding would be created in operator & app namespaces | true | +| operatorRbac.clusterRole.configManagement.roleName | Role name for operator configuration management (hot property loading and leader election) | `spark-operator-config-role` | +| appResources.namespaces.create | Whether to create dedicated namespaces for Spark apps. | | +| appResources.namespaces.watchGivenNamespacesOnly | When enabled, operator would by default only watch namespace(s) provided in data field. | false | +| appResources.namespaces.data | list of namespaces to create for apps | | +| appResources.clusterRole.create | Enable a ClusterRole to be created for apps. If neither role nor clusterrole is enabled: Spark app would use the same access as operator. | false | +| appResources.role.create | Enable a Role to be created in each app namespace for apps. If neither role nor clusterrole is enabled: Spark app would use the same access as operator. | false | +| appResources.serviceAccounts.create | Whether to create a service account for apps | true | +| appResources.serviceAccounts.name | The name of Spark app service account | `spark` | +| appResources.labels | Labels to be applied for all app resources | `"app.kubernetes.io/component": "spark-apps"` | +| appResources.annotations | Annotations to be applied for all app resources | | +| appResources.sparkApplicationSentinel.create | If enabled, sentinel resources will be created for operator to watch and reconcile for the health probe purpose. | false | +| appResources.sparkApplicationSentinel.sentinelNamespaces | A list of namespaces where sentinel resources will be created in. Note that these namespaces have to be a subset of appResources.namespaces.data | | +| operatorConfiguration.append | If set to true, the conf file & properties below would be appended to the default conf. Otherwise, they would override the default properties | true | +| operatorConfiguration.log4j2.properties | The default log4j2 configuration | Refer default [log4j2.properties](../build-tools/helm/spark-kubernetes-operator/conf/log4j2.properties) | +| operatorConfiguration.spark-operator.properties | The default operator configuration | | +| operatorConfiguration.metrics.properties | The default operator metrics (sink) configuration | | +| operatorConfiguration.dynamicConfig.create | If set to true, a config map would be created & watched by operator as source of truth for hot properties loading. | false | +| operatorConfiguration.dynamicConfig.name | Name of the dynamic config map for hot property loading. | spark-kubernetes-operator-dynamic-configuration | For more information check the [Helm documentation](https://helm.sh/docs/helm/helm_install/).
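As a usage sketch, a few of the parameters above could be overridden at install time with a values file. Key names follow the table; the namespace value and the toggles below are illustrative only, not recommended defaults.

```yaml
# Hypothetical overrides.yaml, applied via:
#   helm install spark-kubernetes-operator -f overrides.yaml ...
appResources:
  namespaces:
    # Only watch the namespaces listed under data.
    watchGivenNamespacesOnly: true
    data:
      - "spark-demo"
operatorConfiguration:
  dynamicConfig:
    # Create the hot-property ConfigMap and let the operator watch it.
    create: true
    name: spark-kubernetes-operator-dynamic-configuration
```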
diff --git a/spark-operator-docs/operator_probes.md b/spark-operator-docs/operator_probes.md index 7aeecd19..f3dabf84 100644 --- a/spark-operator-docs/operator_probes.md +++ b/spark-operator-docs/operator_probes.md @@ -37,7 +37,7 @@ livenessProbe: readinessProbe: httpGet: port: probe-port - path: /startup + path: /readyz failureThreshold: 30 periodSeconds: 10 ```
diff --git a/spark-operator-docs/spark_application.md b/spark-operator-docs/spark_application.md index 689c5bd6..bcb50377 100644 --- a/spark-operator-docs/spark_application.md +++ b/spark-operator-docs/spark_application.md @@ -23,21 +23,21 @@ metadata: namespace: spark-test spec: mainClass: "org.apache.spark.examples.SparkPi" - jars: "local:///opt/spark/examples/jars/spark-examples.jar" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar" sparkConf: spark.executor.instances: "5" - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java17-python3-ubuntu" spark.kubernetes.namespace: "spark-test" spark.kubernetes.authenticate.driver.serviceAccountName: "spark" runtimeVersions: scalaVersion: v2_12 - sparkVersion: v3_4_1 + sparkVersion: v3_5_1 ``` -After submitted, Operator will add status information to your application based on the -observed state: +After the application is submitted, Operator will add status information to your application based on +the observed state: ``` kubectl get sparkapp spark-pi -o yaml
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java index a2d3d49c..6175f360 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java @@ -169,7 +169,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption MaxRetryAttemptOnKubeServerFailure = ConfigOption.builder() - .key(PREFIX + "max.retry.attempts.on.kube.failure") + .key(PREFIX + "max.retry.attempts.on.k8s.failure") .description( "Maximal number of retry attempts of requests to k8s server upon " + "response 429 and 5xx.")
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java index 30cd980d..7e417728 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java @@ -31,7 +31,7 @@ @Slf4j public class ProbeService { public static final String HEALTHZ = "/healthz"; - public static final String STARTUP = "/startup"; + public static final String READYZ = "/readyz"; HttpServer server; public ProbeService(List operators, SentinelManager sentinelManager) { @@ -42,7 +42,7 @@ public ProbeService(List operators, SentinelManager sentinelManager) { } catch (IOException e) { throw new RuntimeException("Failed to create Probe Service Server", e); } - server.createContext(STARTUP, new ReadinessProbe(operators)); + server.createContext(READYZ, new ReadinessProbe(operators)); server.createContext(HEALTHZ, healthProbe); server.setExecutor(null); }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java index 153076ab..3790bdac 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java @@ -61,7 +61,7 @@ public ReconcileProgress reconcile(SparkApplicationContext context, String stateMessage = null; if (!tolerations.getDeleteOnTermination()) { if (tolerations.getRestartConfig() != null - && !RestartPolicy.NEVER.equals( + && !RestartPolicy.Never.equals( tolerations.getRestartConfig().getRestartPolicy())) { stateMessage = "Application is configured to restart, resources created in current " +
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java index fb9e1dca..1bbb3012 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java @@ -43,7 +43,7 @@ public ReconcileProgress reconcile(SparkApplicationContext context, log.warn("Spark application found with empty status. Resetting to initial state."); statusRecorder.persistStatus(context, new ApplicationStatus()); } - if (DeploymentMode.CLIENT_MODE.equals(context.getSparkApplication().getSpec())) { + if (DeploymentMode.ClientMode.equals(context.getSparkApplication().getSpec().getDeploymentMode())) { ApplicationState failure = new ApplicationState(ApplicationStateSummary.FAILED, "Client mode is not supported yet."); statusRecorder.persistStatus(context,
diff --git a/spark-operator/src/main/resources/streaming.yaml b/spark-operator/src/main/resources/streaming.yaml index 5903cce5..61b119fc 100644 --- a/spark-operator/src/main/resources/streaming.yaml +++ b/spark-operator/src/main/resources/streaming.yaml @@ -23,7 +23,7 @@ metadata: spec: applicationTolerations: restartConfig: - restartPolicy: ALWAYS + restartPolicy: Always maxRestartAttempts: 3 restartBackoffMillis: 300000 applicationTimeoutConfig:
diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java index 0b270d7b..452d07d5 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java @@ -80,9 +80,9 @@ void testIsSentinelResource() { Set<String> namespaces = new HashSet<>(); sparkApplication.getMetadata().setNamespace("spark-test"); namespaces.add("spark-test"); - try (MockedStatic<SparkReconcilerUtils> mockUtilsKube = + try (MockedStatic<SparkReconcilerUtils> mockUtils = mockStatic(SparkReconcilerUtils.class)) { - mockUtilsKube.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); + mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); Assertions.assertTrue(SentinelManager.isSentinelResource(sparkApplication)); } } @@ -159,9 +159,9 @@ void sentinelManagerShouldReportHealthyWhenWatchedNamespaceIsReduced() namespaces.add(DEFAULT); namespaces.add(SPARK_DEMO); - try (MockedStatic<SparkReconcilerUtils> mockUtilsKube = + try (MockedStatic<SparkReconcilerUtils> mockUtils =
mockStatic(SparkReconcilerUtils.class)) { - mockUtilsKube.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); + mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); SentinelManager sentinelManager = new SentinelManager(); NonNamespaceOperation, Resource> cr1 = diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java index c1c3e30b..3ce0875c 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java @@ -31,7 +31,7 @@ import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorProbePort; import static org.apache.spark.kubernetes.operator.probe.ProbeService.HEALTHZ; -import static org.apache.spark.kubernetes.operator.probe.ProbeService.STARTUP; +import static org.apache.spark.kubernetes.operator.probe.ProbeService.READYZ; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -115,7 +115,7 @@ private void hitHealthyEndpoint() throws Exception { } private void hitStartedUpEndpoint() throws Exception { - URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + STARTUP); + URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + READYZ); HttpURLConnection connection = (HttpURLConnection) u.openConnection(); connection.setConnectTimeout(100000); connection.connect(); diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java index f0076708..401f350d 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java @@ -49,10 +49,7 @@ public static void cleanPropertiesFile(String filePath) { public static boolean notTimedOut(long startTime, long maxWaitTimeInMills) { long elapsedTimeInMills = calculateElapsedTimeInMills(startTime); - if (elapsedTimeInMills >= maxWaitTimeInMills) { - return false; - } - return true; + return elapsedTimeInMills < maxWaitTimeInMills; } public static long calculateElapsedTimeInMills(long startTime) { From 734a48b1239e3664d12cb724343fe5b8c7cda62c Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Thu, 4 Apr 2024 16:51:24 -0700 Subject: [PATCH 03/14] Style fix: indent and imports * Uses 2-space indent as Apache Spark * Fix import order to java - 3pc - spark --- .../kubernetes/operator/BaseResource.java | 5 +- .../spark/kubernetes/operator/Constants.java | 110 ++- .../kubernetes/operator/SparkApplication.java | 21 +- .../decorators/ResourceDecorator.java | 2 +- .../operator/spec/ApplicationSpec.java | 38 +- .../spec/ApplicationTimeoutConfig.java | 20 +- .../operator/spec/ApplicationTolerations.java | 34 +- .../spec/BaseApplicationTemplateSpec.java | 2 +- .../kubernetes/operator/spec/BaseSpec.java | 9 +- .../operator/spec/DeploymentMode.java | 4 +- .../operator/spec/InstanceConfig.java | 20 +- .../kubernetes/operator/spec/JDKVersion.java | 6 +- .../operator/spec/RestartPolicy.java | 32 +- .../operator/spec/RuntimeVersions.java | 8 +- .../operator/spec/ScalaVersion.java | 4 +- .../operator/spec/SparkVersion.java | 8 +- 
.../status/ApplicationAttemptSummary.java | 10 +- .../operator/status/ApplicationState.java | 24 +- .../status/ApplicationStateSummary.java | 246 +++--- .../operator/status/ApplicationStatus.java | 167 ++-- .../operator/status/AttemptInfo.java | 10 +- .../operator/status/BaseAttemptSummary.java | 2 +- .../kubernetes/operator/status/BaseState.java | 10 +- .../operator/status/BaseStateSummary.java | 4 +- .../operator/status/BaseStatus.java | 62 +- .../kubernetes/operator/utils/ModelUtils.java | 135 ++-- .../status/ApplicationStatusTest.java | 34 +- .../operator/AppSubmitToSucceedTest.java | 273 +++---- .../kubernetes/operator/SparkOperator.java | 305 ++++---- .../client/KubernetesClientFactory.java | 67 +- .../operator/client/RetryInterceptor.java | 121 +-- .../operator/config/ConfigOption.java | 110 +-- .../operator/config/SparkOperatorConf.java | 721 +++++++++--------- .../config/SparkOperatorConfManager.java | 136 ++-- .../SparkOperatorConfigMapReconciler.java | 57 +- .../controller/SparkApplicationContext.java | 83 +- .../operator/decorators/DriverDecorator.java | 29 +- .../decorators/DriverResourceDecorator.java | 40 +- .../operator/health/SentinelManager.java | 293 +++---- .../listeners/ApplicationStatusListener.java | 2 +- .../listeners/BaseStatusListener.java | 4 +- .../operator/metrics/JVMMetricSet.java | 64 +- .../operator/metrics/MetricsService.java | 58 +- .../operator/metrics/MetricsSystem.java | 179 ++--- .../metrics/MetricsSystemFactory.java | 121 +-- .../metrics/sink/PrometheusPullModelSink.java | 85 ++- .../operator/metrics/source/JVMSource.java | 21 +- .../source/KubernetesMetricsInterceptor.java | 269 +++---- .../metrics/source/OperatorJosdkMetrics.java | 405 +++++----- .../operator/probe/HealthProbe.java | 129 ++-- .../operator/probe/ProbeService.java | 59 +- .../operator/probe/ReadinessProbe.java | 42 +- .../reconciler/ReconcileProgress.java | 56 +- .../SparkApplicationReconcileUtils.java | 257 +++---- .../SparkApplicationReconciler.java | 290 +++---- .../reconciler/SparkReconcilerUtils.java | 393 +++++----- .../observers/AppDriverReadyObserver.java | 31 +- .../observers/AppDriverRunningObserver.java | 17 +- .../observers/AppDriverStartObserver.java | 31 +- .../observers/AppDriverTimeoutObserver.java | 111 +-- .../observers/BaseAppDriverObserver.java | 189 ++--- .../BaseSecondaryResourceObserver.java | 21 +- .../reconcilesteps/AppCleanUpStep.java | 193 ++--- .../reconcilesteps/AppInitStep.java | 173 ++--- .../reconcilesteps/AppReconcileStep.java | 65 +- .../AppResourceObserveStep.java | 17 +- .../reconcilesteps/AppRunningStep.java | 105 +-- .../reconcilesteps/AppTerminatedStep.java | 18 +- .../reconcilesteps/AppValidateStep.java | 31 +- .../reconcilesteps/UnknownStateStep.java | 27 +- .../utils/ApplicationStatusUtils.java | 68 +- .../operator/utils/LoggingUtils.java | 89 +-- .../kubernetes/operator/utils/PodPhase.java | 38 +- .../kubernetes/operator/utils/PodUtils.java | 87 +-- .../kubernetes/operator/utils/ProbeUtil.java | 50 +- .../operator/utils/SparkExceptionUtils.java | 20 +- .../operator/utils/StatusRecorder.java | 289 +++---- .../operator/config/ConfigOptionTest.java | 310 ++++---- .../config/SparkOperatorConfManagerTest.java | 80 +- .../operator/health/SentinelManagerTest.java | 311 ++++---- .../metrics/MetricsSystemFactoryTest.java | 44 +- .../operator/metrics/MetricsSystemTest.java | 87 +-- .../operator/metrics/sink/MockSink.java | 68 +- .../KubernetesMetricsInterceptorTest.java | 179 ++--- .../source/OperatorJosdkMetricsTest.java | 289 +++---- 
.../operator/probe/HealthProbeTest.java | 315 ++++---- .../operator/probe/ProbeServiceTest.java | 159 ++-- .../operator/probe/ReadinessProbeTest.java | 68 +- .../SparkApplicationReconcileUtilsTest.java | 106 +-- .../SparkApplicationReconcilerTest.java | 105 +-- .../kubernetes/operator/utils/TestUtils.java | 59 +- .../operator/ApplicationClientWorker.java | 127 +-- .../operator/ApplicationDriverConf.java | 67 +- .../operator/ApplicationResourceSpec.java | 129 ++-- .../operator/ApplicationClientWorkerTest.java | 323 ++++---- .../operator/ApplicationResourceSpecTest.java | 127 +-- 96 files changed, 5187 insertions(+), 5132 deletions(-) diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java index 2df46de7..695ecfc0 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/BaseResource.java @@ -20,12 +20,13 @@ import io.fabric8.kubernetes.api.model.Namespaced; import io.fabric8.kubernetes.client.CustomResource; + import org.apache.spark.kubernetes.operator.spec.BaseSpec; import org.apache.spark.kubernetes.operator.status.BaseAttemptSummary; import org.apache.spark.kubernetes.operator.status.BaseState; import org.apache.spark.kubernetes.operator.status.BaseStatus; public class BaseResource, - SPEC extends BaseSpec, STATUS extends BaseStatus> - extends CustomResource implements Namespaced { + SPEC extends BaseSpec, STATUS extends BaseStatus> + extends CustomResource implements Namespaced { } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java index a5f256ca..c7b88642 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/Constants.java @@ -19,63 +19,59 @@ package org.apache.spark.kubernetes.operator; public class Constants { - public static final String API_GROUP = "org.apache.spark"; - public static final String API_VERSION = "v1alpha1"; - public static final String LABEL_SPARK_APPLICATION_NAME = "spark.operator/spark-app-name"; - public static final String LABEL_SPARK_OPERATOR_NAME = "spark.operator/name"; - public static final String LABEL_RESOURCE_NAME = "app.kubernetes.io/name"; - public static final String LABEL_SPARK_ROLE_NAME = "spark-role"; - public static final String LABEL_SPARK_ROLE_DRIVER_VALUE = "driver"; - public static final String LABEL_SPARK_ROLE_EXECUTOR_VALUE = "executor"; - public static final String SPARK_CONF_SENTINEL_DUMMY_FIELD = "sentinel.dummy.number"; + public static final String API_GROUP = "org.apache.spark"; + public static final String API_VERSION = "v1alpha1"; + public static final String LABEL_SPARK_APPLICATION_NAME = "spark.operator/spark-app-name"; + public static final String LABEL_SPARK_OPERATOR_NAME = "spark.operator/name"; + public static final String LABEL_RESOURCE_NAME = "app.kubernetes.io/name"; + public static final String LABEL_SPARK_ROLE_NAME = "spark-role"; + public static final String LABEL_SPARK_ROLE_DRIVER_VALUE = "driver"; + public static final String LABEL_SPARK_ROLE_EXECUTOR_VALUE = "executor"; + public static final String SPARK_CONF_SENTINEL_DUMMY_FIELD = "sentinel.dummy.number"; - public static final String SENTINEL_LABEL = 
"spark.operator/sentinel"; + public static final String SENTINEL_LABEL = "spark.operator/sentinel"; - // Default state messages - public static final String DriverRequestedMessage = - "Requested driver from resource scheduler. "; - public static final String DriverCompletedMessage = - "Spark application completed successfully. "; - public static final String DriverTerminatedBeforeInitializationMessage = - "Driver container is terminated without SparkContext / SparkSession initialization. "; - public static final String DriverFailedInitContainersMessage = - "Driver has failed init container(s). Refer last observed status for details. "; - public static final String DriverFailedMessage = - "Driver has one or more failed critical container(s), refer last observed status for " + - "details. "; - public static final String DriverSucceededMessage = - "Driver has critical container(s) exited with 0. "; - public static final String DriverRestartedMessage = - "Driver has one or more critical container(s) restarted unexpectedly, refer last " + - "observed status for details. "; - public static final String AppStopRequestReceivedMessage = - "Received request to shutdown Spark application. "; - public static final String AppCancelledMessage = - "Spark application has been shutdown as requested. "; - public static final String DriverUnexpectedRemovedMessage = - "Driver removed. This could caused by 'exit' called in driver process with non-zero " + - "code, involuntary disruptions or unintentional destroy behavior, check " + - "Kubernetes events for more details. "; - public static final String DriverLaunchTimeoutMessage = - "The driver has not responded to the initial health check request within the " + - "allotted start-up time. This can be configured by setting " + - ".spec.applicationTolerations.applicationTimeoutConfig "; - public static final String DriverRunning = "Driver has started running. "; - public static final String DriverReady = "Driver has reached ready state. "; - public static final String SubmittedStateMessage = - "Spark application has been created on Kubernetes Cluster. "; - public static final String UnknownStateMessage = "Cannot process application status. "; - public static final String ExceedMaxRetryAttemptMessage = - "The maximum number of restart attempts (%d) has been exceeded. "; - public static final String ScheduleFailureMessage = - "Failed to request driver from scheduler backend. "; - public static final String RunningHealthyMessage = "Application is running healthy. "; - public static final String InitializedWithBelowThresholdExecutorsMessage = - "The application is running with less than minimal number of requested initial " + - "executors. "; - public static final String RunningWithBelowThresholdExecutorsMessage = - "The Spark application is running with less than minimal number of requested " + - "executors. "; - public static final String ExecutorLaunchTimeoutMessage = - "The Spark application failed to get enough executors in the given time threshold. "; + // Default state messages + public static final String DriverRequestedMessage = "Requested driver from resource scheduler. "; + public static final String DriverCompletedMessage = "Spark application completed successfully. "; + public static final String DriverTerminatedBeforeInitializationMessage = + "Driver container is terminated without SparkContext / SparkSession initialization. "; + public static final String DriverFailedInitContainersMessage = + "Driver has failed init container(s). 
Refer last observed status for details. "; + public static final String DriverFailedMessage = + "Driver has one or more failed critical container(s), refer last observed status for " + + "details. "; + public static final String DriverSucceededMessage = + "Driver has critical container(s) exited with 0. "; + public static final String DriverRestartedMessage = + "Driver has one or more critical container(s) restarted unexpectedly, refer last " + + "observed status for details. "; + public static final String AppCancelledMessage = + "Spark application has been shutdown as requested. "; + public static final String DriverUnexpectedRemovedMessage = + "Driver removed. This could be caused by 'exit' called in driver process with non-zero " + + "code, involuntary disruptions or unintentional destroy behavior, check " + + "Kubernetes events for more details. "; + public static final String DriverLaunchTimeoutMessage = + "The driver has not responded to the initial health check request within the " + + "allotted start-up time. This can be configured by setting " + + ".spec.applicationTolerations.applicationTimeoutConfig "; + public static final String DriverRunning = "Driver has started running. "; + public static final String DriverReady = "Driver has reached ready state. "; + public static final String SubmittedStateMessage = + "Spark application has been created on Kubernetes Cluster. "; + public static final String UnknownStateMessage = "Cannot process application status. "; + public static final String ExceedMaxRetryAttemptMessage = + "The maximum number of restart attempts (%d) has been exceeded. "; + public static final String ScheduleFailureMessage = + "Failed to request driver from scheduler backend. "; + public static final String RunningHealthyMessage = "Application is running healthy. "; + public static final String InitializedWithBelowThresholdExecutorsMessage = + "The application is running with less than minimal number of requested initial " + + "executors. "; + public static final String RunningWithBelowThresholdExecutorsMessage = + "The Spark application is running with less than minimal number of requested " + + "executors. "; + public static final String ExecutorLaunchTimeoutMessage = + "The Spark application failed to get enough executors in the given time threshold. 
"; } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java index 07b2b5a3..0b7fa7bd 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/SparkApplication.java @@ -24,6 +24,7 @@ import io.fabric8.kubernetes.model.annotation.Group; import io.fabric8.kubernetes.model.annotation.ShortNames; import io.fabric8.kubernetes.model.annotation.Version; + import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary; import org.apache.spark.kubernetes.operator.status.ApplicationState; @@ -37,15 +38,15 @@ @ShortNames({"sparkapp"}) @JsonIgnoreProperties(ignoreUnknown = true) public class SparkApplication extends - BaseResource { - @Override - public ApplicationStatus initStatus() { - return new ApplicationStatus(); - } + BaseResource { + @Override + public ApplicationStatus initStatus() { + return new ApplicationStatus(); + } - @Override - public ApplicationSpec initSpec() { - return new ApplicationSpec(); - } + @Override + public ApplicationSpec initSpec() { + return new ApplicationSpec(); + } } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java index dc4b34c8..1c705eef 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/decorators/ResourceDecorator.java @@ -21,5 +21,5 @@ import io.fabric8.kubernetes.api.model.HasMetadata; public interface ResourceDecorator { - T decorate(T resource); + T decorate(T resource); } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java index 1dab3642..f6032d58 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationSpec.java @@ -18,6 +18,9 @@ package org.apache.spark.kubernetes.operator.spec; +import java.util.ArrayList; +import java.util.List; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import io.fabric8.generator.annotation.Required; @@ -27,9 +30,6 @@ import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; -import java.util.ArrayList; -import java.util.List; - @Data @NoArgsConstructor @AllArgsConstructor @@ -38,20 +38,20 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationSpec extends BaseSpec { - protected String mainClass; - @Required - protected RuntimeVersions runtimeVersions; - protected String jars; - protected String pyFiles; - protected String sparkRFiles; - protected String files; - @Builder.Default - protected DeploymentMode deploymentMode = DeploymentMode.ClusterMode; - protected String proxyUser; - @Builder.Default - protected List driverArgs = new ArrayList<>(); - @Builder.Default - protected ApplicationTolerations applicationTolerations = new ApplicationTolerations(); - 
protected BaseApplicationTemplateSpec driverSpec; - protected BaseApplicationTemplateSpec executorSpec; + protected String mainClass; + @Required + protected RuntimeVersions runtimeVersions; + protected String jars; + protected String pyFiles; + protected String sparkRFiles; + protected String files; + @Builder.Default + protected DeploymentMode deploymentMode = DeploymentMode.ClusterMode; + protected String proxyUser; + @Builder.Default + protected List driverArgs = new ArrayList<>(); + @Builder.Default + protected ApplicationTolerations applicationTolerations = new ApplicationTolerations(); + protected BaseApplicationTemplateSpec driverSpec; + protected BaseApplicationTemplateSpec executorSpec; } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java index cdd1a2ba..4f51a7af 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTimeoutConfig.java @@ -32,14 +32,14 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationTimeoutConfig { - @Builder.Default - protected Long driverStartTimeoutMillis = 300 * 1000L; - @Builder.Default - protected Long sparkSessionStartTimeoutMillis = 300 * 1000L; - @Builder.Default - protected Long executorStartTimeoutMillis = 300 * 1000L; - @Builder.Default - protected Long forceTerminationGracePeriodMillis = 300 * 1000L; - @Builder.Default - protected Long terminationRequeuePeriodMillis = 2 * 1000L; + @Builder.Default + protected Long driverStartTimeoutMillis = 300 * 1000L; + @Builder.Default + protected Long sparkSessionStartTimeoutMillis = 300 * 1000L; + @Builder.Default + protected Long executorStartTimeoutMillis = 300 * 1000L; + @Builder.Default + protected Long forceTerminationGracePeriodMillis = 300 * 1000L; + @Builder.Default + protected Long terminationRequeuePeriodMillis = 2 * 1000L; } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java index a8f36724..ceeaa12c 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java @@ -32,21 +32,21 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationTolerations { - @Builder.Default - protected RestartConfig restartConfig = new RestartConfig(); - @Builder.Default - protected ApplicationTimeoutConfig applicationTimeoutConfig = new ApplicationTimeoutConfig(); - /** - * Determine the toleration behavior for executor / worker instances. - */ - @Builder.Default - protected InstanceConfig instanceConfig = new InstanceConfig(); - /** - * If disabled, operator would not attempt to delete resources after app terminates. - * While this can be helpful in dev phase, it shall not be enabled for prod use cases. - * Caution: in order to avoid resource conflicts among multiple attempts, this can be disabled - * iff restart policy is set to Never. 
- */ - @Builder.Default - protected Boolean deleteOnTermination = true; + @Builder.Default + protected RestartConfig restartConfig = new RestartConfig(); + @Builder.Default + protected ApplicationTimeoutConfig applicationTimeoutConfig = new ApplicationTimeoutConfig(); + /** + * Determine the toleration behavior for executor / worker instances. + */ + @Builder.Default + protected InstanceConfig instanceConfig = new InstanceConfig(); + /** + * If disabled, operator would not attempt to delete resources after app terminates. + * While this can be helpful in dev phase, it shall not be enabled for prod use cases. + * Caution: in order to avoid resource conflicts among multiple attempts, this can be disabled + * iff restart policy is set to Never. + */ + @Builder.Default + protected Boolean deleteOnTermination = true; } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java index 894cf7ad..54d52290 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseApplicationTemplateSpec.java @@ -33,5 +33,5 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class BaseApplicationTemplateSpec { - protected PodTemplateSpec podTemplateSpec; + protected PodTemplateSpec podTemplateSpec; } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java index 57a992c8..8a00f780 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/BaseSpec.java @@ -18,17 +18,18 @@ package org.apache.spark.kubernetes.operator.spec; +import java.util.HashMap; +import java.util.Map; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import lombok.Data; -import org.apache.spark.kubernetes.operator.diff.Diffable; -import java.util.HashMap; -import java.util.Map; +import org.apache.spark.kubernetes.operator.diff.Diffable; @Data @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class BaseSpec implements Diffable { - protected Map sparkConf = new HashMap<>(); + protected Map sparkConf = new HashMap<>(); } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java index cd21b5ff..8512d50b 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/DeploymentMode.java @@ -19,6 +19,6 @@ package org.apache.spark.kubernetes.operator.spec; public enum DeploymentMode { - ClusterMode, - ClientMode + ClusterMode, + ClientMode } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java index 229c920f..c5e7fd00 100644 --- 
a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/InstanceConfig.java @@ -43,12 +43,12 @@ * Spark would try to bring up 10 executors as defined in SparkConf. In addition, from SparkApp * perspective, * + If Spark app acquires less than 5 executors in the given time window - * (.spec.applicationTolerations.applicationTimeoutConfig.executorStartTimeoutMillis) after - * submitted, it would be shut down proactively in order to avoid resource deadlock. + * (.spec.applicationTolerations.applicationTimeoutConfig.executorStartTimeoutMillis) after + * submitted, it would be shut down proactively in order to avoid resource deadlock. * + Spark app would be marked as 'RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS' if it loses executors after - * successfully start up. + * successfully start up. * + Spark app would be marked as 'RUNNING_HEALTHY' if it has at least min executors after - * successfully start up. + * successfully start up. */ @Data @NoArgsConstructor @@ -57,10 +57,10 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class InstanceConfig { - @Builder.Default - protected long initExecutors = 0L; - @Builder.Default - protected long minExecutors = 0L; - @Builder.Default - protected long maxExecutors = 0L; + @Builder.Default + protected long initExecutors = 0L; + @Builder.Default + protected long minExecutors = 0L; + @Builder.Default + protected long maxExecutors = 0L; }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java index 3bdac31b..8834a28c 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java @@ -19,7 +19,7 @@ package org.apache.spark.kubernetes.operator.spec; public enum JDKVersion { - Java11, - Java17, - Java21 + Java11, + Java17, + Java21 }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java index d87e87cb..f5fbf9a6 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RestartPolicy.java @@ -21,23 +21,23 @@ import org.apache.spark.kubernetes.operator.status.BaseStateSummary; public enum RestartPolicy { - Always, - Never, - OnFailure, - OnInfrastructureFailure; + Always, + Never, + OnFailure, + OnInfrastructureFailure; - public static boolean attemptRestartOnState(final RestartPolicy policy, - final BaseStateSummary stateSummary) { - switch (policy) { - case Always: - return true; - case Never: + public static boolean attemptRestartOnState(final RestartPolicy policy, + final BaseStateSummary stateSummary) { + switch (policy) { + case Always: + return true; + case Never: return false; - case OnFailure: - return stateSummary.isFailure(); - case OnInfrastructureFailure: - return stateSummary.isInfrastructureFailure(); } + case OnFailure: + return stateSummary.isFailure(); + case OnInfrastructureFailure: + return stateSummary.isInfrastructureFailure(); } + return false; + } } diff --git
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java index 8dd7b38d..ec8517cd 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/RuntimeVersions.java @@ -33,8 +33,8 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class RuntimeVersions { - @Required - protected SparkVersion sparkVersion; - protected ScalaVersion scalaVersion; - protected JDKVersion jdkVersion; + @Required + protected SparkVersion sparkVersion; + protected ScalaVersion scalaVersion; + protected JDKVersion jdkVersion; }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java index d786d691..496eba86 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ScalaVersion.java @@ -19,6 +19,6 @@ package org.apache.spark.kubernetes.operator.spec; public enum ScalaVersion { - v2_12, - v2_13 + v2_12, + v2_13 }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java index 991bc83b..6e787562 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java @@ -23,8 +23,8 @@ * official Spark images */ public enum SparkVersion { - v3_5_1, - v3_5_0, - v3_4_2, - v3_4_1, + v3_5_1, + v3_5_0, + v3_4_2, + v3_4_1, }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java index a3013b36..e65a471a 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationAttemptSummary.java @@ -18,6 +18,8 @@ package org.apache.spark.kubernetes.operator.status; +import java.util.Map; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import lombok.AllArgsConstructor; @@ -25,8 +27,6 @@ import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; -import java.util.Map; - @Data @NoArgsConstructor @AllArgsConstructor @@ -34,7 +34,7 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationAttemptSummary extends BaseAttemptSummary { - // The state transition history for given attempt - // This is used when state history trimming is enabled - protected Map<Long, ApplicationState> stateTransitionHistory; + // The state transition history for a given attempt + // This is used when state history trimming is enabled + protected Map<Long, ApplicationState> stateTransitionHistory; }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java index
e8fbb7d7..d67e32af 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationState.java @@ -18,6 +18,9 @@ package org.apache.spark.kubernetes.operator.status; +import java.io.Serializable; +import java.time.Instant; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import io.fabric8.kubernetes.api.model.PodStatus; @@ -26,9 +29,6 @@ import lombok.Setter; import lombok.ToString; -import java.io.Serializable; -import java.time.Instant; - import static org.apache.spark.kubernetes.operator.Constants.SubmittedStateMessage; @ToString(callSuper = true) @@ -37,15 +37,15 @@ @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationState extends BaseState<ApplicationStateSummary> implements Serializable { - @Getter - @Setter - PodStatus lastObservedDriverStatus; + @Getter + @Setter + PodStatus lastObservedDriverStatus; - public ApplicationState() { - super(ApplicationStateSummary.SUBMITTED, Instant.now().toString(), SubmittedStateMessage); - } + public ApplicationState() { + super(ApplicationStateSummary.SUBMITTED, Instant.now().toString(), SubmittedStateMessage); + } - public ApplicationState(ApplicationStateSummary currentStateSummary, String message) { - super(currentStateSummary, Instant.now().toString(), message); - } + public ApplicationState(ApplicationStateSummary currentStateSummary, String message) { + super(currentStateSummary, Instant.now().toString(), message); + } }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java index 88a30e08..96a7aa28 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStateSummary.java @@ -21,128 +21,128 @@ import java.util.Set; public enum ApplicationStateSummary implements BaseStateSummary { - /** - * Spark application is submitted to the cluster but yet scheduled. - */ - SUBMITTED, - - /** - * Spark application will be restarted with same configuration - */ - SCHEDULED_TO_RESTART, - - /** - * A request has been made to start driver pod in the cluster - */ - DRIVER_REQUESTED, - - /** - * Driver pod has reached running state - */ - DRIVER_STARTED, - - /** - * Spark session is initialized - */ - DRIVER_READY, - - /** - * Less that minimal required executor pods become ready during starting up - */ - INITIALIZED_BELOW_THRESHOLD_EXECUTORS, - - /** - * All required executor pods started - */ - RUNNING_HEALTHY, - - /** - * The application has lost a fraction of executors for external reasons - */ - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS, - - /** - * The request timed out for driver - */ - DRIVER_LAUNCH_TIMED_OUT, - - /** - * The request timed out for executors - */ - EXECUTORS_LAUNCH_TIMED_OUT, - - /** - * Timed out waiting for context to be initialized - */ - SPARK_SESSION_INITIALIZATION_TIMED_OUT, - - /** - * The application completed successfully, or System.exit is called explicitly with zero state - */ - SUCCEEDED, - - /** - * The application has failed, JVM exited abnormally, or System.exit is called explicitly - * with non-zero state - */ - FAILED, - - /** - * The job has failed because of a scheduler side issue. e.g.
driver scheduled on node with - insufficient resources - */ - SCHEDULING_FAILURE, - - /** - * The driver pod was failed with Evicted reason - */ - DRIVER_EVICTED, - - /** - * all resources (pods, services .etc have been cleaned up) - */ - RESOURCE_RELEASED, - - /** - * If configured, operator may mark app as terminated without releasing resources. While this - * can be helpful in dev phase, it shall not be enabled for prod use cases. - */ - TERMINATED_WITHOUT_RELEASE_RESOURCES; - - public boolean isInitializing() { - return SUBMITTED.equals(this) || SCHEDULED_TO_RESTART.equals(this); - } - - public boolean isStarting() { - return SCHEDULED_TO_RESTART.ordinal() < this.ordinal() - && RUNNING_HEALTHY.ordinal() > this.ordinal(); - } - - public boolean isTerminated() { - return RESOURCE_RELEASED.equals(this) - || TERMINATED_WITHOUT_RELEASE_RESOURCES.equals(this); - } - - public boolean isStopping() { - return RUNNING_HEALTHY.ordinal() < this.ordinal() && !isTerminated(); - } - - public static final Set<ApplicationStateSummary> infrastructureFailures = - Set.of(DRIVER_LAUNCH_TIMED_OUT, - EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE); - - public static final Set<ApplicationStateSummary> failures = Set.of(DRIVER_LAUNCH_TIMED_OUT, - EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE, FAILED, - SPARK_SESSION_INITIALIZATION_TIMED_OUT); - - @Override - public boolean isFailure() { - return failures.contains(this); - } - - @Override - public boolean isInfrastructureFailure() { - return infrastructureFailures.contains(this); - } + /** + * Spark application is submitted to the cluster but not yet scheduled. + */ + SUBMITTED, + + /** + * Spark application will be restarted with the same configuration + */ + SCHEDULED_TO_RESTART, + + /** + * A request has been made to start the driver pod in the cluster + */ + DRIVER_REQUESTED, + + /** + * The driver pod has reached running state + */ + DRIVER_STARTED, + + /** + * The Spark session is initialized + */ + DRIVER_READY, + + /** + * Less than the minimal required executor pods became ready during startup + */ + INITIALIZED_BELOW_THRESHOLD_EXECUTORS, + + /** + * All required executor pods have started + */ + RUNNING_HEALTHY, + + /** + * The application has lost a fraction of its executors for external reasons + */ + RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS, + + /** + * The request timed out for the driver + */ + DRIVER_LAUNCH_TIMED_OUT, + + /** + * The request timed out for the executors + */ + EXECUTORS_LAUNCH_TIMED_OUT, + + /** + * Timed out waiting for the Spark context to be initialized + */ + SPARK_SESSION_INITIALIZATION_TIMED_OUT, + + /** + * The application completed successfully, or System.exit was called explicitly with zero + * status + */ + SUCCEEDED, + + /** + * The application has failed: the JVM exited abnormally, or System.exit was called + * explicitly with non-zero status + */ + FAILED, + + /** + * The job has failed because of a scheduler-side issue, e.g. the driver was scheduled on a + * node with insufficient resources + */ + SCHEDULING_FAILURE, + + /** + * The driver pod failed with reason 'Evicted' + */ + DRIVER_EVICTED, + + /** + * All resources (pods, services, etc.) have been cleaned up + */ + RESOURCE_RELEASED, + + /** + * If configured, the operator may mark an app as terminated without releasing resources. + * While this can be helpful in the dev phase, it shall not be enabled for prod use cases. + */ + TERMINATED_WITHOUT_RELEASE_RESOURCES; + + public boolean isInitializing() { + return SUBMITTED.equals(this) || SCHEDULED_TO_RESTART.equals(this); + } + + public boolean isStarting() { + return SCHEDULED_TO_RESTART.ordinal() < this.ordinal() + && RUNNING_HEALTHY.ordinal() > this.ordinal(); + } + + public boolean isTerminated() { + return RESOURCE_RELEASED.equals(this) + || TERMINATED_WITHOUT_RELEASE_RESOURCES.equals(this); + } + + public boolean isStopping() { + return RUNNING_HEALTHY.ordinal() < this.ordinal() && !isTerminated(); + } + + public static final Set<ApplicationStateSummary> infrastructureFailures = + Set.of(DRIVER_LAUNCH_TIMED_OUT, + EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE); + + public static final Set<ApplicationStateSummary> failures = Set.of(DRIVER_LAUNCH_TIMED_OUT, + EXECUTORS_LAUNCH_TIMED_OUT, SCHEDULING_FAILURE, FAILED, + SPARK_SESSION_INITIALIZATION_TIMED_OUT); + + @Override + public boolean isFailure() { + return failures.contains(this); + } + + @Override + public boolean isInfrastructureFailure() { + return infrastructureFailures.contains(this); + } }
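Note that isStarting and isStopping above rely on ordinal comparisons, so the declaration order of the enum constants is load-bearing: new states must be inserted in lifecycle order. A small illustration of the predicates, assuming only the enum above:

import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;

public final class StateSummaryDemo {
    public static void main(String[] args) {
        // DRIVER_STARTED sits between SCHEDULED_TO_RESTART and RUNNING_HEALTHY.
        System.out.println(ApplicationStateSummary.DRIVER_STARTED.isStarting());      // true
        // FAILED is ordered after RUNNING_HEALTHY but is not yet terminal.
        System.out.println(ApplicationStateSummary.FAILED.isStopping());              // true
        // Only the two release states are terminal.
        System.out.println(ApplicationStateSummary.RESOURCE_RELEASED.isTerminated()); // true
        System.out.println(ApplicationStateSummary.FAILED.isTerminated());            // false
    }
}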
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java index bc944d53..ce77b67f 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/ApplicationStatus.java @@ -18,18 +18,19 @@ package org.apache.spark.kubernetes.operator.status; +import java.util.Collections; +import java.util.Map; +import java.util.TreeMap; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import lombok.EqualsAndHashCode; import lombok.ToString; import org.apache.commons.lang3.StringUtils; + import org.apache.spark.kubernetes.operator.spec.RestartConfig; import org.apache.spark.kubernetes.operator.spec.RestartPolicy; -import java.util.Collections; -import java.util.Map; -import java.util.TreeMap; - import static org.apache.spark.kubernetes.operator.Constants.ExceedMaxRetryAttemptMessage; @EqualsAndHashCode(callSuper = true) @@ -37,93 +38,93 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class ApplicationStatus - extends BaseStatus<ApplicationStateSummary, ApplicationState, ApplicationAttemptSummary> { + extends BaseStatus<ApplicationStateSummary, ApplicationState, ApplicationAttemptSummary> { - public ApplicationStatus() { - super(new ApplicationState(), new ApplicationAttemptSummary()); - } + public ApplicationStatus() { + super(new ApplicationState(), new ApplicationAttemptSummary()); + } - public ApplicationStatus(ApplicationState currentState, - Map<Long, ApplicationState> stateTransitionHistory, - ApplicationAttemptSummary previousAttemptSummary, - ApplicationAttemptSummary currentAttemptSummary) { - super(currentState, stateTransitionHistory, previousAttemptSummary, currentAttemptSummary); - } + public ApplicationStatus(ApplicationState currentState, + Map<Long, ApplicationState> stateTransitionHistory, + ApplicationAttemptSummary previousAttemptSummary, + ApplicationAttemptSummary currentAttemptSummary) { + super(currentState, stateTransitionHistory, previousAttemptSummary, currentAttemptSummary); + } - /** - * Create a new ApplicationStatus, set the given latest state as current and update state - * history - */ - public ApplicationStatus appendNewState(ApplicationState state) { - return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), - previousAttemptSummary, currentAttemptSummary); - } + /** + * Create a new ApplicationStatus, set
the given latest state as current and update state + * history + */ + public ApplicationStatus appendNewState(ApplicationState state) { + return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), + previousAttemptSummary, currentAttemptSummary); + } - /** - * Create ApplicationStatus to be updated upon termination of current attempt, with respect - * to current state and restart config. - * - * @param restartConfig restart config for the app - * @param stateMessageOverride state message to be applied - * @param trimStateTransitionHistory if enabled, operator would trim the state history, - * keeping only previous and current attempt. - * @return updated ApplicationStatus - */ - public ApplicationStatus terminateOrRestart(final RestartConfig restartConfig, - String stateMessageOverride, - boolean trimStateTransitionHistory) { - if (!currentState.currentStateSummary.isStopping()) { - // application is not stopping, skip - throw new RuntimeException( - "Spark application cannot be directly terminated unless in stopping " + - "state, current state is: " + currentState); - } + /** + * Create ApplicationStatus to be updated upon termination of current attempt, with respect + * to current state and restart config. + * + * @param restartConfig restart config for the app + * @param stateMessageOverride state message to be applied + * @param trimStateTransitionHistory if enabled, operator would trim the state history, + * keeping only previous and current attempt. + * @return updated ApplicationStatus + */ + public ApplicationStatus terminateOrRestart(final RestartConfig restartConfig, + String stateMessageOverride, + boolean trimStateTransitionHistory) { + if (!currentState.currentStateSummary.isStopping()) { + // application is not stopping, skip + throw new RuntimeException( + "Spark application cannot be directly terminated unless in stopping " + + "state, current state is: " + currentState); + } - if (!RestartPolicy.attemptRestartOnState(restartConfig.getRestartPolicy(), - currentState.getCurrentStateSummary())) { - // no restart configured - ApplicationState state = new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, - stateMessageOverride); - return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), - previousAttemptSummary, currentAttemptSummary); - } + if (!RestartPolicy.attemptRestartOnState(restartConfig.getRestartPolicy(), + currentState.getCurrentStateSummary())) { + // no restart configured + ApplicationState state = new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, + stateMessageOverride); + return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), + previousAttemptSummary, currentAttemptSummary); + } - if (currentAttemptSummary.getAttemptInfo().getId() >= - restartConfig.getMaxRestartAttempts()) { - String stateMessage = String.format(ExceedMaxRetryAttemptMessage, - restartConfig.getMaxRestartAttempts()); - if (StringUtils.isNotEmpty(stateMessageOverride)) { - stateMessage += stateMessageOverride; - } - // max number of restart attempt reached - ApplicationState state = - new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, stateMessage); - // still use previous & current attempt summary - they are to be updated only upon - // new restart - return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), - previousAttemptSummary, currentAttemptSummary); - } + if (currentAttemptSummary.getAttemptInfo().getId() >= + restartConfig.getMaxRestartAttempts()) { + String stateMessage = 
String.format(ExceedMaxRetryAttemptMessage, + restartConfig.getMaxRestartAttempts()); + if (StringUtils.isNotEmpty(stateMessageOverride)) { + stateMessage += stateMessageOverride; + } + // max number of restart attempts reached + ApplicationState state = + new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, stateMessage); + // still use previous & current attempt summary - they are to be updated only upon + // new restart + return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), + previousAttemptSummary, currentAttemptSummary); + } - ApplicationAttemptSummary nextAttemptSummary = new ApplicationAttemptSummary(); - nextAttemptSummary.setAttemptInfo( - currentAttemptSummary.getAttemptInfo().createNextAttemptInfo()); - ApplicationState state = new ApplicationState(ApplicationStateSummary.SCHEDULED_TO_RESTART, - stateMessageOverride); + ApplicationAttemptSummary nextAttemptSummary = new ApplicationAttemptSummary(); + nextAttemptSummary.setAttemptInfo( + currentAttemptSummary.getAttemptInfo().createNextAttemptInfo()); + ApplicationState state = new ApplicationState(ApplicationStateSummary.SCHEDULED_TO_RESTART, + stateMessageOverride); - if (trimStateTransitionHistory) { - currentAttemptSummary.setStateTransitionHistory(stateTransitionHistory); - return new ApplicationStatus(state, - Collections.singletonMap(getCurrentStateId() + 1, state), currentAttemptSummary, - nextAttemptSummary); - } else { - return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), - currentAttemptSummary, nextAttemptSummary); - } + if (trimStateTransitionHistory) { + currentAttemptSummary.setStateTransitionHistory(stateTransitionHistory); + return new ApplicationStatus(state, + Collections.singletonMap(getCurrentStateId() + 1, state), currentAttemptSummary, + nextAttemptSummary); + } else { + return new ApplicationStatus(state, createUpdatedHistoryWithNewState(state), + currentAttemptSummary, nextAttemptSummary); } + } - private Map<Long, ApplicationState> createUpdatedHistoryWithNewState(ApplicationState state) { - TreeMap<Long, ApplicationState> updatedHistory = new TreeMap<>(stateTransitionHistory); - updatedHistory.put(updatedHistory.lastKey() + 1L, state); - return updatedHistory; - } + private Map<Long, ApplicationState> createUpdatedHistoryWithNewState(ApplicationState state) { + TreeMap<Long, ApplicationState> updatedHistory = new TreeMap<>(stateTransitionHistory); + updatedHistory.put(updatedHistory.lastKey() + 1L, state); + return updatedHistory; + } }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java index eed944eb..35e200cc 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/AttemptInfo.java @@ -32,10 +32,10 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class AttemptInfo { - @Builder.Default - protected final Long id = 0L; + @Builder.Default + protected final Long id = 0L; - public AttemptInfo createNextAttemptInfo() { - return new AttemptInfo(id + 1L); - } + public AttemptInfo createNextAttemptInfo() { + return new AttemptInfo(id + 1L); + } }
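The interplay between terminateOrRestart and AttemptInfo can be sketched end to end. The wiring below is illustrative only: it assumes Lombok-generated setters on RestartConfig alongside the getters shown elsewhere in this patch, and the message strings are made up:

import org.apache.spark.kubernetes.operator.spec.RestartConfig;
import org.apache.spark.kubernetes.operator.spec.RestartPolicy;
import org.apache.spark.kubernetes.operator.status.ApplicationState;
import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
import org.apache.spark.kubernetes.operator.status.ApplicationStatus;

public final class RestartFlowDemo {
    public static void main(String[] args) {
        RestartConfig config = new RestartConfig();
        config.setRestartPolicy(RestartPolicy.OnFailure);
        config.setMaxRestartAttempts(2L);

        // Drive a fresh status into a stopping state, then ask for the restart decision.
        ApplicationStatus status = new ApplicationStatus()
            .appendNewState(new ApplicationState(ApplicationStateSummary.FAILED, "driver OOM"));
        ApplicationStatus next = status.terminateOrRestart(config, "restarting", false);

        // Attempt id 0 is below maxRestartAttempts, so the app is scheduled to restart
        // and the new current attempt carries the incremented id.
        System.out.println(next.getCurrentState().getCurrentStateSummary()); // SCHEDULED_TO_RESTART
        System.out.println(next.getCurrentAttemptSummary().getAttemptInfo().getId()); // 1
    }
}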
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java index 0d2d5412..bd9bc2fb 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseAttemptSummary.java @@ -26,5 +26,5 @@ @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class BaseAttemptSummary { - protected AttemptInfo attemptInfo = new AttemptInfo(); + protected AttemptInfo attemptInfo = new AttemptInfo(); }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java index 92a5a5c1..aba5b7c4 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseState.java @@ -18,19 +18,19 @@ package org.apache.spark.kubernetes.operator.status; +import java.io.Serializable; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import lombok.AllArgsConstructor; import lombok.Data; -import java.io.Serializable; - @Data @AllArgsConstructor @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class BaseState<T extends BaseStateSummary> implements Serializable { - protected T currentStateSummary; - protected String lastTransitionTime; - protected String message; + protected T currentStateSummary; + protected String lastTransitionTime; + protected String message; }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java index 6e1af4bb..21519b6f 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStateSummary.java @@ -19,7 +19,7 @@ package org.apache.spark.kubernetes.operator.status; public interface BaseStateSummary { - boolean isFailure(); + boolean isFailure(); - boolean isInfrastructureFailure(); + boolean isInfrastructureFailure(); }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java index 0144aa1d..5f085ad4 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/status/BaseStatus.java @@ -18,49 +18,49 @@ package org.apache.spark.kubernetes.operator.status; +import java.util.Collections; +import java.util.Map; +import java.util.TreeMap; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.ToString; -import java.util.Collections; -import java.util.Map; -import java.util.TreeMap; - @ToString @EqualsAndHashCode @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class BaseStatus<S extends BaseStateSummary, STATE extends BaseState<S>, AS extends BaseAttemptSummary> { - @Getter - STATE currentState; - @Getter - Map<Long, STATE> stateTransitionHistory; - @Getter - AS previousAttemptSummary; - @Getter - AS currentAttemptSummary; + @Getter + STATE currentState; + @Getter + Map<Long, STATE> stateTransitionHistory; + @Getter + AS previousAttemptSummary; +
@Getter + AS currentAttemptSummary; - public BaseStatus(STATE initState, AS currentAttemptSummary) { - this.currentState = initState; - this.stateTransitionHistory = new TreeMap<>(); - this.stateTransitionHistory.put(0L, initState); - this.previousAttemptSummary = null; - this.currentAttemptSummary = currentAttemptSummary; - } + public BaseStatus(STATE initState, AS currentAttemptSummary) { + this.currentState = initState; + this.stateTransitionHistory = new TreeMap<>(); + this.stateTransitionHistory.put(0L, initState); + this.previousAttemptSummary = null; + this.currentAttemptSummary = currentAttemptSummary; + } - public BaseStatus(STATE currentState, - Map<Long, STATE> stateTransitionHistory, - AS previousAttemptSummary, - AS currentAttemptSummary) { - this.currentState = currentState; - this.stateTransitionHistory = new TreeMap<>(stateTransitionHistory); - this.previousAttemptSummary = previousAttemptSummary; - this.currentAttemptSummary = currentAttemptSummary; - } + public BaseStatus(STATE currentState, + Map<Long, STATE> stateTransitionHistory, + AS previousAttemptSummary, + AS currentAttemptSummary) { + this.currentState = currentState; + this.stateTransitionHistory = new TreeMap<>(stateTransitionHistory); + this.previousAttemptSummary = previousAttemptSummary; + this.currentAttemptSummary = currentAttemptSummary; + } - protected long getCurrentStateId() { - return Collections.max(stateTransitionHistory.keySet()); - } + protected long getCurrentStateId() { + return Collections.max(stateTransitionHistory.keySet()); + } }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java index d3d71381..fd71fbf2 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/utils/ModelUtils.java @@ -26,84 +26,85 @@ import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.api.model.PodBuilder; import io.fabric8.kubernetes.api.model.PodTemplateSpec; + import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; public class ModelUtils { - public static final String DRIVER_SPARK_CONTAINER_PROP_KEY = - "spark.kubernetes.driver.podTemplateContainerName"; - public static final String DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY = - "spark.kubernetes.driver.podTemplateFile"; - public static final String EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY = - "spark.kubernetes.executor.podTemplateFile"; - public static final ObjectMapper objectMapper = new ObjectMapper(); + public static final String DRIVER_SPARK_CONTAINER_PROP_KEY = + "spark.kubernetes.driver.podTemplateContainerName"; + public static final String DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY = + "spark.kubernetes.driver.podTemplateFile"; + public static final String EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY = + "spark.kubernetes.executor.podTemplateFile"; + public static final ObjectMapper objectMapper = new ObjectMapper(); - public static Pod defaultPod() { - return new PodBuilder() - .withNewMetadata() - .endMetadata() - .withNewSpec() - .endSpec() - .build(); - } + public static Pod defaultPod() { + return new PodBuilder() + .withNewMetadata() + .endMetadata() + .withNewSpec() + .endSpec() + .build(); + } - public static Pod getPodFromTemplateSpec(PodTemplateSpec podTemplateSpec) { - if (podTemplateSpec != null) { - return new PodBuilder() - .withMetadata(podTemplateSpec.getMetadata()) -
.withSpec(podTemplateSpec.getSpec()) - .withAdditionalProperties(podTemplateSpec.getAdditionalProperties()) - .build(); - } else { - return defaultPod(); - } + public static Pod getPodFromTemplateSpec(PodTemplateSpec podTemplateSpec) { + if (podTemplateSpec != null) { + return new PodBuilder() + .withMetadata(podTemplateSpec.getMetadata()) + .withSpec(podTemplateSpec.getSpec()) + .withAdditionalProperties(podTemplateSpec.getAdditionalProperties()) + .build(); + } else { + return defaultPod(); } + } - /** - * Return true if given container name is main container in driver pod - * If `spark.kubernetes.driver.podTemplateContainerName` is not set, all containers are - * considered as main - */ - public static boolean isDriverMainContainer(final ApplicationSpec appSpec, - final String containerName) { - if (appSpec == null || appSpec.getSparkConf() == null - || !appSpec.getSparkConf().containsKey(DRIVER_SPARK_CONTAINER_PROP_KEY)) { - return true; - } - return appSpec.getSparkConf().get(DRIVER_SPARK_CONTAINER_PROP_KEY) - .equalsIgnoreCase(containerName); + /** + * Return true if the given container name is the main container in the driver pod. + * If `spark.kubernetes.driver.podTemplateContainerName` is not set, all containers are + * considered main. + */ + public static boolean isDriverMainContainer(final ApplicationSpec appSpec, + final String containerName) { + if (appSpec == null || appSpec.getSparkConf() == null + || !appSpec.getSparkConf().containsKey(DRIVER_SPARK_CONTAINER_PROP_KEY)) { + return true; } + return appSpec.getSparkConf().get(DRIVER_SPARK_CONTAINER_PROP_KEY) + .equalsIgnoreCase(containerName); + } - /** - * Build OwnerReference to the given resource - * - * @param owner the owner - * @return OwnerReference to be used for subresources - */ - public static OwnerReference buildOwnerReferenceTo(HasMetadata owner) { - return new OwnerReferenceBuilder() - .withName(owner.getMetadata().getName()) - .withApiVersion(owner.getApiVersion()) - .withKind(owner.getKind()) - .withUid(owner.getMetadata().getUid()) - .withBlockOwnerDeletion(true) - .build(); - } + /** + * Build an OwnerReference to the given resource + * + * @param owner the owner + * @return OwnerReference to be used for subresources + */ + public static OwnerReference buildOwnerReferenceTo(HasMetadata owner) { + return new OwnerReferenceBuilder() + .withName(owner.getMetadata().getName()) + .withApiVersion(owner.getApiVersion()) + .withKind(owner.getKind()) + .withUid(owner.getMetadata().getUid()) + .withBlockOwnerDeletion(true) + .build(); + } - public static <T> String asJsonString(T resource) { - try { - return objectMapper.writeValueAsString(resource); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } + public static <T> String asJsonString(T resource) { + try { + return objectMapper.writeValueAsString(resource); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); } + } - public static boolean overrideDriverTemplate(ApplicationSpec applicationSpec) { - return applicationSpec != null && applicationSpec.getDriverSpec() != null - && applicationSpec.getDriverSpec().getPodTemplateSpec() != null; - } + public static boolean overrideDriverTemplate(ApplicationSpec applicationSpec) { + return applicationSpec != null && applicationSpec.getDriverSpec() != null + && applicationSpec.getDriverSpec().getPodTemplateSpec() != null; + } - public static boolean overrideExecutorTemplate(ApplicationSpec applicationSpec) { - return applicationSpec != null && applicationSpec.getExecutorSpec() != null - && applicationSpec.getExecutorSpec().getPodTemplateSpec() != null; - } + public static boolean overrideExecutorTemplate(ApplicationSpec applicationSpec) { + return applicationSpec != null && applicationSpec.getExecutorSpec() != null + && applicationSpec.getExecutorSpec().getPodTemplateSpec() != null; + } }
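buildOwnerReferenceTo is what lets operator-created subresources be garbage-collected together with their owning app. A minimal usage sketch; the ConfigMap, its name, and the surrounding class are illustrative and not part of this patch:

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import org.apache.spark.kubernetes.operator.SparkApplication;
import org.apache.spark.kubernetes.operator.utils.ModelUtils;

public final class OwnerRefExample {
    /** Builds a ConfigMap that Kubernetes deletes when the owning app is deleted. */
    public static ConfigMap confFor(SparkApplication app) {
        return new ConfigMapBuilder()
            .withNewMetadata()
            .withName(app.getMetadata().getName() + "-conf")
            .withNamespace(app.getMetadata().getNamespace())
            .withOwnerReferences(ModelUtils.buildOwnerReferenceTo(app))
            .endMetadata()
            .build();
    }
}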
diff --git a/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java b/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java index 89a1b3ce..0cbe94c8 100644 --- a/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java +++ b/spark-operator-api/src/test/java/org/apache/spark/kubernetes/operator/status/ApplicationStatusTest.java @@ -25,23 +25,23 @@ class ApplicationStatusTest { - @Test - void testInitStatus() { - ApplicationStatus applicationStatus = new ApplicationStatus(); - Assertions.assertEquals(SUBMITTED, applicationStatus.currentState.currentStateSummary); - Assertions.assertEquals(1, applicationStatus.stateTransitionHistory.size()); - Assertions.assertEquals(applicationStatus.currentState, - applicationStatus.stateTransitionHistory.get(0L)); - } + @Test + void testInitStatus() { + ApplicationStatus applicationStatus = new ApplicationStatus(); + Assertions.assertEquals(SUBMITTED, applicationStatus.currentState.currentStateSummary); + Assertions.assertEquals(1, applicationStatus.stateTransitionHistory.size()); + Assertions.assertEquals(applicationStatus.currentState, + applicationStatus.stateTransitionHistory.get(0L)); + } - @Test - void testAppendNewState() { - ApplicationStatus applicationStatus = new ApplicationStatus(); - ApplicationState newState = - new ApplicationState(ApplicationStateSummary.RUNNING_HEALTHY, "foo"); - ApplicationStatus newStatus = applicationStatus.appendNewState(newState); - Assertions.assertEquals(2, newStatus.stateTransitionHistory.size()); - Assertions.assertEquals(newState, newStatus.stateTransitionHistory.get(1L)); - } + @Test + void testAppendNewState() { + ApplicationStatus applicationStatus = new ApplicationStatus(); + ApplicationState newState = + new ApplicationState(ApplicationStateSummary.RUNNING_HEALTHY, "foo"); + ApplicationStatus newStatus = applicationStatus.appendNewState(newState); + Assertions.assertEquals(2, newStatus.stateTransitionHistory.size()); + Assertions.assertEquals(newState, newStatus.stateTransitionHistory.get(1L)); + } }
diff --git a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java index f19151c2..9d404316 100644 --- a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java +++ b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java @@ -18,12 +18,6 @@ package org.apache.spark.kubernetes.operator; -import
io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.time.Duration; import java.time.Instant; @@ -39,155 +33,162 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -class AppSubmitToSucceedTest { - private static final Logger logger = LoggerFactory.getLogger(AppSubmitToSucceedTest.class); +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; - /** - * Create Spark app(s) & wait them for complete. - * This sample would check apps periodically, force delete them after timeout if they have - * not completed. - * Exit 0 iff all given app(s) isTerminated successfully. - * E.g. when test cluster is up and kube config is configured, this can be invoked as - * java -cp /path/to/test.jar -Dspark.operator.test.app.yaml.files.dir=/path/to/e2e-tests/ - * org.apache.spark.kubernetes.operator.AppSubmitToSucceedTest - * - * @param args directory path(s) to load SparkApp yaml file(s) from - */ - public static void main(String[] args) throws InterruptedException { - KubernetesClient client = new KubernetesClientBuilder().build(); +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; - Duration observeInterval = Duration.ofMinutes( - Long.parseLong( - System.getProperty("spark.operator.test.observe.interval.min", "1"))); - Duration appExecTimeout = Duration.ofMinutes( - Long.parseLong( - System.getProperty("spark.operator.test.app.timeout.min", "10"))); - Duration testTimeout = Duration.ofMinutes( - Long.parseLong( - System.getProperty("spark.operator.test.timeout.min", "30"))); - Integer execParallelism = Integer.parseInt( - System.getProperty("spark.operator.test.exec.parallelism", "2")); - String testAppYamlFilesDir = System.getProperty("spark.operator.test.app.yaml.files.dir", - "e2e-tests/spark-apps/"); - String testAppNamespace = System.getProperty("spark.operator.test.app.namespace", - "spark-test"); +class AppSubmitToSucceedTest { + private static final Logger logger = LoggerFactory.getLogger(AppSubmitToSucceedTest.class); - Set testApps = - loadSparkAppsFromFile(client, new File(testAppYamlFilesDir)); - ConcurrentMap failedApps = new ConcurrentHashMap<>(); + /** + * Create Spark app(s) and wait for them to complete. + * This sample checks apps periodically and force-deletes them after the timeout if they + * have not completed. + * Exits 0 iff all given app(s) terminated successfully. + * E.g. 
when test cluster is up and kube config is configured, this can be invoked as + * java -cp /path/to/test.jar -Dspark.operator.test.app.yaml.files.dir=/path/to/e2e-tests/ + * org.apache.spark.kubernetes.operator.AppSubmitToSucceedTest + * + * @param args directory path(s) to load SparkApp yaml file(s) from + */ + public static void main(String[] args) throws InterruptedException { + KubernetesClient client = new KubernetesClientBuilder().build(); - ExecutorService execPool = Executors.newFixedThreadPool(execParallelism); - List> todos = new ArrayList<>(testApps.size()); + Duration observeInterval = Duration.ofMinutes( + Long.parseLong( + System.getProperty("spark.operator.test.observe.interval.min", "1"))); + Duration appExecTimeout = Duration.ofMinutes( + Long.parseLong( + System.getProperty("spark.operator.test.app.timeout.min", "10"))); + Duration testTimeout = Duration.ofMinutes( + Long.parseLong( + System.getProperty("spark.operator.test.timeout.min", "30"))); + Integer execParallelism = Integer.parseInt( + System.getProperty("spark.operator.test.exec.parallelism", "2")); + String testAppYamlFilesDir = System.getProperty("spark.operator.test.app.yaml.files.dir", + "e2e-tests/spark-apps/"); + String testAppNamespace = System.getProperty("spark.operator.test.app.namespace", + "spark-test"); - for (SparkApplication app : testApps) { - todos.add(() -> { - try { - Instant timeoutTime = Instant.now().plus(appExecTimeout); - SparkApplication updatedApp = - client.resource(app).inNamespace(testAppNamespace).create(); - if (logger.isInfoEnabled()) { - logger.info("Submitting app {}", updatedApp.getMetadata().getName()); - } - while (Instant.now().isBefore(timeoutTime)) { - Thread.sleep(observeInterval.toMillis()); - updatedApp = client.resource(app).inNamespace(testAppNamespace).get(); - if (appCompleted(updatedApp)) { - boolean succeeded = updatedApp.getStatus().getStateTransitionHistory() - .entrySet() - .stream() - .anyMatch(e -> ApplicationStateSummary.SUCCEEDED.equals( - e.getValue().getCurrentStateSummary())); - if (succeeded) { - if (logger.isInfoEnabled()) { - logger.info("App succeeded: {}", - updatedApp.getMetadata().getName()); - } - } else { - if (logger.isErrorEnabled()) { - logger.error("App failed: {}", - updatedApp.getMetadata().getName()); - } - failedApps.put(updatedApp.getMetadata().getName(), - updatedApp.getStatus().toString()); - } - return null; - } else { - if (logger.isInfoEnabled()) { - logger.info("Application {} not completed...", - app.getMetadata().getName()); - } - } - } - if (logger.isInfoEnabled()) { - logger.info("App {} timed out.", app.getMetadata().getName()); - } - failedApps.put(updatedApp.getMetadata().getName(), - "timed out: " + updatedApp.getStatus().toString()); - return null; - } catch (Exception e) { - failedApps.put(app.getMetadata().getName(), "failed: " + e.getMessage()); - return null; - } - }); - } + Set testApps = + loadSparkAppsFromFile(client, new File(testAppYamlFilesDir)); + ConcurrentMap failedApps = new ConcurrentHashMap<>(); + + ExecutorService execPool = Executors.newFixedThreadPool(execParallelism); + List> todos = new ArrayList<>(testApps.size()); - int testSucceeded = 1; + for (SparkApplication app : testApps) { + todos.add(() -> { try { - execPool.invokeAll(todos, testTimeout.toMillis(), TimeUnit.MILLISECONDS); - if (failedApps.isEmpty()) { + Instant timeoutTime = Instant.now().plus(appExecTimeout); + SparkApplication updatedApp = + client.resource(app).inNamespace(testAppNamespace).create(); + if (logger.isInfoEnabled()) { + 
logger.info("Submitting app {}", updatedApp.getMetadata().getName()); + } + while (Instant.now().isBefore(timeoutTime)) { + Thread.sleep(observeInterval.toMillis()); + updatedApp = client.resource(app).inNamespace(testAppNamespace).get(); + if (appCompleted(updatedApp)) { + boolean succeeded = updatedApp.getStatus().getStateTransitionHistory() + .entrySet() + .stream() + .anyMatch(e -> ApplicationStateSummary.SUCCEEDED.equals( + e.getValue().getCurrentStateSummary())); + if (succeeded) { if (logger.isInfoEnabled()) { - logger.info("Test completed successfully"); + logger.info("App succeeded: {}", + updatedApp.getMetadata().getName()); } - testSucceeded = 0; - } else { + } else { if (logger.isErrorEnabled()) { - logger.error("Failed apps found. "); - failedApps.forEach((k, v) -> { - logger.error("Application failed: {}", k); - logger.error("\t status: {}", v); - }); - } - } - } finally { - for (SparkApplication app : testApps) { - try { - client.resource(app).inNamespace(testAppNamespace).delete(); - } catch (Exception e) { - if (logger.isErrorEnabled()) { - logger.error("Failed to remove app {}", app.getMetadata().getName()); - } + logger.error("App failed: {}", + updatedApp.getMetadata().getName()); } + failedApps.put(updatedApp.getMetadata().getName(), + updatedApp.getStatus().toString()); + } + return null; + } else { + if (logger.isInfoEnabled()) { + logger.info("Application {} not completed...", + app.getMetadata().getName()); + } } + } + if (logger.isInfoEnabled()) { + logger.info("App {} timed out.", app.getMetadata().getName()); + } + failedApps.put(updatedApp.getMetadata().getName(), + "timed out: " + updatedApp.getStatus().toString()); + return null; + } catch (Exception e) { + failedApps.put(app.getMetadata().getName(), "failed: " + e.getMessage()); + return null; } - System.exit(testSucceeded); + }); } - private static Set loadSparkAppsFromFile(KubernetesClient client, - File appsFile) { - if (appsFile.exists()) { - if (appsFile.isFile()) { - return Collections.singleton( - client.resources(SparkApplication.class).load(appsFile).item()); - } else { - Set applications = new HashSet<>(); - File[] subDirs = appsFile.listFiles(); - if (subDirs != null) { - for (File file : subDirs) { - applications.addAll(loadSparkAppsFromFile(client, file)); - } - } - return applications; - } + int testSucceeded = 1; + try { + execPool.invokeAll(todos, testTimeout.toMillis(), TimeUnit.MILLISECONDS); + if (failedApps.isEmpty()) { + if (logger.isInfoEnabled()) { + logger.info("Test completed successfully"); } + testSucceeded = 0; + } else { if (logger.isErrorEnabled()) { - logger.error("No SparkApp found at {}", appsFile.getAbsolutePath()); + logger.error("Failed apps found. 
"); + failedApps.forEach((k, v) -> { + logger.error("Application failed: {}", k); + logger.error("\t status: {}", v); + }); + } + } + } finally { + for (SparkApplication app : testApps) { + try { + client.resource(app).inNamespace(testAppNamespace).delete(); + } catch (Exception e) { + if (logger.isErrorEnabled()) { + logger.error("Failed to remove app {}", app.getMetadata().getName()); + } } - return Collections.emptySet(); + } } + System.exit(testSucceeded); + } - private static boolean appCompleted(SparkApplication app) { - return app != null && app.getStatus() != null && app.getStatus().getCurrentState() != null - && app.getStatus().getStateTransitionHistory() != null - && app.getStatus().getCurrentState().getCurrentStateSummary().isTerminated(); + private static Set loadSparkAppsFromFile(KubernetesClient client, + File appsFile) { + if (appsFile.exists()) { + if (appsFile.isFile()) { + return Collections.singleton( + client.resources(SparkApplication.class).load(appsFile).item()); + } else { + Set applications = new HashSet<>(); + File[] subDirs = appsFile.listFiles(); + if (subDirs != null) { + for (File file : subDirs) { + applications.addAll(loadSparkAppsFromFile(client, file)); + } + } + return applications; + } + } + if (logger.isErrorEnabled()) { + logger.error("No SparkApp found at {}", appsFile.getAbsolutePath()); } + return Collections.emptySet(); + } + + private static boolean appCompleted(SparkApplication app) { + return app != null && app.getStatus() != null && app.getStatus().getCurrentState() != null + && app.getStatus().getStateTransitionHistory() != null + && app.getStatus().getCurrentState().getCurrentStateSummary().isTerminated(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java index b6a31069..113e2d0f 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java @@ -18,22 +18,32 @@ package org.apache.spark.kubernetes.operator; +import java.time.Duration; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.Stream; + import io.fabric8.kubernetes.client.KubernetesClient; import io.javaoperatorsdk.operator.Operator; +import io.javaoperatorsdk.operator.RegisteredController; import io.javaoperatorsdk.operator.api.config.ConfigurationServiceOverrider; import io.javaoperatorsdk.operator.api.config.ControllerConfigurationOverrider; import io.javaoperatorsdk.operator.processing.event.rate.LinearRateLimiter; import io.javaoperatorsdk.operator.processing.event.rate.RateLimiter; import io.javaoperatorsdk.operator.processing.retry.GenericRetry; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; + import org.apache.spark.kubernetes.operator.client.KubernetesClientFactory; -import org.apache.spark.kubernetes.operator.config.SparkOperatorConfigMapReconciler; import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConfigMapReconciler; import org.apache.spark.kubernetes.operator.health.SentinelManager; import org.apache.spark.kubernetes.operator.metrics.MetricsService; import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; 
-import io.javaoperatorsdk.operator.RegisteredController; -import lombok.extern.slf4j.Slf4j; import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory; import org.apache.spark.kubernetes.operator.metrics.source.OperatorJosdkMetrics; import org.apache.spark.kubernetes.operator.probe.ProbeService; @@ -41,15 +51,6 @@ import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.time.Duration; -import java.util.HashSet; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.stream.Collectors; -import java.util.stream.Stream; - import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigEnabled; import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigSelectorStr; import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorNamespace; @@ -63,155 +64,155 @@ */ @Slf4j public class SparkOperator { - private Operator sparkOperator; - private Operator sparkOperatorConfMonitor; - private KubernetesClient client; - private StatusRecorder statusRecorder; - private MetricsSystem metricsSystem; - protected Set> registeredSparkControllers; - protected Set watchedNamespaces; - - private SentinelManager sentinelManager; - private ProbeService probeService; - private MetricsService metricsService; - private ExecutorService metricsResourcesSingleThreadPool; - - public SparkOperator() { - this.metricsSystem = MetricsSystemFactory.createMetricsSystem(); - this.client = KubernetesClientFactory.buildKubernetesClient(metricsSystem); - this.statusRecorder = new StatusRecorder(SparkOperatorConf.getApplicationStatusListener()); - this.registeredSparkControllers = new HashSet<>(); - this.watchedNamespaces = SparkReconcilerUtils.getWatchedNamespaces(); - this.sentinelManager = new SentinelManager(); - this.sparkOperator = createOperator(); - this.sparkOperatorConfMonitor = createSparkOperatorConfMonitor(); - var operators = Stream.of(this.sparkOperator, this.sparkOperatorConfMonitor) - .filter(Objects::nonNull).collect(Collectors.toList()); - this.probeService = new ProbeService(operators, this.sentinelManager); - this.metricsService = new MetricsService(metricsSystem); - this.metricsResourcesSingleThreadPool = Executors.newSingleThreadExecutor(); + private Operator sparkOperator; + private Operator sparkOperatorConfMonitor; + private KubernetesClient client; + private StatusRecorder statusRecorder; + private MetricsSystem metricsSystem; + protected Set> registeredSparkControllers; + protected Set watchedNamespaces; + + private SentinelManager sentinelManager; + private ProbeService probeService; + private MetricsService metricsService; + private ExecutorService metricsResourcesSingleThreadPool; + + public SparkOperator() { + this.metricsSystem = MetricsSystemFactory.createMetricsSystem(); + this.client = KubernetesClientFactory.buildKubernetesClient(metricsSystem); + this.statusRecorder = new StatusRecorder(SparkOperatorConf.getApplicationStatusListener()); + this.registeredSparkControllers = new HashSet<>(); + this.watchedNamespaces = SparkReconcilerUtils.getWatchedNamespaces(); + this.sentinelManager = new SentinelManager(); + this.sparkOperator = createOperator(); + this.sparkOperatorConfMonitor = createSparkOperatorConfMonitor(); + var operators = Stream.of(this.sparkOperator, this.sparkOperatorConfMonitor) + 
.filter(Objects::nonNull).collect(Collectors.toList()); + this.probeService = new ProbeService(operators, this.sentinelManager); + this.metricsService = new MetricsService(metricsSystem); + this.metricsResourcesSingleThreadPool = Executors.newSingleThreadExecutor(); + } + + protected Operator createOperator() { + Operator op = new Operator(this::overrideOperatorConfigs); + registeredSparkControllers.add( + op.register(new SparkApplicationReconciler(statusRecorder, sentinelManager), + this::overrideControllerConfigs)); + return op; + } + + protected Operator createSparkOperatorConfMonitor() { + if (DynamicConfigEnabled.getValue()) { + Operator op = new Operator(client, c -> { + c.withStopOnInformerErrorDuringStartup(true); + c.withCloseClientOnStop(false); + c.withInformerStoppedHandler( + (informer, ex) -> log.error( + "Dynamic config informer stopped: operator will not accept " + + "config updates.") + ); + }); + op.register(new SparkOperatorConfigMapReconciler(this::updateWatchingNamespaces), c -> { + c.settingNamespaces(OperatorNamespace.getValue()); + c.withLabelSelector(DynamicConfigSelectorStr.getValue()); + }); + return op; + } else { + return null; } + } - protected Operator createOperator() { - Operator op = new Operator(this::overrideOperatorConfigs); - registeredSparkControllers.add( - op.register(new SparkApplicationReconciler(statusRecorder, sentinelManager), - this::overrideControllerConfigs)); - return op; - } + protected Operator getOperator() { + return this.sparkOperator; + } - protected Operator createSparkOperatorConfMonitor() { - if (DynamicConfigEnabled.getValue()) { - Operator op = new Operator(client, c -> { - c.withStopOnInformerErrorDuringStartup(true); - c.withCloseClientOnStop(false); - c.withInformerStoppedHandler( - (informer, ex) -> log.error( - "Dynamic config informer stopped: operator will not accept " + - "config updates.") - ); - }); - op.register(new SparkOperatorConfigMapReconciler(this::updateWatchingNamespaces), c -> { - c.settingNamespaces(OperatorNamespace.getValue()); - c.withLabelSelector(DynamicConfigSelectorStr.getValue()); - }); - return op; - } else { - return null; - } - } + protected ProbeService getProbeService() { + return this.probeService; + } - protected Operator getOperator() { - return this.sparkOperator; + protected boolean updateWatchingNamespaces(Set<String> namespaces) { + if (watchedNamespaces.equals(namespaces)) { + log.info("No watched namespace change detected"); + return false; } - - protected ProbeService getProbeService() { - return this.probeService; + if (CollectionUtils.isEmpty(namespaces)) { + log.error("Cannot update namespaces to an empty set"); + return false; } - - protected boolean updateWatchingNamespaces(Set<String> namespaces) { - if (watchedNamespaces.equals(namespaces)) { - log.info("No watched namespace change detected"); - return false; - } - if (CollectionUtils.isEmpty(namespaces)) { - log.error("Cannot updating namespaces to empty"); - return false; - } - registeredSparkControllers.forEach(c -> { - if (c.allowsNamespaceChanges()) { - log.info("Updating operator namespaces to {}", namespaces); - c.changeNamespaces(namespaces); - } - }); - this.watchedNamespaces = new HashSet<>(namespaces); - return true; + registeredSparkControllers.forEach(c -> { + if (c.allowsNamespaceChanges()) { + log.info("Updating operator namespaces to {}", namespaces); + c.changeNamespaces(namespaces); + } + }); + this.watchedNamespaces = new HashSet<>(namespaces); + return true; + } + + protected void 
overrideOperatorConfigs(ConfigurationServiceOverrider overrider) { + overrider.withKubernetesClient(client); + overrider.withStopOnInformerErrorDuringStartup(TerminateOnInformerFailure.getValue()); + overrider.withTerminationTimeoutSeconds(TerminationTimeoutSeconds.getValue()); + int parallelism = ReconcilerParallelism.getValue(); + if (parallelism > 0) { + log.info("Configuring operator with {} reconciliation threads.", parallelism); + overrider.withConcurrentReconciliationThreads(parallelism); + } else { + log.info("Configuring operator with unbounded reconciliation thread pool."); + overrider.withExecutorService(Executors.newCachedThreadPool()); } - - protected void overrideOperatorConfigs(ConfigurationServiceOverrider overrider) { - overrider.withKubernetesClient(client); - overrider.withStopOnInformerErrorDuringStartup(TerminateOnInformerFailure.getValue()); - overrider.withTerminationTimeoutSeconds(TerminationTimeoutSeconds.getValue()); - int parallelism = ReconcilerParallelism.getValue(); - if (parallelism > 0) { - log.info("Configuring operator with {} reconciliation threads.", parallelism); - overrider.withConcurrentReconciliationThreads(parallelism); - } else { - log.info("Configuring operator with unbounded reconciliation thread pool."); - overrider.withExecutorService(Executors.newCachedThreadPool()); - } - if (SparkOperatorConf.LEADER_ELECTION_ENABLED.getValue()) { - overrider.withLeaderElectionConfiguration(SparkOperatorConf.getLeaderElectionConfig()); - } - if (SparkOperatorConf.JOSDKMetricsEnabled.getValue()) { - log.info("Adding OperatorJosdkMetrics."); - OperatorJosdkMetrics operatorJosdkMetrics = new OperatorJosdkMetrics(); - overrider.withMetrics(operatorJosdkMetrics); - metricsSystem.registerSource(operatorJosdkMetrics); - } + if (SparkOperatorConf.LEADER_ELECTION_ENABLED.getValue()) { + overrider.withLeaderElectionConfiguration(SparkOperatorConf.getLeaderElectionConfig()); } - - protected void overrideControllerConfigs(ControllerConfigurationOverrider overrider) { - if (watchedNamespaces.isEmpty()) { - log.info("Initializing operator watching at cluster level."); - } else { - log.info("Initializing with watched namespaces {}", watchedNamespaces); - } - overrider.settingNamespaces(watchedNamespaces); - - RateLimiter rateLimiter = new LinearRateLimiter( - Duration.ofSeconds(SparkOperatorConf.RateLimiterRefreshPeriodSeconds.getValue()), - SparkOperatorConf.RateLimiterLimit.getValue()); - overrider.withRateLimiter(rateLimiter); - - GenericRetry genericRetry = new GenericRetry() - .setMaxAttempts(SparkOperatorConf.RetryMaxAttempts.getValue()) - .setInitialInterval( - Duration.ofSeconds(SparkOperatorConf.RetryInitialInternalSeconds.getValue()) - .toMillis()) - .setIntervalMultiplier(SparkOperatorConf.RetryInternalMultiplier.getValue()); - if (SparkOperatorConf.RetryMaxIntervalSeconds.getValue() > 0) { - genericRetry.setMaxInterval( - Duration.ofSeconds(SparkOperatorConf.RetryMaxIntervalSeconds.getValue()) - .toMillis()); - } - overrider.withRetry(genericRetry); + if (SparkOperatorConf.JOSDKMetricsEnabled.getValue()) { + log.info("Adding OperatorJosdkMetrics."); + OperatorJosdkMetrics operatorJosdkMetrics = new OperatorJosdkMetrics(); + overrider.withMetrics(operatorJosdkMetrics); + metricsSystem.registerSource(operatorJosdkMetrics); } + } - public static void main(String[] args) { - SparkOperator sparkOperator = new SparkOperator(); - sparkOperator.getOperator().start(); - if (DynamicConfigEnabled.getValue() && sparkOperator.sparkOperatorConfMonitor != null) { - 
sparkOperator.sparkOperatorConfMonitor.start(); - } - sparkOperator.probeService.start(); - // MetricsServer start follows the MetricsSystem start - // so that MetricsSystem::getSinks will not return an empty list - sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { - sparkOperator.metricsSystem.start(); - }); - sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { - sparkOperator.metricsService.start(); - }); + protected void overrideControllerConfigs(ControllerConfigurationOverrider overrider) { + if (watchedNamespaces.isEmpty()) { + log.info("Initializing operator watching at cluster level."); + } else { + log.info("Initializing with watched namespaces {}", watchedNamespaces); + } + overrider.settingNamespaces(watchedNamespaces); + + RateLimiter rateLimiter = new LinearRateLimiter( + Duration.ofSeconds(SparkOperatorConf.RateLimiterRefreshPeriodSeconds.getValue()), + SparkOperatorConf.RateLimiterLimit.getValue()); + overrider.withRateLimiter(rateLimiter); + + GenericRetry genericRetry = new GenericRetry() + .setMaxAttempts(SparkOperatorConf.RetryMaxAttempts.getValue()) + .setInitialInterval( + Duration.ofSeconds(SparkOperatorConf.RetryInitialInternalSeconds.getValue()) + .toMillis()) + .setIntervalMultiplier(SparkOperatorConf.RetryInternalMultiplier.getValue()); + if (SparkOperatorConf.RetryMaxIntervalSeconds.getValue() > 0) { + genericRetry.setMaxInterval( + Duration.ofSeconds(SparkOperatorConf.RetryMaxIntervalSeconds.getValue()) + .toMillis()); + } + overrider.withRetry(genericRetry); + } + + public static void main(String[] args) { + SparkOperator sparkOperator = new SparkOperator(); + sparkOperator.getOperator().start(); + if (DynamicConfigEnabled.getValue() && sparkOperator.sparkOperatorConfMonitor != null) { + sparkOperator.sparkOperatorConfMonitor.start(); } + sparkOperator.probeService.start(); + // MetricsServer start follows the MetricsSystem start + // so that MetricsSystem::getSinks will not return an empty list + sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { + sparkOperator.metricsSystem.start(); + }); + sparkOperator.metricsResourcesSingleThreadPool.submit(() -> { + sparkOperator.metricsService.start(); + }); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java index 0d03a46b..249118a8 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/KubernetesClientFactory.java @@ -18,55 +18,56 @@ package org.apache.spark.kubernetes.operator.client; +import java.util.ArrayList; +import java.util.List; + import io.fabric8.kubernetes.client.Config; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.KubernetesClientBuilder; import io.fabric8.kubernetes.client.okhttp.OkHttpClientFactory; import okhttp3.Interceptor; import okhttp3.OkHttpClient; + import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; import org.apache.spark.kubernetes.operator.metrics.source.KubernetesMetricsInterceptor; -import java.util.ArrayList; -import java.util.List; - /** * Build Kubernetes Client with metrics configured */ public class KubernetesClientFactory { - private static final KubernetesMetricsInterceptor kubernetesMetricsInterceptor = - new 
KubernetesMetricsInterceptor();
-
-    public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem) {
-        return buildKubernetesClient(metricsSystem, null);
-    }
+  private static final KubernetesMetricsInterceptor kubernetesMetricsInterceptor =
+      new KubernetesMetricsInterceptor();

-    public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem,
-                                                         Config kubernetesClientConfig) {
-        List<Interceptor> clientInterceptors = new ArrayList<>();
-        clientInterceptors.add(new RetryInterceptor());
+  public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem) {
+    return buildKubernetesClient(metricsSystem, null);
+  }

-        if (SparkOperatorConf.KubernetesClientMetricsEnabled.getValue()) {
-            clientInterceptors.add(kubernetesMetricsInterceptor);
-            // Avoid duplicate register metrics exception
-            if (!metricsSystem.getSources().contains(kubernetesMetricsInterceptor)) {
-                metricsSystem.registerSource(kubernetesMetricsInterceptor);
-            }
-        }
+  public static KubernetesClient buildKubernetesClient(MetricsSystem metricsSystem,
+                                                       Config kubernetesClientConfig) {
+    List<Interceptor> clientInterceptors = new ArrayList<>();
+    clientInterceptors.add(new RetryInterceptor());

-        return new KubernetesClientBuilder()
-                .withConfig(kubernetesClientConfig)
-                .withHttpClientFactory(
-                        new OkHttpClientFactory() {
-                            @Override
-                            protected void additionalConfig(OkHttpClient.Builder builder) {
-                                for (Interceptor interceptor : clientInterceptors) {
-                                    builder.addInterceptor(interceptor);
-                                }
-                            }
-                        }
-                )
-                .build();
+    if (SparkOperatorConf.KubernetesClientMetricsEnabled.getValue()) {
+      clientInterceptors.add(kubernetesMetricsInterceptor);
+      // Avoid a duplicate metrics-registration exception
+      if (!metricsSystem.getSources().contains(kubernetesMetricsInterceptor)) {
+        metricsSystem.registerSource(kubernetesMetricsInterceptor);
+      }
     }
+
+    return new KubernetesClientBuilder()
+        .withConfig(kubernetesClientConfig)
+        .withHttpClientFactory(
+            new OkHttpClientFactory() {
+              @Override
+              protected void additionalConfig(OkHttpClient.Builder builder) {
+                for (Interceptor interceptor : clientInterceptors) {
+                  builder.addInterceptor(interceptor);
+                }
+              }
+            }
+        )
+        .build();
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java
index 3d746136..4f8e8d44 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/client/RetryInterceptor.java
@@ -18,17 +18,18 @@
 package org.apache.spark.kubernetes.operator.client;
 
+import java.io.IOException;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
 import lombok.extern.slf4j.Slf4j;
 import okhttp3.Interceptor;
 import okhttp3.Request;
 import okhttp3.Response;
 import okhttp3.ResponseBody;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.spark.kubernetes.operator.config.SparkOperatorConf;
 
-import java.io.IOException;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
+import org.apache.spark.kubernetes.operator.config.SparkOperatorConf;
 
 import static io.fabric8.kubernetes.client.utils.Utils.closeQuietly;
 
@@ -37,68 +38,68 @@
  */
 @Slf4j
 public class RetryInterceptor implements Interceptor {
-    private static final String RETRY_AFTER_HEADER_NAME = "Retry-After";
+  private static final String RETRY_AFTER_HEADER_NAME = "Retry-After";
 
-    private final Long maxAttemptCount;
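As a rough illustration of the OkHttp interceptor pattern used by RetryInterceptor below, here is a self-contained interceptor that retries a request once on a 503 response. The fixed status code and single attempt are illustrative; the operator's interceptor derives its limits and backoff from SparkOperatorConf and honors the Retry-After header:

```java
import java.io.IOException;

import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;

/** Minimal sketch: retry a request once when the server answers 503. */
public class SingleRetryInterceptor implements Interceptor {
  @Override
  public Response intercept(Chain chain) throws IOException {
    Request request = chain.request();
    Response response = chain.proceed(request);
    if (response.code() == 503) {
      // Close the failed response before re-issuing the same request.
      response.close();
      response = chain.proceed(request);
    }
    return response;
  }
}
```

-    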
private final Long maxRetryAfterInSecs; - private final Long defaultRetryAfterInSecs; + private final Long maxAttemptCount; + private final Long maxRetryAfterInSecs; + private final Long defaultRetryAfterInSecs; - public RetryInterceptor() { - this.maxAttemptCount = SparkOperatorConf.MaxRetryAttemptOnKubeServerFailure.getValue(); - this.maxRetryAfterInSecs = SparkOperatorConf.MaxRetryAttemptAfterSeconds.getValue(); - this.defaultRetryAfterInSecs = SparkOperatorConf.RetryAttemptAfterSeconds.getValue(); - } - - @Override - public Response intercept(Chain chain) throws IOException { - Request request = chain.request(); - Response response = chain.proceed(request); - int tryCount = 0; - while (!response.isSuccessful() && (response.code() == 429 || response.code() >= 500) && - tryCount < maxAttemptCount) { - // only retry on consecutive 429 and 5xx failure responses - if (log.isWarnEnabled()) { - log.warn( - "Request is not successful. attempt={} response-code={} " + - "response-headers={}", - tryCount, response.code(), response.headers()); - } - Optional retryAfter = getRetryAfter(response); - if (retryAfter.isPresent()) { - try { - TimeUnit.SECONDS.sleep(retryAfter.get()); - } catch (InterruptedException e) { - if (log.isErrorEnabled()) { - log.error("Aborting retry.", e); - } - } - } - tryCount++; + public RetryInterceptor() { + this.maxAttemptCount = SparkOperatorConf.MaxRetryAttemptOnKubeServerFailure.getValue(); + this.maxRetryAfterInSecs = SparkOperatorConf.MaxRetryAttemptAfterSeconds.getValue(); + this.defaultRetryAfterInSecs = SparkOperatorConf.RetryAttemptAfterSeconds.getValue(); + } - ResponseBody responseBody = response.body(); - if (responseBody != null) { - closeQuietly(responseBody); - } - // retry the request for 429 and 5xx - response = chain.proceed(request); + @Override + public Response intercept(Chain chain) throws IOException { + Request request = chain.request(); + Response response = chain.proceed(request); + int tryCount = 0; + while (!response.isSuccessful() && (response.code() == 429 || response.code() >= 500) && + tryCount < maxAttemptCount) { + // only retry on consecutive 429 and 5xx failure responses + if (log.isWarnEnabled()) { + log.warn( + "Request is not successful. attempt={} response-code={} " + + "response-headers={}", + tryCount, response.code(), response.headers()); + } + Optional retryAfter = getRetryAfter(response); + if (retryAfter.isPresent()) { + try { + TimeUnit.SECONDS.sleep(retryAfter.get()); + } catch (InterruptedException e) { + if (log.isErrorEnabled()) { + log.error("Aborting retry.", e); + } } - return response; + } + tryCount++; + + ResponseBody responseBody = response.body(); + if (responseBody != null) { + closeQuietly(responseBody); + } + // retry the request for 429 and 5xx + response = chain.proceed(request); } + return response; + } - private Optional getRetryAfter(Response response) { - String retryAfter = response.header(RETRY_AFTER_HEADER_NAME); - if (StringUtils.isNotEmpty(retryAfter)) { - try { - return Optional.of(Math.min(Long.parseLong(retryAfter), maxRetryAfterInSecs)); - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error(String.format( - "Error while parsing Retry-After header %s. 
Retrying with default %s", - retryAfter, defaultRetryAfterInSecs), e); - } - return Optional.of(defaultRetryAfterInSecs); - } + private Optional getRetryAfter(Response response) { + String retryAfter = response.header(RETRY_AFTER_HEADER_NAME); + if (StringUtils.isNotEmpty(retryAfter)) { + try { + return Optional.of(Math.min(Long.parseLong(retryAfter), maxRetryAfterInSecs)); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error(String.format( + "Error while parsing Retry-After header %s. Retrying with default %s", + retryAfter, defaultRetryAfterInSecs), e); } - return Optional.empty(); + return Optional.of(defaultRetryAfterInSecs); + } } + return Optional.empty(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java index fc67da65..94e7aa7e 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/ConfigOption.java @@ -39,66 +39,66 @@ @Builder @Slf4j public class ConfigOption { - private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final ObjectMapper objectMapper = new ObjectMapper(); - @Getter - @Builder.Default - private boolean enableDynamicOverride = true; - @Getter - private String key; - @Getter - private String description; - private T defaultValue; - private Class typeParameterClass; + @Getter + @Builder.Default + private boolean enableDynamicOverride = true; + @Getter + private String key; + @Getter + private String description; + private T defaultValue; + private Class typeParameterClass; - public T getValue() { - return resolveValue(); - } + public T getValue() { + return resolveValue(); + } - private T resolveValue() { - try { - String value = SparkOperatorConfManager.INSTANCE.getValue(key); - if (!enableDynamicOverride) { - value = SparkOperatorConfManager.INSTANCE.getInitialValue(key); - } - if (StringUtils.isNotEmpty(value)) { - if (typeParameterClass.isPrimitive() || typeParameterClass == String.class) { - return (T) resolveValueToPrimitiveType(typeParameterClass, value); - } else { - return objectMapper.readValue(value, typeParameterClass); - } - } else { - return defaultValue; - } - } catch (Throwable t) { - log.error("Failed to resolve value for config key {}, using default value {}", key, - defaultValue, t); - return defaultValue; + private T resolveValue() { + try { + String value = SparkOperatorConfManager.INSTANCE.getValue(key); + if (!enableDynamicOverride) { + value = SparkOperatorConfManager.INSTANCE.getInitialValue(key); + } + if (StringUtils.isNotEmpty(value)) { + if (typeParameterClass.isPrimitive() || typeParameterClass == String.class) { + return (T) resolveValueToPrimitiveType(typeParameterClass, value); + } else { + return objectMapper.readValue(value, typeParameterClass); } + } else { + return defaultValue; + } + } catch (Throwable t) { + log.error("Failed to resolve value for config key {}, using default value {}", key, + defaultValue, t); + return defaultValue; } + } - public static Object resolveValueToPrimitiveType(Class clazz, String value) { - if (Boolean.class == clazz || Boolean.TYPE == clazz) { - return Boolean.parseBoolean(value); - } - if (Byte.class == clazz || Byte.TYPE == clazz) { - return Byte.parseByte(value); - } - if (Short.class == clazz || Short.TYPE == clazz) { - return Short.parseShort(value); - } - if (Integer.class == 
clazz || Integer.TYPE == clazz) { - return Integer.parseInt(value); - } - if (Long.class == clazz || Long.TYPE == clazz) { - return Long.parseLong(value); - } - if (Float.class == clazz || Float.TYPE == clazz) { - return Float.parseFloat(value); - } - if (Double.class == clazz || Double.TYPE == clazz) { - return Double.parseDouble(value); - } - return value; + public static Object resolveValueToPrimitiveType(Class clazz, String value) { + if (Boolean.class == clazz || Boolean.TYPE == clazz) { + return Boolean.parseBoolean(value); + } + if (Byte.class == clazz || Byte.TYPE == clazz) { + return Byte.parseByte(value); + } + if (Short.class == clazz || Short.TYPE == clazz) { + return Short.parseShort(value); + } + if (Integer.class == clazz || Integer.TYPE == clazz) { + return Integer.parseInt(value); + } + if (Long.class == clazz || Long.TYPE == clazz) { + return Long.parseLong(value); + } + if (Float.class == clazz || Float.TYPE == clazz) { + return Float.parseFloat(value); + } + if (Double.class == clazz || Double.TYPE == clazz) { + return Double.parseDouble(value); } + return value; + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java index 6175f360..a6675e05 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java @@ -18,17 +18,18 @@ package org.apache.spark.kubernetes.operator.config; -import io.javaoperatorsdk.operator.api.config.LeaderElectionConfiguration; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; +import io.javaoperatorsdk.operator.api.config.LeaderElectionConfiguration; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; + import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.defaultOperatorConfigLabels; import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.labelsAsStr; @@ -37,370 +38,370 @@ */ @Slf4j public class SparkOperatorConf { - public static final String PREFIX = "spark.operator."; - public static final String METRIC_PREFIX = "spark.metrics.conf.operator."; - public static final String SINK = "sink."; - public static final String CLASS = "class"; + public static final String PREFIX = "spark.operator."; + public static final String METRIC_PREFIX = "spark.metrics.conf.operator."; + public static final String SINK = "sink."; + public static final String CLASS = "class"; - public static final ConfigOption OperatorAppName = ConfigOption.builder() - .key(PREFIX + "name") - .typeParameterClass(String.class) - .description("Name of the operator.") - .defaultValue("spark-kubernetes-operator") - .enableDynamicOverride(false) - .build(); - public static final ConfigOption OperatorNamespace = ConfigOption.builder() - .key(PREFIX + "namespace") - .typeParameterClass(String.class) - .description("Namespace that operator is deployed within.") - .defaultValue("spark-system") - .enableDynamicOverride(false) - .build(); - public static final ConfigOption 
DynamicConfigEnabled = ConfigOption.builder() - .key(PREFIX + "dynamic.config.enabled") - .typeParameterClass(Boolean.class) - .description( - "When enabled, operator would use config map as source of truth for config " + - "property override. The config map need to be created in " + - "spark.operator.namespace, and labeled with operator name.") - .defaultValue(false) - .enableDynamicOverride(false) - .build(); - public static final ConfigOption DynamicConfigSelectorStr = - ConfigOption.builder() - .key(PREFIX + "dynamic.config.selector.str") - .typeParameterClass(String.class) - .description("The selector str applied to dynamic config map.") - .defaultValue(labelsAsStr(defaultOperatorConfigLabels())) - .enableDynamicOverride(false) - .build(); - public static final ConfigOption TerminateOnInformerFailure = - ConfigOption.builder() - .key(PREFIX + "terminate.on.informer.failure") - .typeParameterClass(Boolean.class) - .description( - "Enable to indicate informer errors should stop operator startup. If " + - "disabled, operator startup will ignore recoverable errors, " + - "caused for example by RBAC issues and will retry " + - "periodically.") - .defaultValue(false) - .enableDynamicOverride(false) - .build(); - public static final ConfigOption TerminationTimeoutSeconds = - ConfigOption.builder() - .key(PREFIX + "termination.timeout.seconds") - .description( - "Grace period for operator shutdown before reconciliation threads " + - "are killed.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(30) - .build(); - public static final ConfigOption ReconcilerParallelism = - ConfigOption.builder() - .key(PREFIX + "reconciler.parallelism") - .description( - "Thread pool size for Spark Operator reconcilers. Use -1 for " + - "unbounded pool.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(30) - .build(); - public static final ConfigOption RateLimiterRefreshPeriodSeconds = - ConfigOption.builder() - .key(PREFIX + "rate.limiter.refresh.period.seconds") - .description( - "Operator rate limiter refresh period(in seconds) for each resource.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(15) - .build(); - public static final ConfigOption RateLimiterLimit = ConfigOption.builder() - .key(PREFIX + "rate.limiter.limit") - .description( - "Max number of reconcile loops triggered within the rate limiter refresh " + - "period for each resource. Setting the limit <= 0 disables the " + - "limiter.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(5) - .build(); - public static final ConfigOption RetryInitialInternalSeconds = - ConfigOption.builder() - .key(PREFIX + "retry.initial.internal.seconds") - .description( - "Initial interval(in seconds) of retries on unhandled controller " + - "errors.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(5) - .build(); - public static final ConfigOption RetryInternalMultiplier = - ConfigOption.builder() - .key(PREFIX + "retry.internal.multiplier") - .description("Interval multiplier of retries on unhandled controller errors.") - .enableDynamicOverride(false) - .typeParameterClass(Double.class) - .defaultValue(1.5) - .build(); - public static final ConfigOption RetryMaxIntervalSeconds = - ConfigOption.builder() - .key(PREFIX + "retry.max.interval.seconds") - .description( - "Max interval(in seconds) of retries on unhandled controller errors. 
" + - "Set to -1 for unlimited.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(-1) - .build(); - public static final ConfigOption RetryMaxAttempts = ConfigOption.builder() - .key(PREFIX + "retry.max.attempts") - .description("Max attempts of retries on unhandled controller errors.") - .enableDynamicOverride(false) - .typeParameterClass(Integer.class) - .defaultValue(15) - .build(); - public static final ConfigOption DriverCreateMaxAttempts = ConfigOption.builder() - .key(PREFIX + "driver.create.max.attempts") - .description( - "Maximal number of retry attempts of requesting driver for Spark application.") - .defaultValue(3L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption MaxRetryAttemptOnKubeServerFailure = - ConfigOption.builder() - .key(PREFIX + "max.retry.attempts.on.k8s.failure") - .description( - "Maximal number of retry attempts of requests to k8s server upon " + - "response 429 and 5xx.") - .defaultValue(3L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption RetryAttemptAfterSeconds = ConfigOption.builder() - .key(PREFIX + "retry.attempt.after.seconds") - .description( - "Default time (in seconds) to wait till next request. This would be used if " + - "server does not set Retry-After in response.") - .defaultValue(1L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption MaxRetryAttemptAfterSeconds = - ConfigOption.builder() - .key(PREFIX + "max.retry.attempt.after.seconds") - .description("Maximal time (in seconds) to wait till next request.") - .defaultValue(15L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption StatusPatchMaxRetry = ConfigOption.builder() - .key(PREFIX + "status.patch.max.retry") - .description( - "Maximal number of retry attempts of requests to k8s server for resource " + - "status update.") - .defaultValue(3L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption StatusPatchFailureBackoffSeconds = - ConfigOption.builder() - .key(PREFIX + "status.patch.failure.backoff.seconds") - .description( - "Default time (in seconds) to wait till next request to patch " + - "resource status update.") - .defaultValue(3L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption AppReconcileIntervalSeconds = - ConfigOption.builder() - .key(PREFIX + "app.reconcile.interval.seconds") - .description( - "Interval (in seconds) to reconcile when application is is starting " + - "up. Note that reconcile is always expected to be triggered " + - "per update - this interval controls the reconcile behavior " + - "when operator still need to reconcile even when there's no " + - "update ,e.g. for timeout checks.") - .defaultValue(120L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption ForegroundRequestTimeoutSeconds = - ConfigOption.builder() - .key(PREFIX + "foreground.request.timeout.seconds") - .description( - "Timeout (in seconds) to for requests made to API server. this " + - "applies only to foreground requests.") - .defaultValue(120L) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption OperatorWatchedNamespaces = - ConfigOption.builder() - .key(PREFIX + "watched.namespaces") - .description( - "Comma-separated list of namespaces that the operator would be " + - "watching for Spark resources. 
If unset, operator would " +
-                                    "watch all namespaces by default.")
-                    .defaultValue(null)
-                    .typeParameterClass(String.class)
-                    .build();
-    public static final ConfigOption<Boolean> TrimAttemptStateTransitionHistory =
-            ConfigOption.<Boolean>builder()
-                    .key(PREFIX + "trim.attempt.state.transition.history")
-                    .description(
-                            "When enabled, operator would trim state transition history when a " +
-                                    "new attempt starts, keeping previous attempt summary only.")
-                    .defaultValue(true)
-                    .typeParameterClass(Boolean.class)
-                    .build();
+  public static final ConfigOption<String> OperatorAppName = ConfigOption.<String>builder()
+      .key(PREFIX + "name")
+      .typeParameterClass(String.class)
+      .description("Name of the operator.")
+      .defaultValue("spark-kubernetes-operator")
+      .enableDynamicOverride(false)
+      .build();
+  public static final ConfigOption<String> OperatorNamespace = ConfigOption.<String>builder()
+      .key(PREFIX + "namespace")
+      .typeParameterClass(String.class)
+      .description("Namespace that the operator is deployed within.")
+      .defaultValue("spark-system")
+      .enableDynamicOverride(false)
+      .build();
+  public static final ConfigOption<Boolean> DynamicConfigEnabled = ConfigOption.<Boolean>builder()
+      .key(PREFIX + "dynamic.config.enabled")
+      .typeParameterClass(Boolean.class)
+      .description(
+          "When enabled, operator would use a config map as source of truth for config " +
+              "property override. The config map needs to be created in " +
+              "spark.operator.namespace, and labeled with the operator name.")
+      .defaultValue(false)
+      .enableDynamicOverride(false)
+      .build();
+  public static final ConfigOption<String> DynamicConfigSelectorStr =
+      ConfigOption.<String>builder()
+          .key(PREFIX + "dynamic.config.selector.str")
+          .typeParameterClass(String.class)
+          .description("The selector string applied to the dynamic config map.")
+          .defaultValue(labelsAsStr(defaultOperatorConfigLabels()))
+          .enableDynamicOverride(false)
+          .build();
+  public static final ConfigOption<Boolean> TerminateOnInformerFailure =
+      ConfigOption.<Boolean>builder()
+          .key(PREFIX + "terminate.on.informer.failure")
+          .typeParameterClass(Boolean.class)
+          .description(
+              "Enable to indicate that informer errors should stop operator startup. If " +
+                  "disabled, operator startup will ignore recoverable errors, " +
+                  "caused for example by RBAC issues, and will retry " +
+                  "periodically.")
+          .defaultValue(false)
+          .enableDynamicOverride(false)
+          .build();
+  public static final ConfigOption<Integer> TerminationTimeoutSeconds =
+      ConfigOption.<Integer>builder()
+          .key(PREFIX + "termination.timeout.seconds")
+          .description(
+              "Grace period for operator shutdown before reconciliation threads " +
+                  "are killed.")
+          .enableDynamicOverride(false)
+          .typeParameterClass(Integer.class)
+          .defaultValue(30)
+          .build();
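The ConfigOption pattern above gives each setting a key, a type, a default, and a flag controlling whether dynamic overrides apply. A hypothetical extension would register its own option the same way; the key and default below are invented for illustration, and the snippet assumes it sits in SparkOperatorConf (or any class sharing the PREFIX constant):

```java
// Sketch only: mirrors the declarations above with an invented key.
public static final ConfigOption<Integer> ReportIntervalSeconds =
    ConfigOption.<Integer>builder()
        .key(PREFIX + "report.interval.seconds")
        .description("Interval (in seconds) between hypothetical status reports.")
        .defaultValue(60)
        .typeParameterClass(Integer.class)
        .build();

// Resolution consults dynamic overrides first, then the initial properties file:
// int interval = ReportIntervalSeconds.getValue();
```

+  public static final ConfigOption<Integer> ReconcilerParallelism =
+      ConfigOption.<Integer>builder()
+          .key(PREFIX + "reconciler.parallelism")
+          .description(
+              "Thread pool size for Spark Operator reconcilers. 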
Use -1 for " + + "unbounded pool.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(30) + .build(); + public static final ConfigOption RateLimiterRefreshPeriodSeconds = + ConfigOption.builder() + .key(PREFIX + "rate.limiter.refresh.period.seconds") + .description( + "Operator rate limiter refresh period(in seconds) for each resource.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(15) + .build(); + public static final ConfigOption RateLimiterLimit = ConfigOption.builder() + .key(PREFIX + "rate.limiter.limit") + .description( + "Max number of reconcile loops triggered within the rate limiter refresh " + + "period for each resource. Setting the limit <= 0 disables the " + + "limiter.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(5) + .build(); + public static final ConfigOption RetryInitialInternalSeconds = + ConfigOption.builder() + .key(PREFIX + "retry.initial.internal.seconds") + .description( + "Initial interval(in seconds) of retries on unhandled controller " + + "errors.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(5) + .build(); + public static final ConfigOption RetryInternalMultiplier = + ConfigOption.builder() + .key(PREFIX + "retry.internal.multiplier") + .description("Interval multiplier of retries on unhandled controller errors.") + .enableDynamicOverride(false) + .typeParameterClass(Double.class) + .defaultValue(1.5) + .build(); + public static final ConfigOption RetryMaxIntervalSeconds = + ConfigOption.builder() + .key(PREFIX + "retry.max.interval.seconds") + .description( + "Max interval(in seconds) of retries on unhandled controller errors. " + + "Set to -1 for unlimited.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(-1) + .build(); + public static final ConfigOption RetryMaxAttempts = ConfigOption.builder() + .key(PREFIX + "retry.max.attempts") + .description("Max attempts of retries on unhandled controller errors.") + .enableDynamicOverride(false) + .typeParameterClass(Integer.class) + .defaultValue(15) + .build(); + public static final ConfigOption DriverCreateMaxAttempts = ConfigOption.builder() + .key(PREFIX + "driver.create.max.attempts") + .description( + "Maximal number of retry attempts of requesting driver for Spark application.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption MaxRetryAttemptOnKubeServerFailure = + ConfigOption.builder() + .key(PREFIX + "max.retry.attempts.on.k8s.failure") + .description( + "Maximal number of retry attempts of requests to k8s server upon " + + "response 429 and 5xx.") + .defaultValue(3L) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption RetryAttemptAfterSeconds = ConfigOption.builder() + .key(PREFIX + "retry.attempt.after.seconds") + .description( + "Default time (in seconds) to wait till next request. 
This would be used if " +
+              "the server does not set Retry-After in the response.")
+      .defaultValue(1L)
+      .typeParameterClass(Long.class)
+      .build();
+  public static final ConfigOption<Long> MaxRetryAttemptAfterSeconds =
+      ConfigOption.<Long>builder()
+          .key(PREFIX + "max.retry.attempt.after.seconds")
+          .description("Maximal time (in seconds) to wait till the next request.")
+          .defaultValue(15L)
+          .typeParameterClass(Long.class)
+          .build();
+  public static final ConfigOption<Long> StatusPatchMaxRetry = ConfigOption.<Long>builder()
+      .key(PREFIX + "status.patch.max.retry")
+      .description(
+          "Maximal number of retry attempts of requests to the k8s server for resource " +
+              "status updates.")
+      .defaultValue(3L)
+      .typeParameterClass(Long.class)
+      .build();
+  public static final ConfigOption<Long> StatusPatchFailureBackoffSeconds =
+      ConfigOption.<Long>builder()
+          .key(PREFIX + "status.patch.failure.backoff.seconds")
+          .description(
+              "Default time (in seconds) to wait till the next request to patch a " +
+                  "resource status update.")
+          .defaultValue(3L)
+          .typeParameterClass(Long.class)
+          .build();
+  public static final ConfigOption<Long> AppReconcileIntervalSeconds =
+      ConfigOption.<Long>builder()
+          .key(PREFIX + "app.reconcile.interval.seconds")
+          .description(
+              "Interval (in seconds) to reconcile when the application is starting " +
+                  "up. Note that a reconcile is always expected to be triggered " +
+                  "per update - this interval controls the reconcile behavior " +
+                  "when the operator still needs to reconcile even when there is no " +
+                  "update, e.g. for timeout checks.")
+          .defaultValue(120L)
+          .typeParameterClass(Long.class)
+          .build();
+  public static final ConfigOption<Long> ForegroundRequestTimeoutSeconds =
+      ConfigOption.<Long>builder()
+          .key(PREFIX + "foreground.request.timeout.seconds")
+          .description(
+              "Timeout (in seconds) for requests made to the API server. This " +
+                  "applies only to foreground requests.")
+          .defaultValue(120L)
+          .typeParameterClass(Long.class)
+          .build();
+  public static final ConfigOption<String> OperatorWatchedNamespaces =
+      ConfigOption.<String>builder()
+          .key(PREFIX + "watched.namespaces")
+          .description(
+              "Comma-separated list of namespaces that the operator would be " +
+                  "watching for Spark resources. If unset, the operator would " +
+                  "watch all namespaces by default.")
+          .defaultValue(null)
+          .typeParameterClass(String.class)
+          .build();
+  public static final ConfigOption<Boolean> TrimAttemptStateTransitionHistory =
+      ConfigOption.<Boolean>builder()
+          .key(PREFIX + "trim.attempt.state.transition.history")
+          .description(
+              "When enabled, the operator would trim the state transition history when a " +
+                  "new attempt starts, keeping the previous attempt summary only.")
+          .defaultValue(true)
+          .typeParameterClass(Boolean.class)
+          .build();

-    public static final ConfigOption<Boolean> JOSDKMetricsEnabled = ConfigOption.<Boolean>builder()
-            .key(PREFIX + "josdk.metrics.enabled")
-            .description(
-                    "When enabled, the josdk metrics will be added in metrics source and " +
-                            "configured for operator.")
-            .defaultValue(true)
-            .build();
+  public static final ConfigOption<Boolean> JOSDKMetricsEnabled = ConfigOption.<Boolean>builder()
+      .key(PREFIX + "josdk.metrics.enabled")
+      .description(
+          "When enabled, the JOSDK metrics will be added to the metrics sources and " +
+              "configured for the operator.")
+      .defaultValue(true)
+      .build();

-    public static final ConfigOption<Boolean> KubernetesClientMetricsEnabled =
-            ConfigOption.<Boolean>builder()
-                    .key(PREFIX + "kubernetes.client.metrics.enabled")
-                    .defaultValue(true)
-                    .description(
-                            "Enable KubernetesClient metrics for measuring the HTTP traffic to " +
-                                    "the Kubernetes API Server. Since the metrics is collected " +
-                                    "via Okhttp interceptors, can be disabled when opt in " +
-                                    "customized interceptors.")
-                    .build();
+  public static final ConfigOption<Boolean> KubernetesClientMetricsEnabled =
+      ConfigOption.<Boolean>builder()
+          .key(PREFIX + "kubernetes.client.metrics.enabled")
+          .defaultValue(true)
+          .description(
+              "Enable KubernetesClient metrics for measuring the HTTP traffic to " +
+                  "the Kubernetes API Server. Since the metrics are collected " +
+                  "via OkHttp interceptors, this can be disabled when opting in to " +
+                  "customized interceptors.")
+          .build();

-    public static final ConfigOption<Boolean>
-            KubernetesClientMetricsGroupByResponseCodeGroupEnabled = ConfigOption.<Boolean>builder()
-            .key(PREFIX + "kubernetes.client.metrics.group.by.response.code.group.enable")
-            .description(
-                    "When enabled, additional metrics group by http response code group(1xx, " +
-                            "2xx, 3xx, 4xx, 5xx) received from API server will be added. Users " +
-                            "can disable it when their monitoring system can combine lower level " +
-                            "kubernetes.client.http.response.<3-digit-response-code> metrics.")
-            .defaultValue(true)
-            .build();
-    public static final ConfigOption<Integer> OperatorProbePort = ConfigOption.<Integer>builder()
-            .key(PREFIX + "probe.port")
-            .defaultValue(18080)
-            .description("The port used for health/readiness check probe status.")
-            .typeParameterClass(Integer.class)
-            .enableDynamicOverride(false)
-            .build();
+  public static final ConfigOption<Boolean>
+      KubernetesClientMetricsGroupByResponseCodeGroupEnabled = ConfigOption.<Boolean>builder()
+      .key(PREFIX + "kubernetes.client.metrics.group.by.response.code.group.enable")
+      .description(
+          "When enabled, additional metrics grouped by http response code group (1xx, " +
+              "2xx, 3xx, 4xx, 5xx) received from the API server will be added. Users " +
+              "can disable it when their monitoring system can combine lower level " +
+              "kubernetes.client.http.response.<3-digit-response-code> metrics.")
+      .defaultValue(true)
+      .build();
+  public static final ConfigOption<Integer> OperatorProbePort = ConfigOption.<Integer>builder()
+      .key(PREFIX + "probe.port")
+      .defaultValue(18080)
+      .description("The port used for health/readiness check probe status.")
+      .typeParameterClass(Integer.class)
+      .enableDynamicOverride(false)
+      .build();

-    public static final ConfigOption<Integer> OperatorMetricsPort = ConfigOption.<Integer>builder()
-            .key(PREFIX + "metrics.port")
-            .defaultValue(19090)
-            .description("The port used for checking metrics")
-            .typeParameterClass(Integer.class)
-            .enableDynamicOverride(false)
-            .build();
+  public static final ConfigOption<Integer> OperatorMetricsPort = ConfigOption.<Integer>builder()
+      .key(PREFIX + "metrics.port")
+      .defaultValue(19090)
+      .description("The port used for serving operator metrics.")
+      .typeParameterClass(Integer.class)
+      .enableDynamicOverride(false)
+      .build();

-    public static final ConfigOption<Integer> SentinelExecutorServicePoolSize =
-            ConfigOption.<Integer>builder()
-                    .key(PREFIX + "sentinel.executor.pool.size")
-                    .description(
-                            "Size of executor service in Sentinel Managers to check the health " +
-                                    "of sentinel resources.")
-                    .defaultValue(3)
-                    .enableDynamicOverride(false)
-                    .typeParameterClass(Integer.class)
-                    .build();
+  public static final ConfigOption<Integer> SentinelExecutorServicePoolSize =
+      ConfigOption.<Integer>builder()
+          .key(PREFIX + "sentinel.executor.pool.size")
+          .description(
+              "Size of the executor service in Sentinel Managers to check the health " +
+                  "of sentinel resources.")
+          .defaultValue(3)
+          .enableDynamicOverride(false)
+          .typeParameterClass(Integer.class)
+          .build();
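Given the probe and metrics port defaults declared above (18080 and 19090), a quick smoke test of a locally port-forwarded operator could look like the sketch below. The ports come from the options above, but the probe path is an assumption for illustration; consult the operator probe docs for the actual endpoints:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class OperatorEndpointCheck {
  public static void main(String[] args) throws Exception {
    HttpClient http = HttpClient.newHttpClient();
    // Port 18080 is the default probe port; "/healthz" is an assumed path.
    HttpRequest probe =
        HttpRequest.newBuilder(URI.create("http://localhost:18080/healthz")).build();
    HttpResponse<String> response = http.send(probe, HttpResponse.BodyHandlers.ofString());
    System.out.println("probe status: " + response.statusCode());
  }
}
```

-    public static final ConfigOption<Long> SENTINEL_RESOURCE_RECONCILIATION_DELAY =
-            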
ConfigOption.builder() - .key(PREFIX + "health.sentinel.resource.reconciliation.delay.seconds") - .defaultValue(60L) - .description( - "Allowed max time(seconds) between spec update and reconciliation " + - "for sentinel resources.") - .enableDynamicOverride(true) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption APPLICATION_STATUS_LISTENER_CLASS_NAMES = - ConfigOption.builder() - .key(PREFIX + "application.status.listener.class.names") - .defaultValue("") - .description( - "Comma-separated names of ApplicationStatusListener class " + - "implementations") - .enableDynamicOverride(false) - .typeParameterClass(String.class) - .build(); - public static final ConfigOption LEADER_ELECTION_ENABLED = - ConfigOption.builder() - .key(PREFIX + "leader.election.enabled") - .defaultValue(false) - .description( - "Enable leader election for the operator to allow running standby " + - "instances.") - .enableDynamicOverride(false) - .typeParameterClass(Boolean.class) - .build(); - public static final ConfigOption LEADER_ELECTION_LEASE_NAME = - ConfigOption.builder() - .key(PREFIX + "leader.election.lease.name") - .defaultValue("spark-operator-lease") - .description( - "Leader election lease name, must be unique for leases in the same " + - "namespace.") - .enableDynamicOverride(false) - .typeParameterClass(String.class) - .build(); - public static final ConfigOption LEADER_ELECTION_LEASE_DURATION_SECONDS = - ConfigOption.builder() - .key(PREFIX + "leader.election.lease.duration.seconds") - .defaultValue(1200L) - .description("Leader election lease duration.") - .enableDynamicOverride(false) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption LEADER_ELECTION_RENEW_DEADLINE_SECONDS = - ConfigOption.builder() - .key(PREFIX + "leader.election.renew.deadline.seconds") - .defaultValue(600L) - .description("Leader election renew deadline.") - .enableDynamicOverride(false) - .typeParameterClass(Long.class) - .build(); - public static final ConfigOption LEADER_ELECTION_RETRY_PERIOD_SECONDS = - ConfigOption.builder() - .key(PREFIX + "leader.election.retry.period.seconds") - .defaultValue(180L) - .description("Leader election retry period.") - .enableDynamicOverride(false) - .typeParameterClass(Long.class) - .build(); + public static final ConfigOption SENTINEL_RESOURCE_RECONCILIATION_DELAY = + ConfigOption.builder() + .key(PREFIX + "health.sentinel.resource.reconciliation.delay.seconds") + .defaultValue(60L) + .description( + "Allowed max time(seconds) between spec update and reconciliation " + + "for sentinel resources.") + .enableDynamicOverride(true) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption APPLICATION_STATUS_LISTENER_CLASS_NAMES = + ConfigOption.builder() + .key(PREFIX + "application.status.listener.class.names") + .defaultValue("") + .description( + "Comma-separated names of ApplicationStatusListener class " + + "implementations") + .enableDynamicOverride(false) + .typeParameterClass(String.class) + .build(); + public static final ConfigOption LEADER_ELECTION_ENABLED = + ConfigOption.builder() + .key(PREFIX + "leader.election.enabled") + .defaultValue(false) + .description( + "Enable leader election for the operator to allow running standby " + + "instances.") + .enableDynamicOverride(false) + .typeParameterClass(Boolean.class) + .build(); + public static final ConfigOption LEADER_ELECTION_LEASE_NAME = + ConfigOption.builder() + .key(PREFIX + "leader.election.lease.name") + .defaultValue("spark-operator-lease") 
+ .description( + "Leader election lease name, must be unique for leases in the same " + + "namespace.") + .enableDynamicOverride(false) + .typeParameterClass(String.class) + .build(); + public static final ConfigOption LEADER_ELECTION_LEASE_DURATION_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.lease.duration.seconds") + .defaultValue(1200L) + .description("Leader election lease duration.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption LEADER_ELECTION_RENEW_DEADLINE_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.renew.deadline.seconds") + .defaultValue(600L) + .description("Leader election renew deadline.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); + public static final ConfigOption LEADER_ELECTION_RETRY_PERIOD_SECONDS = + ConfigOption.builder() + .key(PREFIX + "leader.election.retry.period.seconds") + .defaultValue(180L) + .description("Leader election retry period.") + .enableDynamicOverride(false) + .typeParameterClass(Long.class) + .build(); - public static List getApplicationStatusListener() { - List listeners = new ArrayList<>(); - String listenerNamesStr = - SparkOperatorConf.APPLICATION_STATUS_LISTENER_CLASS_NAMES.getValue(); - if (StringUtils.isNotBlank(listenerNamesStr)) { - try { - List listenerNames = - Arrays.stream(listenerNamesStr.split(",")).map(String::trim) - .collect(Collectors.toList()); - for (String name: listenerNames) { - Class listenerClass = Class.forName(name); - if (ApplicationStatusListener.class.isAssignableFrom(listenerClass)) { - listeners.add((ApplicationStatusListener) - listenerClass.getConstructor().newInstance()); - } - } - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("Failed to initialize listeners for operator with {}", - listenerNamesStr, e); - } - } + public static List getApplicationStatusListener() { + List listeners = new ArrayList<>(); + String listenerNamesStr = + SparkOperatorConf.APPLICATION_STATUS_LISTENER_CLASS_NAMES.getValue(); + if (StringUtils.isNotBlank(listenerNamesStr)) { + try { + List listenerNames = + Arrays.stream(listenerNamesStr.split(",")).map(String::trim) + .collect(Collectors.toList()); + for (String name : listenerNames) { + Class listenerClass = Class.forName(name); + if (ApplicationStatusListener.class.isAssignableFrom(listenerClass)) { + listeners.add((ApplicationStatusListener) + listenerClass.getConstructor().newInstance()); + } + } + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to initialize listeners for operator with {}", + listenerNamesStr, e); } - return listeners; + } } + return listeners; + } - public static LeaderElectionConfiguration getLeaderElectionConfig() { - return new LeaderElectionConfiguration(LEADER_ELECTION_LEASE_NAME.getValue(), - OperatorNamespace.getValue(), - Duration.ofSeconds(LEADER_ELECTION_LEASE_DURATION_SECONDS.getValue()), - Duration.ofSeconds(LEADER_ELECTION_RENEW_DEADLINE_SECONDS.getValue()), - Duration.ofSeconds(LEADER_ELECTION_RETRY_PERIOD_SECONDS.getValue())); - } + public static LeaderElectionConfiguration getLeaderElectionConfig() { + return new LeaderElectionConfiguration(LEADER_ELECTION_LEASE_NAME.getValue(), + OperatorNamespace.getValue(), + Duration.ofSeconds(LEADER_ELECTION_LEASE_DURATION_SECONDS.getValue()), + Duration.ofSeconds(LEADER_ELECTION_RENEW_DEADLINE_SECONDS.getValue()), + Duration.ofSeconds(LEADER_ELECTION_RETRY_PERIOD_SECONDS.getValue())); + } } diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java index 68a636e4..ca914263 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManager.java @@ -18,85 +18,85 @@ package org.apache.spark.kubernetes.operator.config; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; - import java.io.FileInputStream; import java.io.InputStream; import java.util.Map; import java.util.Properties; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + /** * Loads ConfigOption from properties file. In addition, loads hot properties override * from config map if dynamic config is enabled. */ @Slf4j public class SparkOperatorConfManager { - public static final String INITIAL_CONFIG_FILE_PATH = - "/opt/spark-operator/conf/spark-operator.properties"; - - public static final String METRICS_CONFIG_FILE_PATH = - "/opt/spark-operator/conf/metrics.properties"; - - public static final String INITIAL_CONFIG_FILE_PATH_PROPS_KEY = - "spark.operator.base.property.file.name"; - - public static final String METRICS_CONFIG_FILE_PATH_PROPS_KEY = - "spark.operator.metrics.property.file.name"; - - public static final SparkOperatorConfManager INSTANCE = new SparkOperatorConfManager(); - protected final Properties initialConfig; - protected final Properties metricsConfig; - protected Properties configOverrides; - - protected SparkOperatorConfManager() { - this.initialConfig = new Properties(); - this.configOverrides = new Properties(); - this.metricsConfig = new Properties(); - initialize(); - } - - public String getValue(String key) { - String currentValue = configOverrides.getProperty(key); - return StringUtils.isEmpty(currentValue) ? 
getInitialValue(key) : currentValue; - } - - public String getInitialValue(String key) { - return initialConfig.getProperty(key); - } - - public void refresh(Map updatedConfig) { - synchronized (this) { - this.configOverrides = new Properties(); - configOverrides.putAll(updatedConfig); - } - } - - public Properties getMetricsProperties() { - return metricsConfig; - } - - private void initialize() { - initialConfig.putAll(System.getProperties()); - Properties properties = getProperties( - System.getProperty(INITIAL_CONFIG_FILE_PATH_PROPS_KEY, INITIAL_CONFIG_FILE_PATH)); - initialConfig.putAll(properties); - initializeMetricsProperties(); + public static final String INITIAL_CONFIG_FILE_PATH = + "/opt/spark-operator/conf/spark-operator.properties"; + + public static final String METRICS_CONFIG_FILE_PATH = + "/opt/spark-operator/conf/metrics.properties"; + + public static final String INITIAL_CONFIG_FILE_PATH_PROPS_KEY = + "spark.operator.base.property.file.name"; + + public static final String METRICS_CONFIG_FILE_PATH_PROPS_KEY = + "spark.operator.metrics.property.file.name"; + + public static final SparkOperatorConfManager INSTANCE = new SparkOperatorConfManager(); + protected final Properties initialConfig; + protected final Properties metricsConfig; + protected Properties configOverrides; + + protected SparkOperatorConfManager() { + this.initialConfig = new Properties(); + this.configOverrides = new Properties(); + this.metricsConfig = new Properties(); + initialize(); + } + + public String getValue(String key) { + String currentValue = configOverrides.getProperty(key); + return StringUtils.isEmpty(currentValue) ? getInitialValue(key) : currentValue; + } + + public String getInitialValue(String key) { + return initialConfig.getProperty(key); + } + + public void refresh(Map updatedConfig) { + synchronized (this) { + this.configOverrides = new Properties(); + configOverrides.putAll(updatedConfig); } - - private void initializeMetricsProperties() { - Properties properties = getProperties( - System.getProperty(METRICS_CONFIG_FILE_PATH_PROPS_KEY, METRICS_CONFIG_FILE_PATH)); - metricsConfig.putAll(properties); - } - - private Properties getProperties(String filePath) { - Properties properties = new Properties(); - try (InputStream inputStream = new FileInputStream(filePath)) { - properties.load(inputStream); - } catch (Exception e) { - log.error("Failed to load properties from {}.", filePath, e); - } - return properties; + } + + public Properties getMetricsProperties() { + return metricsConfig; + } + + private void initialize() { + initialConfig.putAll(System.getProperties()); + Properties properties = getProperties( + System.getProperty(INITIAL_CONFIG_FILE_PATH_PROPS_KEY, INITIAL_CONFIG_FILE_PATH)); + initialConfig.putAll(properties); + initializeMetricsProperties(); + } + + private void initializeMetricsProperties() { + Properties properties = getProperties( + System.getProperty(METRICS_CONFIG_FILE_PATH_PROPS_KEY, METRICS_CONFIG_FILE_PATH)); + metricsConfig.putAll(properties); + } + + private Properties getProperties(String filePath) { + Properties properties = new Properties(); + try (InputStream inputStream = new FileInputStream(filePath)) { + properties.load(inputStream); + } catch (Exception e) { + log.error("Failed to load properties from {}.", filePath, e); } + return properties; + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java 
b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java
index 723dfab9..89e7bc83 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfigMapReconciler.java
@@ -18,6 +18,10 @@
 package org.apache.spark.kubernetes.operator.config;
 
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+
 import io.fabric8.kubernetes.api.model.ConfigMap;
 import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
 import io.javaoperatorsdk.operator.api.reconciler.Context;
@@ -33,11 +37,8 @@
 import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
 
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Function;
+import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
 
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorNamespace;
@@ -51,31 +52,31 @@
 @RequiredArgsConstructor
 @Slf4j
 public class SparkOperatorConfigMapReconciler implements Reconciler<ConfigMap>,
-        ErrorStatusHandler<ConfigMap>, EventSourceInitializer<ConfigMap> {
-    private final Function<Set<String>, Boolean> namespaceUpdater;
+    ErrorStatusHandler<ConfigMap>, EventSourceInitializer<ConfigMap> {
+  private final Function<Set<String>, Boolean> namespaceUpdater;
 
-    @Override
-    public ErrorStatusUpdateControl<ConfigMap> updateErrorStatus(ConfigMap resource,
-                                                                 Context<ConfigMap> context,
-                                                                 Exception e) {
-        // Log the cause so config reconciliation failures are diagnosable.
-        log.error("Failed to reconcile dynamic config change.", e);
-        return ErrorStatusUpdateControl.noStatusUpdate();
-    }
+  @Override
+  public ErrorStatusUpdateControl<ConfigMap> updateErrorStatus(ConfigMap resource,
+                                                               Context<ConfigMap> context,
+                                                               Exception e) {
+    // Log the cause so config reconciliation failures are diagnosable.
+    log.error("Failed to reconcile dynamic config change.", e);
+    return ErrorStatusUpdateControl.noStatusUpdate();
+  }
 
-    @Override
-    public Map<String, EventSource> prepareEventSources(EventSourceContext<ConfigMap> context) {
-        var configMapEventSource =
-            new InformerEventSource<>(InformerConfiguration.from(ConfigMap.class, context)
-                .withNamespaces(OperatorNamespace.getValue())
-                .build(), context);
-        return EventSourceInitializer.nameEventSources(configMapEventSource);
-    }
+  @Override
+  public Map<String, EventSource> prepareEventSources(EventSourceContext<ConfigMap> context) {
+    var configMapEventSource =
+        new InformerEventSource<>(InformerConfiguration.from(ConfigMap.class, context)
+            .withNamespaces(OperatorNamespace.getValue())
+            .build(), context);
+    return EventSourceInitializer.nameEventSources(configMapEventSource);
+  }
 
-    @Override
-    public UpdateControl<ConfigMap> reconcile(ConfigMap resource, Context<ConfigMap> context)
-        throws Exception {
-        SparkOperatorConfManager.INSTANCE.refresh(resource.getData());
-        namespaceUpdater.apply(SparkReconcilerUtils.getWatchedNamespaces());
-        return UpdateControl.noUpdate();
-    }
+  @Override
+  public UpdateControl<ConfigMap> reconcile(ConfigMap resource, Context<ConfigMap> context)
+      throws Exception {
+    SparkOperatorConfManager.INSTANCE.refresh(resource.getData());
+    namespaceUpdater.apply(SparkReconcilerUtils.getWatchedNamespaces());
+    return UpdateControl.noUpdate();
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
index 97fe43bd..f13b389f 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
@@ -18,6 +18,11 @@
 package org.apache.spark.kubernetes.operator.controller;
 
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 import io.fabric8.kubernetes.api.model.HasMetadata;
 import io.fabric8.kubernetes.api.model.Pod;
 import io.fabric8.kubernetes.client.KubernetesClient;
@@ -25,15 +30,11 @@
 import lombok.Getter;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
+
 import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
 import org.apache.spark.kubernetes.operator.SparkApplication;
 import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils;
 
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.driverLabels;
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.executorLabels;
@@ -44,48 +45,48 @@
 @RequiredArgsConstructor
 @Slf4j
 public class SparkApplicationContext {
-    @Getter
-    private final SparkApplication sparkApplication;
-    private final Context<SparkApplication> josdkContext;
-    private ApplicationResourceSpec secondaryResourceSpec;
+  @Getter
+  private final SparkApplication sparkApplication;
+  private final Context<SparkApplication> josdkContext;
+  private ApplicationResourceSpec secondaryResourceSpec;
 
-    public Optional<Pod> getDriverPod() {
-        return josdkContext.getSecondaryResourcesAsStream(Pod.class)
-            .filter(p -> p.getMetadata().getLabels().entrySet()
-                .containsAll(driverLabels(sparkApplication).entrySet()))
-            .findAny();
-    }
+  public Optional<Pod> getDriverPod() {
+    return josdkContext.getSecondaryResourcesAsStream(Pod.class)
+        .filter(p -> p.getMetadata().getLabels().entrySet()
+            .containsAll(driverLabels(sparkApplication).entrySet()))
+        .findAny();
+  }
 
-    public Set<Pod> getExecutorsForApplication() {
-        return josdkContext.getSecondaryResourcesAsStream(Pod.class)
-            .filter(p -> p.getMetadata().getLabels().entrySet()
-                .containsAll(executorLabels(sparkApplication).entrySet()))
-            .collect(Collectors.toSet());
-    }
+  public Set<Pod> getExecutorsForApplication() {
+    return josdkContext.getSecondaryResourcesAsStream(Pod.class)
+        .filter(p -> p.getMetadata().getLabels().entrySet()
+            .containsAll(executorLabels(sparkApplication).entrySet()))
+        .collect(Collectors.toSet());
+  }
 
-    private ApplicationResourceSpec getSecondaryResourceSpec() {
-        synchronized (this) {
-            if (secondaryResourceSpec == null) {
-                secondaryResourceSpec = SparkApplicationReconcileUtils.buildResourceSpec(
-                    sparkApplication, josdkContext.getClient());
-            }
-            return secondaryResourceSpec;
-        }
-    }
+  private ApplicationResourceSpec getSecondaryResourceSpec() {
+    synchronized (this) {
+      if (secondaryResourceSpec == null) {
+        secondaryResourceSpec = SparkApplicationReconcileUtils.buildResourceSpec(
+            sparkApplication, josdkContext.getClient());
+      }
+      return secondaryResourceSpec;
+    }
+  }
 
-    public KubernetesClient getClient() {
-        return josdkContext.getClient();
-    }
+  public KubernetesClient getClient() {
+    return josdkContext.getClient();
+  }
 
-    public List<HasMetadata> getDriverPreResourcesSpec() {
-        return getSecondaryResourceSpec().getDriverPreResources();
-    }
+  public List<HasMetadata> getDriverPreResourcesSpec() {
+    return getSecondaryResourceSpec().getDriverPreResources();
+  }
 
-    public Pod getDriverPodSpec() {
-        return getSecondaryResourceSpec().getConfiguredPod();
-    }
+  public Pod getDriverPodSpec() {
+    return getSecondaryResourceSpec().getConfiguredPod();
+  }
 
-    public List<HasMetadata> getDriverResourcesSpec() {
-        return getSecondaryResourceSpec().getDriverResources();
-    }
+  public List<HasMetadata> getDriverResourcesSpec() {
+    return getSecondaryResourceSpec().getDriverResources();
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverDecorator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverDecorator.java
index 702785b5..618bc818 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverDecorator.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverDecorator.java
@@ -22,6 +22,7 @@
 import io.fabric8.kubernetes.api.model.ObjectMeta;
 import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
 import lombok.RequiredArgsConstructor;
+
 import org.apache.spark.kubernetes.operator.SparkApplication;
 
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.sparkAppResourceLabels;
@@ -34,19 +35,19 @@
 @RequiredArgsConstructor
 public class DriverDecorator implements ResourceDecorator {
-    private final SparkApplication app;
+  private final SparkApplication app;
 
-    /**
-     * Adds labels and an owner reference pointing to the app to all secondary resources.
-     */
-    @Override
-    public <T extends HasMetadata> T decorate(T resource) {
-        ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
-            .addToOwnerReferences(buildOwnerReferenceTo(app))
-            .addToLabels(sparkAppResourceLabels(app))
-            .withNamespace(app.getMetadata().getNamespace())
-            .build();
-        resource.setMetadata(metaData);
-        return resource;
-    }
+  /**
+   * Adds labels and an owner reference pointing to the app to all secondary resources.
+   */
+  @Override
+  public <T extends HasMetadata> T decorate(T resource) {
+    ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
+        .addToOwnerReferences(buildOwnerReferenceTo(app))
+        .addToLabels(sparkAppResourceLabels(app))
+        .withNamespace(app.getMetadata().getNamespace())
+        .build();
+    resource.setMetadata(metaData);
+    return resource;
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
index 68d31880..2d840048 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
@@ -39,27 +39,27 @@
  */
 @RequiredArgsConstructor
 public class DriverResourceDecorator implements ResourceDecorator {
-    private final Pod driverPod;
+  private final Pod driverPod;
 
-    @Override
-    public <T extends HasMetadata> T decorate(T resource) {
-        boolean ownerReferenceExists = false;
-        if (CollectionUtils.isNotEmpty(resource.getMetadata().getOwnerReferences())) {
-            for (OwnerReference o : resource.getMetadata().getOwnerReferences()) {
-                if 
(driverPod.getKind().equals(o.getKind())
-                    && driverPod.getMetadata().getName().equals(o.getName())
-                    && driverPod.getMetadata().getUid().equals(o.getUid())) {
-                    ownerReferenceExists = true;
-                    break;
-                }
-            }
-        }
-        if (!ownerReferenceExists) {
-            ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
-                .addToOwnerReferences(buildOwnerReferenceTo(driverPod))
-                .build();
-            resource.setMetadata(metaData);
-        }
-        return resource;
-    }
+  @Override
+  public <T extends HasMetadata> T decorate(T resource) {
+    boolean ownerReferenceExists = false;
+    if (CollectionUtils.isNotEmpty(resource.getMetadata().getOwnerReferences())) {
+      for (OwnerReference o : resource.getMetadata().getOwnerReferences()) {
+        if (driverPod.getKind().equals(o.getKind())
+            && driverPod.getMetadata().getName().equals(o.getName())
+            && driverPod.getMetadata().getUid().equals(o.getUid())) {
+          ownerReferenceExists = true;
+          break;
+        }
+      }
+    }
+    if (!ownerReferenceExists) {
+      ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
+          .addToOwnerReferences(buildOwnerReferenceTo(driverPod))
+          .build();
+      resource.setMetadata(metaData);
+    }
+    return resource;
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java
index 7bc51470..874fab8e 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/health/SentinelManager.java
@@ -17,6 +17,14 @@
 package org.apache.spark.kubernetes.operator.health;
 
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import com.google.common.annotations.VisibleForTesting;
 import io.fabric8.kubernetes.api.model.HasMetadata;
 import io.fabric8.kubernetes.client.KubernetesClient;
@@ -25,20 +33,13 @@
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.builder.ToStringBuilder;
+
 import org.apache.spark.kubernetes.operator.BaseResource;
 import org.apache.spark.kubernetes.operator.config.SparkOperatorConf;
 import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
 
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.spark.kubernetes.operator.Constants.SPARK_CONF_SENTINEL_DUMMY_FIELD;
 import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL;
+import static org.apache.spark.kubernetes.operator.Constants.SPARK_CONF_SENTINEL_DUMMY_FIELD;
 
 /**
  * Sentinel manager monitors dedicated sentinel resources to make sure the operator is healthy
 *
@@ -49,161 +50,161 @@
 @Slf4j
 public class SentinelManager<CR extends BaseResource<?, ?, ?, ?, ?>> {
-    private final ConcurrentHashMap<ResourceID, SentinelResourceState> sentinelResources =
-        new ConcurrentHashMap<>();
+  private final ConcurrentHashMap<ResourceID, SentinelResourceState> sentinelResources =
+      new ConcurrentHashMap<>();
 
-    private final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(
-        SparkOperatorConf.SentinelExecutorServicePoolSize.getValue());
+  private final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(
+      SparkOperatorConf.SentinelExecutorServicePoolSize.getValue());
 
-    public static boolean isSentinelResource(HasMetadata resource) {
-        var labels = resource.getMetadata().getLabels();
-        if (labels == null) {
-            return false;
-        }
-        var namespace = resource.getMetadata().getNamespace();
-        return shouldSentinelWatchGivenNamespace(namespace)
-            && Boolean.TRUE.toString()
-            .equalsIgnoreCase(labels.getOrDefault(SENTINEL_LABEL, Boolean.FALSE.toString()));
-    }
+  public static boolean isSentinelResource(HasMetadata resource) {
+    var labels = resource.getMetadata().getLabels();
+    if (labels == null) {
+      return false;
+    }
+    var namespace = resource.getMetadata().getNamespace();
+    return shouldSentinelWatchGivenNamespace(namespace)
+        && Boolean.TRUE.toString()
+        .equalsIgnoreCase(labels.getOrDefault(SENTINEL_LABEL, Boolean.FALSE.toString()));
+  }
 
-    private static boolean shouldSentinelWatchGivenNamespace(String namespace) {
-        if ((!SparkReconcilerUtils.getWatchedNamespaces().isEmpty())
-            && !SparkReconcilerUtils.getWatchedNamespaces().contains(namespace)) {
-            if (log.isErrorEnabled()) {
-                log.error("Skip watching sentinel resource in namespace {}", namespace);
-            }
-            return false;
-        }
-        return true;
-    }
+  private static boolean shouldSentinelWatchGivenNamespace(String namespace) {
+    if ((!SparkReconcilerUtils.getWatchedNamespaces().isEmpty())
+        && !SparkReconcilerUtils.getWatchedNamespaces().contains(namespace)) {
+      if (log.isErrorEnabled()) {
+        log.error("Skip watching sentinel resource in namespace {}", namespace);
+      }
+      return false;
+    }
+    return true;
+  }
 
-    public boolean allSentinelsAreHealthy() {
-        Set<ResourceID> unWatchedKey = new HashSet<>();
-        var result = sentinelResources.entrySet().stream().filter(
-            x -> {
-                if (x.getKey().getNamespace().isPresent()
-                    && shouldSentinelWatchGivenNamespace(x.getKey().getNamespace().get())) {
-                    return true;
-                }
-                unWatchedKey.add(x.getKey());
-                return false;
-            }
-        ).map(Map.Entry::getValue).allMatch(SentinelResourceState::isHealthy);
-        sentinelResources.keySet().removeAll(unWatchedKey);
-        return result;
-    }
+  public boolean allSentinelsAreHealthy() {
+    Set<ResourceID> unWatchedKey = new HashSet<>();
+    var result = sentinelResources.entrySet().stream().filter(
+        x -> {
+          if (x.getKey().getNamespace().isPresent()
+              && shouldSentinelWatchGivenNamespace(x.getKey().getNamespace().get())) {
+            return true;
+          }
+          unWatchedKey.add(x.getKey());
+          return false;
+        }
+    ).map(Map.Entry::getValue).allMatch(SentinelResourceState::isHealthy);
+    sentinelResources.keySet().removeAll(unWatchedKey);
+    return result;
+  }
 
-    public void checkHealth(ResourceID resourceID, KubernetesClient client) {
-        SentinelResourceState sentinelResourceState = sentinelResources.get(resourceID);
-        if (sentinelResourceState == null) {
-            if (log.isErrorEnabled()) {
-                log.error("Sentinel resource {} not found. Stopping sentinel health checks",
-                    resourceID);
-            }
-            return;
-        }
-
-        if (sentinelResourceState.reconciledSinceUpdate()) {
-            log.info("Sentinel reports healthy state globally");
-            sentinelResourceState.isHealthy = true;
-        } else {
-            if (log.isErrorEnabled()) {
-                log.error(
-                    "Sentinel deployment {} latest spec was not reconciled. Expected " +
-                        "generation larger than {}, received {}",
-                    resourceID,
-                    sentinelResourceState.previousGeneration,
-                    sentinelResourceState.resource.getMetadata().getGeneration());
-            }
-            sentinelResourceState.isHealthy = false;
-        }
-
-        updateSpecAndScheduleHealthCheck(resourceID, sentinelResourceState, client);
-    }
+  public void checkHealth(ResourceID resourceID, KubernetesClient client) {
+    SentinelResourceState sentinelResourceState = sentinelResources.get(resourceID);
+    if (sentinelResourceState == null) {
+      if (log.isErrorEnabled()) {
+        log.error("Sentinel resource {} not found. Stopping sentinel health checks",
+            resourceID);
+      }
+      return;
+    }
+
+    if (sentinelResourceState.reconciledSinceUpdate()) {
+      log.info("Sentinel reports healthy state globally");
+      sentinelResourceState.isHealthy = true;
+    } else {
+      if (log.isErrorEnabled()) {
+        log.error(
+            "Sentinel deployment {} latest spec was not reconciled. Expected " +
+                "generation larger than {}, received {}",
+            resourceID,
+            sentinelResourceState.previousGeneration,
+            sentinelResourceState.resource.getMetadata().getGeneration());
+      }
+      sentinelResourceState.isHealthy = false;
+    }
+
+    updateSpecAndScheduleHealthCheck(resourceID, sentinelResourceState, client);
+  }
 
-    public boolean handleSentinelResourceReconciliation(CR resource, KubernetesClient client) {
-        if (!isSentinelResource(resource)) {
-            return false;
-        }
-
-        var resourceId = ResourceID.fromResource(resource);
-        sentinelResources.compute(
-            resourceId,
-            (id, previousState) -> {
-                boolean firstReconcile = false;
-                if (previousState == null) {
-                    firstReconcile = true;
-                    previousState = new SentinelResourceState();
-                }
-                previousState.onReconcile(resource);
-                if (firstReconcile) {
-                    updateSpecAndScheduleHealthCheck(resourceId, previousState, client);
-                }
-                return previousState;
-            });
-        return true;
-    }
+  public boolean handleSentinelResourceReconciliation(CR resource, KubernetesClient client) {
+    if (!isSentinelResource(resource)) {
+      return false;
+    }
+
+    var resourceId = ResourceID.fromResource(resource);
+    sentinelResources.compute(
+        resourceId,
+        (id, previousState) -> {
+          boolean firstReconcile = false;
+          if (previousState == null) {
+            firstReconcile = true;
+            previousState = new SentinelResourceState();
+          }
+          previousState.onReconcile(resource);
+          if (firstReconcile) {
+            updateSpecAndScheduleHealthCheck(resourceId, previousState, client);
+          }
+          return previousState;
+        });
+    return true;
+  }
 
-    private void updateSpecAndScheduleHealthCheck(ResourceID resourceID,
-                                                  SentinelResourceState sentinelResourceState,
-                                                  KubernetesClient client) {
-        var sparkConf = sentinelResourceState.resource.getSpec().getSparkConf();
-        sparkConf.compute(SPARK_CONF_SENTINEL_DUMMY_FIELD, (key, value) -> {
-            if (value == null) {
-                return "1";
-            } else {
-                return String.valueOf(Long.parseLong(value) + 1);
-            }
-        });
-        sentinelResourceState.previousGeneration =
-            sentinelResourceState.resource.getMetadata().getGeneration();
-        try {
-            if (log.isDebugEnabled()) {
-                log.debug("Updating the sentinel kubernetes resource spec {}", sentinelResourceState);
-            }
-            client.resource(SparkReconcilerUtils.clone(sentinelResourceState.resource)).replace();
-        } catch (Throwable t) {
-            if (log.isWarnEnabled()) {
-                log.warn("Could not replace the sentinel deployment spark conf {}",
-                    SPARK_CONF_SENTINEL_DUMMY_FIELD, t);
-            }
-        }
-        var delay = SparkOperatorConf.SENTINEL_RESOURCE_RECONCILIATION_DELAY.getValue();
-        if (log.isInfoEnabled()) {
-            log.info("Scheduling sentinel check for {} in {} seconds", resourceID, delay);
-        }
-        executorService.schedule(() -> checkHealth(resourceID, client),
-            delay,
-            TimeUnit.SECONDS);
-    }
+  private void updateSpecAndScheduleHealthCheck(ResourceID resourceID,
+                                                SentinelResourceState sentinelResourceState,
+                                                KubernetesClient client) {
+    var sparkConf = sentinelResourceState.resource.getSpec().getSparkConf();
+    sparkConf.compute(SPARK_CONF_SENTINEL_DUMMY_FIELD, (key, value) -> {
+      if (value == null) {
+        return "1";
+      } else {
+        return String.valueOf(Long.parseLong(value) + 1);
+      }
+    });
+    
sentinelResourceState.previousGeneration =
+        sentinelResourceState.resource.getMetadata().getGeneration();
+    try {
+      if (log.isDebugEnabled()) {
+        log.debug("Updating the sentinel kubernetes resource spec {}", sentinelResourceState);
+      }
+      client.resource(SparkReconcilerUtils.clone(sentinelResourceState.resource)).replace();
+    } catch (Throwable t) {
+      if (log.isWarnEnabled()) {
+        log.warn("Could not replace the sentinel deployment spark conf {}",
+            SPARK_CONF_SENTINEL_DUMMY_FIELD, t);
+      }
+    }
+    var delay = SparkOperatorConf.SENTINEL_RESOURCE_RECONCILIATION_DELAY.getValue();
+    if (log.isInfoEnabled()) {
+      log.info("Scheduling sentinel check for {} in {} seconds", resourceID, delay);
+    }
+    executorService.schedule(() -> checkHealth(resourceID, client),
+        delay,
+        TimeUnit.SECONDS);
+  }
 
-    public class SentinelResourceState {
-        CR resource;
-        long previousGeneration;
-
-        @Getter
-        boolean isHealthy = true;
+  public class SentinelResourceState {
+    CR resource;
+    long previousGeneration;
+
+    @Getter
+    boolean isHealthy = true;
 
-        void onReconcile(CR cr) {
-            resource = cr;
-        }
+    void onReconcile(CR cr) {
+      resource = cr;
+    }
 
-        boolean reconciledSinceUpdate() {
-            return resource.getMetadata().getGeneration() > previousGeneration;
-        }
+    boolean reconciledSinceUpdate() {
+      return resource.getMetadata().getGeneration() > previousGeneration;
+    }
 
-        @Override
-        public String toString() {
-            return new ToStringBuilder(this)
-                .append("resource", resource)
-                .append("previousGeneration", previousGeneration)
-                .append("isHealthy", isHealthy)
-                .toString();
-        }
-    }
+    @Override
+    public String toString() {
+      return new ToStringBuilder(this)
+          .append("resource", resource)
+          .append("previousGeneration", previousGeneration)
+          .append("isHealthy", isHealthy)
+          .toString();
+    }
+  }
 
-    @VisibleForTesting
-    public ConcurrentHashMap<ResourceID, SentinelResourceState> getSentinelResources() {
-        return sentinelResources;
-    }
+  @VisibleForTesting
+  public ConcurrentHashMap<ResourceID, SentinelResourceState> getSentinelResources() {
+    return sentinelResources;
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
index 947e6d78..2a3afe83 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
@@ -25,5 +25,5 @@
  * Custom listeners, if added, would be listening to Spark App status change
  */
 public abstract class ApplicationStatusListener extends BaseStatusListener<ApplicationStatus,
-        SparkApplication> {
+    SparkApplication> {
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java
index aa213290..3c6367ba 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/BaseStatusListener.java
@@ -25,6 +25,6 @@
  * Custom listeners, if added, would be listening to resource status change
  */
 public abstract class BaseStatusListener<STATUS extends BaseStatus<?, ?, ?>,
-        CR extends BaseResource<?, ?, ?, ?, STATUS>> {
-    public abstract void listenStatus(CR resource, STATUS prevStatus, STATUS updatedStatus);
+    CR extends BaseResource<?, ?, ?, ?, STATUS>> {
+  public abstract void listenStatus(CR resource, STATUS prevStatus, STATUS updatedStatus);
 }
diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/JVMMetricSet.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/JVMMetricSet.java
index 144a05dd..7ca4bcb7 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/JVMMetricSet.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/JVMMetricSet.java
@@ -18,6 +18,10 @@
 package org.apache.spark.kubernetes.operator.metrics;
 
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricSet;
 import com.codahale.metrics.jvm.BufferPoolMetricSet;
@@ -26,41 +30,37 @@
 import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
 import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
 
-import java.lang.management.ManagementFactory;
-import java.util.HashMap;
-import java.util.Map;
-
 public class JVMMetricSet implements MetricSet {
-    public static final String FILE_DESC_RATIO_OPEN_MAX = "fileDesc.ratio.open/max";
-    private final BufferPoolMetricSet bufferPoolMetricSet;
-    private final FileDescriptorRatioGauge fileDescriptorRatioGauge;
-    private final GarbageCollectorMetricSet garbageCollectorMetricSet;
-    private final MemoryUsageGaugeSet memoryUsageGaugeSet;
-    private final ThreadStatesGaugeSet threadStatesGaugeSet;
+  public static final String FILE_DESC_RATIO_OPEN_MAX = "fileDesc.ratio.open/max";
+  private final BufferPoolMetricSet bufferPoolMetricSet;
+  private final FileDescriptorRatioGauge fileDescriptorRatioGauge;
+  private final GarbageCollectorMetricSet garbageCollectorMetricSet;
+  private final MemoryUsageGaugeSet memoryUsageGaugeSet;
+  private final ThreadStatesGaugeSet threadStatesGaugeSet;
 
-    public JVMMetricSet() {
-        bufferPoolMetricSet = new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer());
-        fileDescriptorRatioGauge = new FileDescriptorRatioGauge();
-        garbageCollectorMetricSet = new GarbageCollectorMetricSet();
-        memoryUsageGaugeSet = new MemoryUsageGaugeSet();
-        threadStatesGaugeSet = new ThreadStatesGaugeSet();
-    }
+  public JVMMetricSet() {
+    bufferPoolMetricSet = new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer());
+    fileDescriptorRatioGauge = new FileDescriptorRatioGauge();
+    garbageCollectorMetricSet = new GarbageCollectorMetricSet();
+    memoryUsageGaugeSet = new MemoryUsageGaugeSet();
+    threadStatesGaugeSet = new ThreadStatesGaugeSet();
+  }
 
-    @Override
-    public Map<String, Metric> getMetrics() {
-        final Map<String, Metric> jvmMetrics = new HashMap<>();
-        putAllMetrics(jvmMetrics, bufferPoolMetricSet, "bufferPool");
-        jvmMetrics.put(FILE_DESC_RATIO_OPEN_MAX, fileDescriptorRatioGauge);
-        putAllMetrics(jvmMetrics, garbageCollectorMetricSet, "gc");
-        putAllMetrics(jvmMetrics, memoryUsageGaugeSet, "memoryUsage");
-        putAllMetrics(jvmMetrics, threadStatesGaugeSet, "threadStates");
-        return jvmMetrics;
-    }
+  @Override
+  public Map<String, Metric> getMetrics() {
+    final Map<String, Metric> jvmMetrics = new HashMap<>();
+    putAllMetrics(jvmMetrics, bufferPoolMetricSet, "bufferPool");
+    jvmMetrics.put(FILE_DESC_RATIO_OPEN_MAX, fileDescriptorRatioGauge);
+    putAllMetrics(jvmMetrics, garbageCollectorMetricSet, "gc");
+    putAllMetrics(jvmMetrics, memoryUsageGaugeSet, "memoryUsage");
+    putAllMetrics(jvmMetrics, threadStatesGaugeSet, "threadStates");
+    return jvmMetrics;
+  }
 
-    private void putAllMetrics(final Map<String, Metric> destination, final MetricSet origin,
-                               final String prefix) {
-        for (Map.Entry<String, Metric> entry : origin.getMetrics().entrySet()) {
-            destination.put(prefix + "." + entry.getKey(), entry.getValue());
-        }
-    }
+  private void putAllMetrics(final Map<String, Metric> destination, final MetricSet origin,
+                             final String prefix) {
+    for (Map.Entry<String, Metric> entry : origin.getMetrics().entrySet()) {
+      destination.put(prefix + "." + entry.getKey(), entry.getValue());
+    }
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java
index c904ff67..313a2d9d 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsService.java
@@ -17,44 +17,46 @@
 package org.apache.spark.kubernetes.operator.metrics;
 
-import com.sun.net.httpserver.HttpServer;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink;
-import org.apache.spark.metrics.sink.Sink;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.Optional;
 
+import com.sun.net.httpserver.HttpServer;
+import lombok.extern.slf4j.Slf4j;
+
+import org.apache.spark.kubernetes.operator.metrics.sink.PrometheusPullModelSink;
+import org.apache.spark.metrics.sink.Sink;
+
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorMetricsPort;
 
 @Slf4j
 public class MetricsService {
-    HttpServer server;
-    MetricsSystem metricsSystem;
+  HttpServer server;
+  MetricsSystem metricsSystem;
 
-    public MetricsService(MetricsSystem metricsSystem) {
-        this.metricsSystem = metricsSystem;
-        try {
-            server = HttpServer.create(new InetSocketAddress(OperatorMetricsPort.getValue()), 0);
-        } catch (IOException e) {
-            throw new RuntimeException("Failed to create Metrics Server", e);
-        }
-        server.setExecutor(null);
-    }
+  public MetricsService(MetricsSystem metricsSystem) {
+    this.metricsSystem = metricsSystem;
+    try {
+      server = HttpServer.create(new InetSocketAddress(OperatorMetricsPort.getValue()), 0);
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to create Metrics Server", e);
+    }
+    server.setExecutor(null);
+  }
 
-    public void start() {
-        log.info("Metrics Service started");
-        List<Sink> sinks = metricsSystem.getSinks();
-        Optional<Sink> instanceOptional =
-            sinks.stream().filter(x -> x instanceof PrometheusPullModelSink).findAny();
-        instanceOptional.ifPresent(sink ->
-            server.createContext("/prometheus", (PrometheusPullModelSink) sink));
-        server.start();
-    }
+  public void start() {
+    log.info("Metrics Service started");
+    List<Sink> sinks = metricsSystem.getSinks();
+    Optional<Sink> instanceOptional =
+        sinks.stream().filter(x -> x instanceof PrometheusPullModelSink).findAny();
+    instanceOptional.ifPresent(sink ->
+        server.createContext("/prometheus", (PrometheusPullModelSink) sink));
+    server.start();
+  }
 
-    public void stop() {
-        log.info("Metrics Service stopped");
-        server.stop(0);
-    }
+  public void stop() {
+    log.info("Metrics Service stopped");
+    server.stop(0);
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystem.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystem.java
index 1cfabf43..11203e7b 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystem.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystem.java
@@ -18,118 +18,119 @@
 package org.apache.spark.kubernetes.operator.metrics;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicBoolean;
+
 import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
 import lombok.Data;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
+
 import org.apache.spark.kubernetes.operator.metrics.source.JVMSource;
 import org.apache.spark.metrics.sink.Sink;
 import org.apache.spark.metrics.source.Source;
-import com.codahale.metrics.MetricRegistry;
 import org.apache.spark.util.Utils;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicBoolean;
-
 @Slf4j
 public class MetricsSystem {
-    private AtomicBoolean running = new AtomicBoolean(false);
-    @Getter
-    private List<Sink> sinks;
-    @Getter
-    private List<Source> sources;
-    @Getter
-    private MetricRegistry registry;
-    private Properties properties;
-    private Map<String, SinkProps> sinkPropertiesMap;
+  private AtomicBoolean running = new AtomicBoolean(false);
+  @Getter
+  private List<Sink> sinks;
+  @Getter
+  private List<Source> sources;
+  @Getter
+  private MetricRegistry registry;
+  private Properties properties;
+  private Map<String, SinkProps> sinkPropertiesMap;
 
-    public MetricsSystem() {
-        this.sinks = new ArrayList<>();
-        this.sources = new ArrayList<>();
-        this.registry = new MetricRegistry();
-        this.properties = new Properties();
-        this.sinkPropertiesMap = new HashMap<>();
-    }
+  public MetricsSystem() {
+    this.sinks = new ArrayList<>();
+    this.sources = new ArrayList<>();
+    this.registry = new MetricRegistry();
+    this.properties = new Properties();
+    this.sinkPropertiesMap = new HashMap<>();
+  }
 
-    public MetricsSystem(Properties properties) {
-        this.sinks = new ArrayList<>();
-        this.sources = new ArrayList<>();
-        this.registry = new MetricRegistry();
-        this.properties = properties;
-        this.sinkPropertiesMap = MetricsSystemFactory.parseSinkProperties(this.properties);
-    }
+  public MetricsSystem(Properties properties) {
+    this.sinks = new ArrayList<>();
+    this.sources = new ArrayList<>();
+    this.registry = new MetricRegistry();
+    this.properties = properties;
+    this.sinkPropertiesMap = MetricsSystemFactory.parseSinkProperties(this.properties);
+  }
 
-    public void start() {
-        if (running.get()) {
-            throw new IllegalStateException(
-                "Attempting to start a MetricsSystem that is already running");
-        }
-        running.set(true);
-        registerSources();
-        registerSinks();
-        sinks.forEach(Sink::start);
-    }
+  public void start() {
+    if (running.get()) {
+      throw new IllegalStateException(
+          "Attempting to start a MetricsSystem that is already running");
+    }
+    running.set(true);
+    registerSources();
+    registerSinks();
+    sinks.forEach(Sink::start);
+  }
 
-    public void stop() {
-        if (running.get()) {
-            sinks.forEach(Sink::stop);
-            registry.removeMatching(MetricFilter.ALL);
-        } else {
-            log.error("Stopping a MetricsSystem that is not running");
-        }
-        running.set(false);
-    }
+  public void stop() {
+    if (running.get()) {
+      sinks.forEach(Sink::stop);
+      registry.removeMatching(MetricFilter.ALL);
+    } else {
+      log.error("Stopping a MetricsSystem that is not running");
+    }
+    running.set(false);
+  }
 
-    public void report() {
-        sinks.forEach(Sink::report);
-    }
+  public void report() {
+    sinks.forEach(Sink::report);
+  }
 
-    public void registerSinks() {
-        log.info("sinkPropertiesMap: {}", sinkPropertiesMap);
-        sinkPropertiesMap.values().forEach(sinkProp -> {
-            Class<Sink> sink = Utils.classForName(sinkProp.getClassName(), true, false);
-            Sink sinkInstance;
-            try {
-                sinkInstance = sink.getConstructor(Properties.class, MetricRegistry.class)
-                    .newInstance(sinkProp.getProperties(), registry);
-            } catch (Exception e) {
-                if (log.isErrorEnabled()) {
-                    log.error("Failed to create metrics sink for sink name {}, sink properties {}",
-                        sinkProp.getClassName(), sinkProp.getProperties());
-                }
-                throw new RuntimeException("Failed to create metrics sink", e);
-            }
-            sinks.add(sinkInstance);
-        });
-    }
+  public void registerSinks() {
+    log.info("sinkPropertiesMap: {}", sinkPropertiesMap);
+    sinkPropertiesMap.values().forEach(sinkProp -> {
+      Class<Sink> sink = Utils.classForName(sinkProp.getClassName(), true, false);
+      Sink sinkInstance;
+      try {
+        sinkInstance = sink.getConstructor(Properties.class, MetricRegistry.class)
+            .newInstance(sinkProp.getProperties(), registry);
+      } catch (Exception e) {
+        if (log.isErrorEnabled()) {
+          log.error("Failed to create metrics sink for sink name {}, sink properties {}",
+              sinkProp.getClassName(), sinkProp.getProperties());
+        }
+        throw new RuntimeException("Failed to create metrics sink", e);
+      }
+      sinks.add(sinkInstance);
+    });
+  }
 
-    private void registerSources() {
-        // TODO: parse the properties to configure sources
-        registerSource(new JVMSource());
-    }
+  private void registerSources() {
+    // TODO: parse the properties to configure sources
+    registerSource(new JVMSource());
+  }
 
-    public void registerSource(Source source) {
-        sources.add(source);
-        try {
-            String regName = MetricRegistry.name(source.sourceName());
-            registry.register(regName, source.metricRegistry());
-        } catch (IllegalArgumentException e) {
-            log.error("Metrics already registered", e);
-        }
-    }
+  public void registerSource(Source source) {
+    sources.add(source);
+    try {
+      String regName = MetricRegistry.name(source.sourceName());
+      registry.register(regName, source.metricRegistry());
+    } catch (IllegalArgumentException e) {
+      log.error("Metrics already registered", e);
+    }
+  }
 
-    @Data
-    public static class SinkProps {
-        String className;
-        Properties properties;
+  @Data
+  public static class SinkProps {
+    String className;
+    Properties properties;
 
-        public SinkProps() {
-            this.className = "";
-            this.properties = new Properties();
-        }
-    }
+    public SinkProps() {
+      this.className = "";
+      this.properties = new Properties();
+    }
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java
index 8f24fc33..ce2243fe 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactory.java
@@ -18,81 +18,82 @@
 package org.apache.spark.kubernetes.operator.metrics;
 
-import org.apache.commons.lang3.StringUtils;
-import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager;
-
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager;
+
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.CLASS;
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.METRIC_PREFIX;
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.SINK;
 
 public class MetricsSystemFactory {
-    public static MetricsSystem createMetricsSystem() {
-        Properties 
properties =
-            parseMetricsProperties(SparkOperatorConfManager.INSTANCE.getMetricsProperties());
-        return new MetricsSystem(properties);
-    }
+  public static MetricsSystem createMetricsSystem() {
+    Properties properties =
+        parseMetricsProperties(SparkOperatorConfManager.INSTANCE.getMetricsProperties());
+    return new MetricsSystem(properties);
+  }
 
-    private static Properties parseMetricsProperties(Properties userProperties) {
-        Properties properties = new Properties();
-        Enumeration<?> valueEnumeration = userProperties.propertyNames();
-        while (valueEnumeration.hasMoreElements()) {
-            String key = (String) valueEnumeration.nextElement();
-            if (key.startsWith(METRIC_PREFIX)) {
-                properties.put(key.substring(METRIC_PREFIX.length()),
-                    userProperties.getProperty(key));
-            }
-        }
-        return properties;
-    }
+  private static Properties parseMetricsProperties(Properties userProperties) {
+    Properties properties = new Properties();
+    Enumeration<?> valueEnumeration = userProperties.propertyNames();
+    while (valueEnumeration.hasMoreElements()) {
+      String key = (String) valueEnumeration.nextElement();
+      if (key.startsWith(METRIC_PREFIX)) {
+        properties.put(key.substring(METRIC_PREFIX.length()),
+            userProperties.getProperty(key));
+      }
+    }
+    return properties;
+  }
 
-    public static Map<String, MetricsSystem.SinkProps> parseSinkProperties(
-        Properties metricsProperties) {
-        Map<String, MetricsSystem.SinkProps> propertiesMap = new HashMap<>();
-        // e.g., "sink.graphite.class"="org.apache.spark.metrics.sink.GraphiteSink"
-        Enumeration<?> valueEnumeration = metricsProperties.propertyNames();
-        while (valueEnumeration.hasMoreElements()) {
-            String key = (String) valueEnumeration.nextElement();
-            int firstDotIndex = StringUtils.ordinalIndexOf(key, ".", 1);
-            int secondDotIndex = StringUtils.ordinalIndexOf(key, ".", 2);
-            if (key.startsWith(SINK)) {
-                String shortName = key.substring(firstDotIndex + 1, secondDotIndex);
-                MetricsSystem.SinkProps sinkProps =
-                    propertiesMap.getOrDefault(shortName, new MetricsSystem.SinkProps());
-                if (key.endsWith(CLASS)) {
-                    sinkProps.setClassName(metricsProperties.getProperty(key));
-                } else {
-                    sinkProps.getProperties().put(key.substring(secondDotIndex + 1),
-                        metricsProperties.getProperty(key));
-                }
-                propertiesMap.put(shortName, sinkProps);
-            }
-        }
-        sinkPropertiesSanityCheck(propertiesMap);
-        return propertiesMap;
-    }
+  public static Map<String, MetricsSystem.SinkProps> parseSinkProperties(
+      Properties metricsProperties) {
+    Map<String, MetricsSystem.SinkProps> propertiesMap = new HashMap<>();
+    // e.g., "sink.graphite.class"="org.apache.spark.metrics.sink.GraphiteSink"
+    Enumeration<?> valueEnumeration = metricsProperties.propertyNames();
+    while (valueEnumeration.hasMoreElements()) {
+      String key = (String) valueEnumeration.nextElement();
+      int firstDotIndex = StringUtils.ordinalIndexOf(key, ".", 1);
+      int secondDotIndex = StringUtils.ordinalIndexOf(key, ".", 2);
+      if (key.startsWith(SINK)) {
+        String shortName = key.substring(firstDotIndex + 1, secondDotIndex);
+        MetricsSystem.SinkProps sinkProps =
+            propertiesMap.getOrDefault(shortName, new MetricsSystem.SinkProps());
+        if (key.endsWith(CLASS)) {
+          sinkProps.setClassName(metricsProperties.getProperty(key));
+        } else {
+          sinkProps.getProperties().put(key.substring(secondDotIndex + 1),
+              metricsProperties.getProperty(key));
+        }
+        propertiesMap.put(shortName, sinkProps);
+      }
+    }
+    sinkPropertiesSanityCheck(propertiesMap);
+    return propertiesMap;
+  }
 
-    private static void sinkPropertiesSanityCheck(
-        Map<String, MetricsSystem.SinkProps> sinkPropsMap) {
-        for (Map.Entry<String, MetricsSystem.SinkProps> pair : sinkPropsMap.entrySet()) {
-            // Each sink must provide its full class name
-            if (StringUtils.isBlank(pair.getValue().className)) {
-                String errorMessage = String.format(
-                    "%s provides properties, but does not provide a full class name",
-                    pair.getKey());
-                throw new RuntimeException(errorMessage);
-            }
-            // Check the existence of each class full name
-            try {
-                Class.forName(pair.getValue().getClassName());
-            } catch (ClassNotFoundException e) {
-                throw new RuntimeException(
-                    String.format("Failed to find class %s", pair.getValue().getClassName()), e);
-            }
-        }
-    }
+  private static void sinkPropertiesSanityCheck(
+      Map<String, MetricsSystem.SinkProps> sinkPropsMap) {
+    for (Map.Entry<String, MetricsSystem.SinkProps> pair : sinkPropsMap.entrySet()) {
+      // Each sink must provide its full class name
+      if (StringUtils.isBlank(pair.getValue().className)) {
+        String errorMessage = String.format(
+            "%s provides properties, but does not provide a full class name",
+            pair.getKey());
+        throw new RuntimeException(errorMessage);
+      }
+      // Check the existence of each class full name
+      try {
+        Class.forName(pair.getValue().getClassName());
+      } catch (ClassNotFoundException e) {
+        throw new RuntimeException(
+            String.format("Failed to find class %s", pair.getValue().getClassName()), e);
+      }
+    }
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java
index 23a18f66..52aae782 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/sink/PrometheusPullModelSink.java
@@ -17,61 +17,62 @@
 package org.apache.spark.kubernetes.operator.metrics.sink;
 
-import com.codahale.metrics.MetricRegistry;
-import com.sun.net.httpserver.HttpExchange;
-import com.sun.net.httpserver.HttpHandler;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.spark.metrics.sink.PrometheusServlet;
-
 import javax.servlet.http.HttpServletRequest;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 
+import com.codahale.metrics.MetricRegistry;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import lombok.extern.slf4j.Slf4j;
+
+import org.apache.spark.metrics.sink.PrometheusServlet;
+
 import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage;
 
 @Slf4j
 public class PrometheusPullModelSink extends PrometheusServlet implements HttpHandler {
-    public PrometheusPullModelSink(Properties properties, MetricRegistry registry) {
-        super(properties, registry);
-    }
+  public PrometheusPullModelSink(Properties properties, MetricRegistry registry) {
+    super(properties, registry);
+  }
 
-    @Override
-    public void start() {
-        log.info("PrometheusPullModelSink started");
-    }
+  @Override
+  public void start() {
+    log.info("PrometheusPullModelSink started");
+  }
 
-    @Override
-    public void stop() {
-        log.info("PrometheusPullModelSink stopped");
-    }
+  @Override
+  public void stop() {
+    log.info("PrometheusPullModelSink stopped");
+  }
 
-    @Override
-    public void report() {
-        //no-op
-    }
+  @Override
+  public void report() {
+    //no-op
+  }
 
-    @Override
-    public void handle(HttpExchange exchange) throws IOException {
-        // https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala#L50
-        // Temporary solution since the PrometheusServlet.getMetricsSnapshot method does not use
-        // httpServletRequest at all
-        HttpServletRequest httpServletRequest = null;
-        String value = 
getMetricsSnapshot(httpServletRequest);
-        // Prometheus fails with an invalid-syntax error while parsing a value equal to "[]", e.g.:
-        // metrics_jvm_threadStates_deadlocks_Number{type="gauges"} []
-        // metrics_jvm_threadStates_deadlocks_Value{type="gauges"} []
-        String[] records = value.split("\n");
-        List<String> filteredRecords = new ArrayList<>();
-        for (String record : records) {
-            String[] keyValuePair = record.split(" ");
-            // Drop records Prometheus cannot parse (missing value, or empty "[]" value)
-            if (keyValuePair.length < 2 || "[]".equals(keyValuePair[1])) {
-                log.info("Dropping metric record with empty value \"[]\"");
-                continue;
-            }
-            filteredRecords.add(record);
-        }
-        sendMessage(exchange, 200, String.join("\n", filteredRecords));
-    }
+  @Override
+  public void handle(HttpExchange exchange) throws IOException {
+    // https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala#L50
+    // Temporary solution since the PrometheusServlet.getMetricsSnapshot method does not use
+    // httpServletRequest at all
+    HttpServletRequest httpServletRequest = null;
+    String value = getMetricsSnapshot(httpServletRequest);
+    // Prometheus fails with an invalid-syntax error while parsing a value equal to "[]", e.g.:
+    // metrics_jvm_threadStates_deadlocks_Number{type="gauges"} []
+    // metrics_jvm_threadStates_deadlocks_Value{type="gauges"} []
+    String[] records = value.split("\n");
+    List<String> filteredRecords = new ArrayList<>();
+    for (String record : records) {
+      String[] keyValuePair = record.split(" ");
+      // Drop records Prometheus cannot parse (missing value, or empty "[]" value)
+      if (keyValuePair.length < 2 || "[]".equals(keyValuePair[1])) {
+        log.info("Dropping metric record with empty value \"[]\"");
+        continue;
+      }
+      filteredRecords.add(record);
+    }
+    sendMessage(exchange, 200, String.join("\n", filteredRecords));
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java
index b1d499dd..07c44a78 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/JVMSource.java
@@ -19,20 +19,21 @@
 package org.apache.spark.kubernetes.operator.metrics.source;
 
 import com.codahale.metrics.MetricRegistry;
+
 import org.apache.spark.kubernetes.operator.metrics.JVMMetricSet;
 import org.apache.spark.metrics.source.Source;
 
 public class JVMSource implements Source {
-    @Override
-    public String sourceName() {
-        return "jvm";
-    }
+  @Override
+  public String sourceName() {
+    return "jvm";
+  }
 
-    @Override
-    public MetricRegistry metricRegistry() {
-        MetricRegistry metricRegistry = new MetricRegistry();
-        metricRegistry.registerAll(new JVMMetricSet());
-        return metricRegistry;
-    }
+  @Override
+  public MetricRegistry metricRegistry() {
+    MetricRegistry metricRegistry = new MetricRegistry();
+    metricRegistry.registerAll(new JVMMetricSet());
+    return metricRegistry;
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java
index 003716d0..efad902d 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptor.java
@@ -18,6 +18,13 @@
 package org.apache.spark.kubernetes.operator.metrics.source;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
@@ -26,150 +33,144 @@
 import okhttp3.Request;
 import okhttp3.Response;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.spark.metrics.source.Source;
 import org.jetbrains.annotations.NotNull;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
+import org.apache.spark.metrics.source.Source;
 
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.KubernetesClientMetricsGroupByResponseCodeGroupEnabled;
 
 @Slf4j
 public class KubernetesMetricsInterceptor implements Interceptor, Source {
-    MetricRegistry metricRegistry;
-    public static final String NAMESPACES = "namespaces";
-    public static final String HTTP_REQUEST_GROUP = "http.request";
-    public static final String HTTP_REQUEST_FAILED_GROUP = "failed";
-    public static final String HTTP_RESPONSE_GROUP = "http.response";
-    public static final String HTTP_RESPONSE_1XX = "1xx";
-    public static final String HTTP_RESPONSE_2XX = "2xx";
-    public static final String HTTP_RESPONSE_3XX = "3xx";
-    public static final String HTTP_RESPONSE_4XX = "4xx";
-    public static final String HTTP_RESPONSE_5XX = "5xx";
-    private final Histogram responseLatency;
-    private final Map<Integer, Meter> responseCodeMeters =
-        new ConcurrentHashMap<>();
-    private final Map<String, Meter> requestMethodCounter = new ConcurrentHashMap<>();
-    private final List<Meter> responseCodeGroupMeters = new ArrayList<>(5);
-    private final Meter requestFailedRateMeter;
-    private final Meter requestRateMeter;
-    private final Meter responseRateMeter;
-    private final Map<String, Meter> namespacedResourceMethodMeters = new ConcurrentHashMap<>();
-
-    public KubernetesMetricsInterceptor() {
-        metricRegistry = new MetricRegistry();
-
-        responseLatency = metricRegistry.histogram(
-            MetricRegistry.name(HTTP_RESPONSE_GROUP, "latency", "nanos").toLowerCase());
-        requestFailedRateMeter =
-            metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_FAILED_GROUP).toLowerCase());
-        requestRateMeter =
-            metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_GROUP).toLowerCase());
-        responseRateMeter =
-            metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_GROUP).toLowerCase());
-
-        if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) {
-            responseCodeGroupMeters.add(
-                metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_1XX).toLowerCase()));
-            responseCodeGroupMeters.add(
-                metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_2XX).toLowerCase()));
-            responseCodeGroupMeters.add(
-                metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_3XX).toLowerCase()));
-            responseCodeGroupMeters.add(
-                metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_4XX).toLowerCase()));
-            responseCodeGroupMeters.add(
-                metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_5XX).toLowerCase()));
-        }
-    }
+  MetricRegistry metricRegistry;
+  public static final String NAMESPACES = "namespaces";
+  public static final String HTTP_REQUEST_GROUP = "http.request";
+  public static final String HTTP_REQUEST_FAILED_GROUP = "failed";
+  public static final String HTTP_RESPONSE_GROUP = "http.response";
+  public static final String HTTP_RESPONSE_1XX = "1xx";
+  public static final String HTTP_RESPONSE_2XX = "2xx";
+  public static final String HTTP_RESPONSE_3XX = "3xx";
+  public static final String HTTP_RESPONSE_4XX = "4xx";
+  public static final String HTTP_RESPONSE_5XX = "5xx";
+  private final Histogram responseLatency;
+  private final Map<Integer, Meter> responseCodeMeters =
+      new ConcurrentHashMap<>();
+  private final Map<String, Meter> requestMethodCounter = new ConcurrentHashMap<>();
+  private final List<Meter> responseCodeGroupMeters = new ArrayList<>(5);
+  private final Meter requestFailedRateMeter;
+  private final Meter requestRateMeter;
+  private final Meter responseRateMeter;
+  private final Map<String, Meter> namespacedResourceMethodMeters = new ConcurrentHashMap<>();
+
+  public KubernetesMetricsInterceptor() {
+    metricRegistry = new MetricRegistry();
+
+    responseLatency = metricRegistry.histogram(
+        MetricRegistry.name(HTTP_RESPONSE_GROUP, "latency", "nanos").toLowerCase());
+    requestFailedRateMeter =
+        metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_FAILED_GROUP).toLowerCase());
+    requestRateMeter =
+        metricRegistry.meter(MetricRegistry.name(HTTP_REQUEST_GROUP).toLowerCase());
+    responseRateMeter =
+        metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_GROUP).toLowerCase());
+
+    if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) {
+      responseCodeGroupMeters.add(
+          metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_1XX).toLowerCase()));
+      responseCodeGroupMeters.add(
+          metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_2XX).toLowerCase()));
+      responseCodeGroupMeters.add(
+          metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_3XX).toLowerCase()));
+      responseCodeGroupMeters.add(
+          metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_4XX).toLowerCase()));
+      responseCodeGroupMeters.add(
+          metricRegistry.meter(MetricRegistry.name(HTTP_RESPONSE_5XX).toLowerCase()));
    }
+  }
 
-    @NotNull
-    @Override
-    public Response intercept(@NotNull Chain chain) throws IOException {
-        Request request = chain.request();
-        updateRequestMetrics(request);
-        Response response = null;
-        final long startTime = System.nanoTime();
-        try {
-            response = chain.proceed(request);
-            return response;
-        } finally {
-            updateResponseMetrics(response, startTime);
-        }
-    }
+  @NotNull
+  @Override
+  public Response intercept(@NotNull Chain chain) throws IOException {
+    Request request = chain.request();
+    updateRequestMetrics(request);
+    Response response = null;
+    final long startTime = System.nanoTime();
+    try {
+      response = chain.proceed(request);
+      return response;
+    } finally {
+      updateResponseMetrics(response, startTime);
+    }
+  }
 
-    @Override
-    public String sourceName() {
-        return "kubernetes.client";
-    }
+  @Override
+  public String sourceName() {
+    return "kubernetes.client";
+  }
 
-    @Override
-    public MetricRegistry metricRegistry() {
-        return this.metricRegistry;
-    }
+  @Override
+  public MetricRegistry metricRegistry() {
+    return this.metricRegistry;
+  }
 
-    private void updateRequestMetrics(Request request) {
-        this.requestRateMeter.mark();
-        getMeterByRequestMethod(request.method()).mark();
-        Optional<Pair<String, String>> resourceNamePairOptional =
-            parseNamespaceScopedResource(request.url().uri().getPath());
-        resourceNamePairOptional.ifPresent(pair -> {
-                getMeterByRequestMethodAndResourceName(
-                    pair.getValue(), request.method()).mark();
-                getMeterByRequestMethodAndResourceName(
-                    pair.getKey() + "." + pair.getValue(),
-                    request.method()).mark();
-            }
-        );
-    }
+  private void updateRequestMetrics(Request request) {
+    this.requestRateMeter.mark();
+    getMeterByRequestMethod(request.method()).mark();
+    Optional<Pair<String, String>> resourceNamePairOptional =
+        parseNamespaceScopedResource(request.url().uri().getPath());
+    resourceNamePairOptional.ifPresent(pair -> {
+          getMeterByRequestMethodAndResourceName(
+              pair.getValue(), request.method()).mark();
+          getMeterByRequestMethodAndResourceName(
+              pair.getKey() + "." + pair.getValue(),
+              request.method()).mark();
+        }
+    );
+  }
 
-    private void updateResponseMetrics(Response response, long startTimeNanos) {
-        final long latency = System.nanoTime() - startTimeNanos;
-        if (response != null) {
-            this.responseRateMeter.mark();
-            this.responseLatency.update(latency);
-            getMeterByResponseCode(response.code()).mark();
-            if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) {
-                responseCodeGroupMeters.get(response.code() / 100 - 1).mark();
-            }
-        } else {
-            this.requestFailedRateMeter.mark();
-        }
-    }
+  private void updateResponseMetrics(Response response, long startTimeNanos) {
+    final long latency = System.nanoTime() - startTimeNanos;
+    if (response != null) {
+      this.responseRateMeter.mark();
+      this.responseLatency.update(latency);
+      getMeterByResponseCode(response.code()).mark();
+      if (KubernetesClientMetricsGroupByResponseCodeGroupEnabled.getValue()) {
+        responseCodeGroupMeters.get(response.code() / 100 - 1).mark();
+      }
+    } else {
+      this.requestFailedRateMeter.mark();
+    }
+  }
 
-    private Meter getMeterByRequestMethod(String method) {
-        return requestMethodCounter.computeIfAbsent(
-            method,
-            key ->
-                metricRegistry.meter(
-                    MetricRegistry.name(HTTP_REQUEST_GROUP, method).toLowerCase()));
-    }
+  private Meter getMeterByRequestMethod(String method) {
+    return requestMethodCounter.computeIfAbsent(
+        method,
+        key ->
+            metricRegistry.meter(
+                MetricRegistry.name(HTTP_REQUEST_GROUP, method).toLowerCase()));
+  }
 
-    private Meter getMeterByRequestMethodAndResourceName(String resourceName, String method) {
-        String metricsName = MetricRegistry.name(resourceName, method);
-        return namespacedResourceMethodMeters.computeIfAbsent(
-            metricsName,
-            key ->
-                metricRegistry.meter(metricsName.toLowerCase()));
-    }
+  private Meter getMeterByRequestMethodAndResourceName(String resourceName, String method) {
+    String metricsName = MetricRegistry.name(resourceName, method);
+    return namespacedResourceMethodMeters.computeIfAbsent(
+        metricsName,
+        key ->
+            metricRegistry.meter(metricsName.toLowerCase()));
+  }
 
-    private Meter getMeterByResponseCode(int code) {
-        return responseCodeMeters.computeIfAbsent(code,
-            key -> metricRegistry.meter(
-                MetricRegistry.name(HTTP_RESPONSE_GROUP, String.valueOf(code))));
-    }
+  private Meter getMeterByResponseCode(int code) {
+    return responseCodeMeters.computeIfAbsent(code,
+        key -> metricRegistry.meter(
+            MetricRegistry.name(HTTP_RESPONSE_GROUP, String.valueOf(code))));
+  }
 
-    public Optional<Pair<String, String>> parseNamespaceScopedResource(String path) {
-        if (path.contains(NAMESPACES)) {
-            var index = path.indexOf(NAMESPACES) + NAMESPACES.length();
-            String namespaceAndResources = path.substring(index + 1);
-            String[] parts = namespaceAndResources.split("/");
-            // Guard against namespace-only paths with no trailing resource segment
-            if (parts.length < 2) {
-                return Optional.empty();
-            }
-            return Optional.of(Pair.of(parts[0], parts[1]));
-        } else {
-            return Optional.empty();
-        }
-    }
+  public Optional<Pair<String, String>> parseNamespaceScopedResource(String path) {
+    if (path.contains(NAMESPACES)) {
+      var index = path.indexOf(NAMESPACES) + NAMESPACES.length();
+      String namespaceAndResources = path.substring(index + 1);
+      String[] parts = namespaceAndResources.split("/");
+      // Guard against namespace-only paths with no trailing resource segment
+      if (parts.length < 2) {
+        return Optional.empty();
+      }
+      return Optional.of(Pair.of(parts[0], parts[1]));
+    } else {
+      return Optional.empty();
+    }
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetrics.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetrics.java
index eafb83aa..8fa15110 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetrics.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetrics.java
@@ -18,6 +18,11 @@
 package org.apache.spark.kubernetes.operator.metrics.source;
 
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Histogram;
@@ -32,245 +37,241 @@
 import io.javaoperatorsdk.operator.processing.event.ResourceID;
 import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceEvent;
 import lombok.extern.slf4j.Slf4j;
+
 import org.apache.spark.kubernetes.operator.BaseResource;
 import org.apache.spark.kubernetes.operator.SparkApplication;
 import org.apache.spark.metrics.source.Source;
 import org.apache.spark.util.Clock;
 import org.apache.spark.util.SystemClock;
 
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-
 import static io.javaoperatorsdk.operator.api.reconciler.Constants.CONTROLLER_NAME;
 
 @Slf4j
 public class OperatorJosdkMetrics implements Source, Metrics {
-    public static final String FINISHED = "finished";
-    public static final String CLEANUP = "cleanup";
-    public static final String FAILED = "failed";
-    public static final String RETRIES = "retries";
-    private final Map<String, Histogram> histograms = new ConcurrentHashMap<>();
-    private final Map<String, Counter> counters = new ConcurrentHashMap<>();
-    private final Map<String, Gauge<Integer>> gauges = new ConcurrentHashMap<>();
-    private static final String RECONCILIATION = "reconciliation";
-    private static final String RESOURCE = "resource";
-    private static final String EVENT = "event";
-    private static final String SUCCESS = "success";
-    private static final String FAILURE = "failure";
-    private static final String EXCEPTION = "exception";
-    private static final String PREFIX = "operator.sdk";
-    private static final String RECONCILIATIONS = "reconciliations";
-    private static final String RECONCILIATIONS_EXECUTIONS = RECONCILIATIONS + ".executions";
-    private static final String RECONCILIATIONS_QUEUE_SIZE = RECONCILIATIONS + ".queue.size";
-    private static final String SIZE = "size";
+  public static final String FINISHED = "finished";
+  public static final String CLEANUP = "cleanup";
+  public static final String FAILED = "failed";
+  public static final String RETRIES = "retries";
+  private final Map<String, Histogram> histograms = new 
ConcurrentHashMap<>(); + private final Map counters = new ConcurrentHashMap<>(); + private final Map gauges = new ConcurrentHashMap<>(); + private static final String RECONCILIATION = "reconciliation"; + private static final String RESOURCE = "resource"; + private static final String EVENT = "event"; + private static final String SUCCESS = "success"; + private static final String FAILURE = "failure"; + private static final String EXCEPTION = "exception"; + private static final String PREFIX = "operator.sdk"; + private static final String RECONCILIATIONS = "reconciliations"; + private static final String RECONCILIATIONS_EXECUTIONS = RECONCILIATIONS + ".executions"; + private static final String RECONCILIATIONS_QUEUE_SIZE = RECONCILIATIONS + ".queue.size"; + private static final String SIZE = "size"; - private final Clock clock; - private final MetricRegistry metricRegistry; + private final Clock clock; + private final MetricRegistry metricRegistry; - public OperatorJosdkMetrics() { - this.clock = new SystemClock(); - this.metricRegistry = new MetricRegistry(); - } + public OperatorJosdkMetrics() { + this.clock = new SystemClock(); + this.metricRegistry = new MetricRegistry(); + } - @Override - public String sourceName() { - return PREFIX; - } + @Override + public String sourceName() { + return PREFIX; + } - @Override - public MetricRegistry metricRegistry() { - return metricRegistry; - } + @Override + public MetricRegistry metricRegistry() { + return metricRegistry; + } - @Override - public void controllerRegistered(Controller controller) { - // no-op - log.debug("Controller has been registered"); - } + @Override + public void controllerRegistered(Controller controller) { + // no-op + log.debug("Controller has been registered"); + } - @Override - public void receivedEvent(Event event, Map metadata) { - log.debug("received event {}, metadata {}", event, metadata); - if (event instanceof ResourceEvent) { - final var action = ((ResourceEvent) event).getAction(); - final var resource = getResourceClass(metadata); - final var namespaceOptional = event.getRelatedCustomResourceID().getNamespace(); - resource.ifPresent(aClass -> getCounter(aClass, action.name().toLowerCase(), RESOURCE, - EVENT).inc()); - if (resource.isPresent() && namespaceOptional.isPresent()) { - getCounter(resource.get(), namespaceOptional.get(), action.name().toLowerCase(), - RESOURCE, EVENT).inc(); - } - } + @Override + public void receivedEvent(Event event, Map metadata) { + log.debug("received event {}, metadata {}", event, metadata); + if (event instanceof ResourceEvent) { + final var action = ((ResourceEvent) event).getAction(); + final var resource = getResourceClass(metadata); + final var namespaceOptional = event.getRelatedCustomResourceID().getNamespace(); + resource.ifPresent(aClass -> getCounter(aClass, action.name().toLowerCase(), RESOURCE, + EVENT).inc()); + if (resource.isPresent() && namespaceOptional.isPresent()) { + getCounter(resource.get(), namespaceOptional.get(), action.name().toLowerCase(), + RESOURCE, EVENT).inc(); + } } + } - @Override - public T timeControllerExecution(ControllerExecution execution) throws Exception { - log.debug("Time controller execution"); - final var name = execution.controllerName(); - final var resourceID = execution.resourceID(); - final var namespaceOptional = resourceID.getNamespace(); - final var metadata = execution.metadata(); - final var resourceClass = getResourceClass(metadata); - final var execName = execution.name(); + @Override + public T 
timeControllerExecution(ControllerExecution execution) throws Exception { + log.debug("Time controller execution"); + final var name = execution.controllerName(); + final var resourceID = execution.resourceID(); + final var namespaceOptional = resourceID.getNamespace(); + final var metadata = execution.metadata(); + final var resourceClass = getResourceClass(metadata); + final var execName = execution.name(); - long startTime = clock.getTimeMillis(); - try { - T result = execution.execute(); - final var successType = execution.successTypeName(result); - if (resourceClass.isPresent()) { - getHistogram(resourceClass.get(), name, execName, successType).update( - toSeconds(startTime)); - getCounter(resourceClass.get(), name, execName, SUCCESS, successType).inc(); - if (namespaceOptional.isPresent()) { - getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName, - successType).update(toSeconds(startTime)); - getCounter(resourceClass.get(), namespaceOptional.get(), name, execName, - SUCCESS, successType).inc(); - } - } - return result; - } catch (Exception e) { - log.error("Controller execution failed for resource {}, metadata {}", resourceID, - metadata, e); - final var exception = e.getClass().getSimpleName(); - if (resourceClass.isPresent()) { - getHistogram(resourceClass.get(), name, execName, FAILURE).update( - toSeconds(startTime)); - getCounter(resourceClass.get(), name, execName, FAILURE, EXCEPTION, - exception).inc(); - if (namespaceOptional.isPresent()) { - getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName, - FAILURE).update(toSeconds(startTime)); - getCounter(resourceClass.get(), namespaceOptional.get(), name, execName, - FAILURE, EXCEPTION, exception).inc(); - } - } - throw e; + long startTime = clock.getTimeMillis(); + try { + T result = execution.execute(); + final var successType = execution.successTypeName(result); + if (resourceClass.isPresent()) { + getHistogram(resourceClass.get(), name, execName, successType).update( + toSeconds(startTime)); + getCounter(resourceClass.get(), name, execName, SUCCESS, successType).inc(); + if (namespaceOptional.isPresent()) { + getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName, + successType).update(toSeconds(startTime)); + getCounter(resourceClass.get(), namespaceOptional.get(), name, execName, + SUCCESS, successType).inc(); } - } - - @Override - public void reconcileCustomResource(HasMetadata resource, RetryInfo retryInfo, - Map metadata) { - log.debug("Reconcile custom resource {}, with retryInfo {} metadata {}", resource, - retryInfo, metadata); - if (retryInfo != null) { - final var namespace = resource.getMetadata().getNamespace(); - getCounter(resource.getClass(), RECONCILIATION, RETRIES).inc(); - getCounter(resource.getClass(), namespace, RECONCILIATION, RETRIES).inc(); + } + return result; + } catch (Exception e) { + log.error("Controller execution failed for resource {}, metadata {}", resourceID, + metadata, e); + final var exception = e.getClass().getSimpleName(); + if (resourceClass.isPresent()) { + getHistogram(resourceClass.get(), name, execName, FAILURE).update( + toSeconds(startTime)); + getCounter(resourceClass.get(), name, execName, FAILURE, EXCEPTION, + exception).inc(); + if (namespaceOptional.isPresent()) { + getHistogram(resourceClass.get(), namespaceOptional.get(), name, execName, + FAILURE).update(toSeconds(startTime)); + getCounter(resourceClass.get(), namespaceOptional.get(), name, execName, + FAILURE, EXCEPTION, exception).inc(); } - 
getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_QUEUE_SIZE).inc();
+      }
+      throw e;
    }
+  }
 
-    @Override
-    public void failedReconciliation(HasMetadata resource, Exception exception,
-                                     Map<String, Object> metadata) {
-        log.error("Failed reconciliation for resource {} with metadata {}", resource, exception,
-                exception);
-        getCounter(resource.getClass(), RECONCILIATION, FAILED).inc();
-        getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
-                FAILED).inc();
+  @Override
+  public void reconcileCustomResource(HasMetadata resource, RetryInfo retryInfo,
+                                      Map<String, Object> metadata) {
+    log.debug("Reconcile custom resource {}, with retryInfo {} metadata {}", resource,
+        retryInfo, metadata);
+    if (retryInfo != null) {
+      final var namespace = resource.getMetadata().getNamespace();
+      getCounter(resource.getClass(), RECONCILIATION, RETRIES).inc();
+      getCounter(resource.getClass(), namespace, RECONCILIATION, RETRIES).inc();
    }
+    getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_QUEUE_SIZE).inc();
+  }
 
-    @Override
-    public void finishedReconciliation(HasMetadata resource, Map<String, Object> metadata) {
-        log.debug("Finished reconciliation for resource {} with metadata {}", resource, metadata);
-        getCounter(resource.getClass(), RECONCILIATION, FINISHED).inc();
-        getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
-                FINISHED);
-    }
+  @Override
+  public void failedReconciliation(HasMetadata resource, Exception exception,
+                                   Map<String, Object> metadata) {
+    // Metadata fills the second placeholder; the exception is passed last so
+    // SLF4J logs its stack trace.
+    log.error("Failed reconciliation for resource {} with metadata {}", resource, metadata,
+        exception);
+    getCounter(resource.getClass(), RECONCILIATION, FAILED).inc();
+    getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
+        FAILED).inc();
+  }
 
-    @Override
-    public void cleanupDoneFor(ResourceID resourceID, Map<String, Object> metadata) {
-        log.debug("Cleanup Done for resource {} with metadata {}", resourceID, metadata);
-        getCounter(resourceID.getClass(), RECONCILIATION, CLEANUP).inc();
-        resourceID.getNamespace().ifPresent(
-                ns -> getCounter(resourceID.getClass(), ns, RECONCILIATION, CLEANUP).inc());
-    }
+  @Override
+  public void finishedReconciliation(HasMetadata resource, Map<String, Object> metadata) {
+    log.debug("Finished reconciliation for resource {} with metadata {}", resource, metadata);
+    getCounter(resource.getClass(), RECONCILIATION, FINISHED).inc();
+    getCounter(resource.getClass(), resource.getMetadata().getNamespace(), RECONCILIATION,
+        FINISHED).inc();
+  }
 
-    @Override
-    public <T extends Map<?, ?>> T monitorSizeOf(T map, String name) {
-        log.debug("Monitor size for {}", name);
-        var gauge = new Gauge<>() {
-            @Override
-            public Integer getValue() {
-                return map.size();
-            }
-        };
-        gauges.put(MetricRegistry.name(name, SIZE), gauge);
-        return map;
-    }
+  @Override
+  public void cleanupDoneFor(ResourceID resourceID, Map<String, Object> metadata) {
+    log.debug("Cleanup done for resource {} with metadata {}", resourceID, metadata);
+    getCounter(resourceID.getClass(), RECONCILIATION, CLEANUP).inc();
+    resourceID.getNamespace().ifPresent(
+        ns -> getCounter(resourceID.getClass(), ns, RECONCILIATION, CLEANUP).inc());
+  }
 
-    @Override
-    public void reconciliationExecutionStarted(HasMetadata resource, Map<String, Object> metadata) {
-        log.debug("Reconciliation execution started");
-        var namespace = resource.getMetadata().getNamespace();
-        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_EXECUTIONS).inc();
-        getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_EXECUTIONS).inc();
-    }
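For a concrete sense of the names these calls register (an illustration, not code from this patch): the getCounter/getHistogram helpers further down build each codahale name as the lowercased simple class name joined with the name parts by dots, so, for a hypothetical namespace "spark-ns":

    // getCounter(SparkApplication.class, "spark-ns", RECONCILIATION, RETRIES)
    //     -> counter named "sparkapplication.spark-ns.reconciliation.retries"
    // getCounter(SparkApplication.class, RECONCILIATION, FINISHED)
    //     -> counter named "sparkapplication.reconciliation.finished"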
+  @Override
+  public <T extends Map<?, ?>> T monitorSizeOf(T map, String name) {
+    log.debug("Monitor size for {}", name);
+    var gauge = new Gauge<>() {
+      @Override
+      public Integer getValue() {
+        return map.size();
+      }
+    };
+    gauges.put(MetricRegistry.name(name, SIZE), gauge);
+    return map;
+  }
 
-    @Override
-    public void reconciliationExecutionFinished(HasMetadata resource,
-                                                Map<String, Object> metadata) {
-        log.debug("Reconciliation execution finished");
-        var namespace = resource.getMetadata().getNamespace();
-        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_EXECUTIONS).dec();
-        getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_EXECUTIONS).dec();
-        getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
-                RECONCILIATIONS_QUEUE_SIZE).dec();
-    }
+  @Override
+  public void reconciliationExecutionStarted(HasMetadata resource, Map<String, Object> metadata) {
+    log.debug("Reconciliation execution started");
+    var namespace = resource.getMetadata().getNamespace();
+    getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_EXECUTIONS).inc();
+    getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_EXECUTIONS).inc();
+  }
 
-    private long toSeconds(long startTimeInMilliseconds) {
-        return TimeUnit.MILLISECONDS.toSeconds(clock.getTimeMillis() - startTimeInMilliseconds);
-    }
+  @Override
+  public void reconciliationExecutionFinished(HasMetadata resource,
+                                              Map<String, Object> metadata) {
+    log.debug("Reconciliation execution finished");
+    var namespace = resource.getMetadata().getNamespace();
+    getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_EXECUTIONS).dec();
+    getCounter(resource.getClass(), namespace, (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_EXECUTIONS).dec();
+    getCounter(resource.getClass(), (String) metadata.get(CONTROLLER_NAME),
+        RECONCILIATIONS_QUEUE_SIZE).dec();
+  }
 
-    private Histogram getHistogram(Class<?> kclass, String... names) {
-        String name = MetricRegistry.name(kclass.getSimpleName(), names).toLowerCase();
-        Histogram histogram;
-        if (!histograms.containsKey(name)) {
-            histogram = metricRegistry.histogram(name);
-            histograms.put(name, histogram);
-        } else {
-            histogram = histograms.get(name);
-        }
-        return histogram;
+  private long toSeconds(long startTimeInMilliseconds) {
+    return TimeUnit.MILLISECONDS.toSeconds(clock.getTimeMillis() - startTimeInMilliseconds);
+  }
+
+  private Histogram getHistogram(Class<?> kclass, String... names) {
+    String name = MetricRegistry.name(kclass.getSimpleName(), names).toLowerCase();
+    Histogram histogram;
+    if (!histograms.containsKey(name)) {
+      histogram = metricRegistry.histogram(name);
+      histograms.put(name, histogram);
+    } else {
+      histogram = histograms.get(name);
    }
+    return histogram;
+  }
 
-    private Counter getCounter(Class<?> klass, String... 
names) { + String name = MetricRegistry.name(klass.getSimpleName(), names).toLowerCase(); + Counter counter; + if (!counters.containsKey(name)) { + counter = metricRegistry.counter(name); + counters.put(name, counter); + } else { + counter = counters.get(name); } + return counter; + } - private Optional>> getResourceClass( - Map metadata) { - var resourceGvk = (GroupVersionKind) metadata.get(Constants.RESOURCE_GVK_KEY); + private Optional>> getResourceClass( + Map metadata) { + var resourceGvk = (GroupVersionKind) metadata.get(Constants.RESOURCE_GVK_KEY); - if (resourceGvk == null) { - return Optional.empty(); - } + if (resourceGvk == null) { + return Optional.empty(); + } - Class> resourceClass; + Class> resourceClass; - if (resourceGvk.getKind().equals(SparkApplication.class.getSimpleName())) { - resourceClass = SparkApplication.class; - } else { - return Optional.empty(); - } - return Optional.of(resourceClass); + if (resourceGvk.getKind().equals(SparkApplication.class.getSimpleName())) { + resourceClass = SparkApplication.class; + } else { + return Optional.empty(); } + return Optional.of(resourceClass); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java index c5503d81..a175c490 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java @@ -17,6 +17,11 @@ package org.apache.spark.kubernetes.operator.probe; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import io.javaoperatorsdk.operator.Operator; @@ -25,12 +30,8 @@ import io.javaoperatorsdk.operator.health.Status; import lombok.Getter; import lombok.extern.slf4j.Slf4j; -import org.apache.spark.kubernetes.operator.health.SentinelManager; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; +import org.apache.spark.kubernetes.operator.health.SentinelManager; import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.areOperatorsStarted; import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage; @@ -38,76 +39,76 @@ @Getter @Slf4j public class HealthProbe implements HttpHandler { - private final List operators; - private final List> sentinelManagers = new ArrayList<>(); + private final List operators; + private final List> sentinelManagers = new ArrayList<>(); - public HealthProbe(List operators) { - this.operators = operators; - } - - public void registerSentinelResourceManager(SentinelManager sentinelManager) { - sentinelManagers.add(sentinelManager); - } + public HealthProbe(List operators) { + this.operators = operators; + } - public boolean isHealthy() { - var operatorsAreReady = areOperatorsStarted(operators); - if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) { - return false; - } + public void registerSentinelResourceManager(SentinelManager sentinelManager) { + sentinelManagers.add(sentinelManager); + } - var runtimeInfosAreHealthy = operators.stream().map(operator -> - checkInformersHealth(operator.getRuntimeInfo()) - ).reduce((a, b) -> a && b); + public boolean isHealthy() { + var operatorsAreReady = areOperatorsStarted(operators); + if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) { + return false; + 
} - if (runtimeInfosAreHealthy.isEmpty() || !runtimeInfosAreHealthy.get()) { - return false; - } + var runtimeInfosAreHealthy = operators.stream().map(operator -> + checkInformersHealth(operator.getRuntimeInfo()) + ).reduce((a, b) -> a && b); - for (SentinelManager sentinelManager : sentinelManagers) { - if (!sentinelManager.allSentinelsAreHealthy()) { - log.error("One sentinel manager {} reported an unhealthy condition.", - sentinelManager); - return false; - } - } + if (runtimeInfosAreHealthy.isEmpty() || !runtimeInfosAreHealthy.get()) { + return false; + } - return true; + for (SentinelManager sentinelManager : sentinelManagers) { + if (!sentinelManager.allSentinelsAreHealthy()) { + log.error("One sentinel manager {} reported an unhealthy condition.", + sentinelManager); + return false; + } } - @Override - public void handle(HttpExchange exchange) throws IOException { - if (isHealthy()) { - sendMessage(exchange, 200, "healthy"); - } else { - sendMessage(exchange, 500, "unhealthy"); - } + return true; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + if (isHealthy()) { + sendMessage(exchange, 200, "healthy"); + } else { + sendMessage(exchange, 500, "unhealthy"); } + } - private boolean checkInformersHealth(RuntimeInfo operatorRuntimeInfo) { - log.info("Checking informer health"); - List informersHealthList = new ArrayList<>(); - for (var controllerEntry : - operatorRuntimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator() - .entrySet()) { - for (var eventSourceEntry : controllerEntry.getValue().entrySet()) { - Map informers = - eventSourceEntry.getValue().informerHealthIndicators(); - for (var informerEntry : informers.entrySet()) { - if (informerEntry.getValue().getStatus() == Status.HEALTHY) { - informersHealthList.add(true); - } else { - if (log.isErrorEnabled()) { - log.error( - "Controller: {}, Event Source: {}, Informer: {} is not in a " + - "healthy state", - controllerEntry.getKey(), eventSourceEntry.getKey(), - informerEntry.getKey()); - } - informersHealthList.add(false); - } - } + private boolean checkInformersHealth(RuntimeInfo operatorRuntimeInfo) { + log.info("Checking informer health"); + List informersHealthList = new ArrayList<>(); + for (var controllerEntry : + operatorRuntimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator() + .entrySet()) { + for (var eventSourceEntry : controllerEntry.getValue().entrySet()) { + Map informers = + eventSourceEntry.getValue().informerHealthIndicators(); + for (var informerEntry : informers.entrySet()) { + if (informerEntry.getValue().getStatus() == Status.HEALTHY) { + informersHealthList.add(true); + } else { + if (log.isErrorEnabled()) { + log.error( + "Controller: {}, Event Source: {}, Informer: {} is not in a " + + "healthy state", + controllerEntry.getKey(), eventSourceEntry.getKey(), + informerEntry.getKey()); } + informersHealthList.add(false); + } } - return informersHealthList.stream().reduce((a, b) -> a && b).orElse(true); + } } + return informersHealthList.stream().reduce((a, b) -> a && b).orElse(true); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java index 7e417728..8eb48856 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java @@ -17,43 +17,44 @@ package 
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java
index 7e417728..8eb48856 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ProbeService.java
@@ -17,43 +17,44 @@
 package org.apache.spark.kubernetes.operator.probe;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
 import com.sun.net.httpserver.HttpServer;
 import io.javaoperatorsdk.operator.Operator;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.spark.kubernetes.operator.health.SentinelManager;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.List;
+import org.apache.spark.kubernetes.operator.health.SentinelManager;
 
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorProbePort;
 
 @Slf4j
 public class ProbeService {
-    public static final String HEALTHZ = "/healthz";
-    public static final String READYZ = "/readyz";
-    HttpServer server;
-
-    public ProbeService(List<Operator> operators, SentinelManager sentinelManager) {
-        HealthProbe healthProbe = new HealthProbe(operators);
-        healthProbe.registerSentinelResourceManager(sentinelManager);
-        try {
-            server = HttpServer.create(new InetSocketAddress(OperatorProbePort.getValue()), 0);
-        } catch (IOException e) {
-            throw new RuntimeException("Failed to create Probe Service Server", e);
-        }
-        server.createContext(READYZ, new ReadinessProbe(operators));
-        server.createContext(HEALTHZ, healthProbe);
-        server.setExecutor(null);
-    }
-
-    public void start() {
-        log.info("Probe service started");
-        server.start();
-    }
-
-    public void stop() {
-        log.info("Probe service stopped");
-        server.stop(0);
+  public static final String HEALTHZ = "/healthz";
+  public static final String READYZ = "/readyz";
+  HttpServer server;
+
+  public ProbeService(List<Operator> operators, SentinelManager sentinelManager) {
+    HealthProbe healthProbe = new HealthProbe(operators);
+    healthProbe.registerSentinelResourceManager(sentinelManager);
+    try {
+      server = HttpServer.create(new InetSocketAddress(OperatorProbePort.getValue()), 0);
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to create Probe Service Server", e);
    }
+    server.createContext(READYZ, new ReadinessProbe(operators));
+    server.createContext(HEALTHZ, healthProbe);
+    server.setExecutor(null);
+  }
+
+  public void start() {
+    log.info("Probe service started");
+    server.start();
+  }
+
+  public void stop() {
+    log.info("Probe service stopped");
+    server.stop(0);
+  }
 }
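A wiring sketch (assumed startup code, not from this patch; the port comes from SparkOperatorConf.OperatorProbePort):

    // Expose kubelet-style endpoints for the operator deployment:
    //   GET /readyz -> ReadinessProbe, GET /healthz -> HealthProbe
    ProbeService probeService = new ProbeService(List.of(operator), sentinelManager);
    probeService.start();
    // ... and on shutdown:
    probeService.stop();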
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java
index bd810a2f..6ea7be8e 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbe.java
@@ -17,40 +17,40 @@
 package org.apache.spark.kubernetes.operator.probe;
 
+import java.io.IOException;
+import java.util.List;
+
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpHandler;
 import io.javaoperatorsdk.operator.Operator;
 import lombok.extern.slf4j.Slf4j;
 
-import java.io.IOException;
-import java.util.List;
-
 import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.areOperatorsStarted;
 import static org.apache.spark.kubernetes.operator.utils.ProbeUtil.sendMessage;
 
 @Slf4j
 public class ReadinessProbe implements HttpHandler {
-    private final List<Operator> operators;
+  private final List<Operator> operators;
 
-    public ReadinessProbe(List<Operator> operators) {
-        this.operators = operators;
-    }
-
-    @Override
-    public void handle(HttpExchange httpExchange) throws IOException {
-        var operatorsAreReady = areOperatorsStarted(operators);
-        if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) {
-            sendMessage(httpExchange, 400, "spark operators are not ready yet");
-        }
-
-        if (!passRbacCheck()) {
-            sendMessage(httpExchange, 403, "required rbac test failed, operators are not ready");
-        }
+  public ReadinessProbe(List<Operator> operators) {
+    this.operators = operators;
+  }
 
-        sendMessage(httpExchange, 200, "started");
+  @Override
+  public void handle(HttpExchange httpExchange) throws IOException {
+    var operatorsAreReady = areOperatorsStarted(operators);
+    if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) {
+      sendMessage(httpExchange, 400, "spark operators are not ready yet");
+      // Stop here; otherwise the handler falls through and also answers 200.
+      return;
+    }
 
-    public boolean passRbacCheck() {
-        return true;
+    if (!passRbacCheck()) {
+      sendMessage(httpExchange, 403, "required rbac test failed, operators are not ready");
+      return;
    }
+
+    sendMessage(httpExchange, 200, "started");
+  }
+
+  public boolean passRbacCheck() {
+    return true;
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
index 7fa9df37..b6716d16 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
@@ -18,49 +18,49 @@
 package org.apache.spark.kubernetes.operator.reconciler;
 
-import lombok.Data;
-
 import java.time.Duration;
 
+import lombok.Data;
+
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.AppReconcileIntervalSeconds;
 
 /**
  * Represents the progress of a reconcile request
  * - completed : is set to true if there are no more actions expected in the same reconciliation
  * - requeue : describes whether the mentioned resource needs to be reconciled again - and if so,
- *             the frequency
+ *   the frequency
  */
 @Data
 public class ReconcileProgress {
-    private boolean completed;
-    boolean requeue;
-    private Duration requeueAfterDuration;
+  private boolean completed;
+  boolean requeue;
+  private Duration requeueAfterDuration;
 
-    private ReconcileProgress(boolean completed, boolean requeue, Duration requeueAfterDuration) {
-        this.completed = completed;
-        this.requeue = requeue;
-        this.requeueAfterDuration = requeueAfterDuration;
-    }
+  private ReconcileProgress(boolean completed, boolean requeue, Duration requeueAfterDuration) {
+    this.completed = completed;
+    this.requeue = requeue;
+    this.requeueAfterDuration = requeueAfterDuration;
+  }
 
-    public static ReconcileProgress proceed() {
-        return new ReconcileProgress(false, true,
-                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
-    }
+  public static ReconcileProgress proceed() {
+    return new ReconcileProgress(false, true,
+        Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+  }
 
-    public static ReconcileProgress completeAndDefaultRequeue() {
-        return new ReconcileProgress(true, true,
-                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
-    }
+  public static ReconcileProgress completeAndDefaultRequeue() {
+    return new ReconcileProgress(true, true,
+        Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+  }
 
-    public static ReconcileProgress completeAndRequeueAfter(Duration requeueAfterDuration) {
-        return new ReconcileProgress(true, true, requeueAfterDuration);
-    }
+  public static ReconcileProgress completeAndRequeueAfter(Duration requeueAfterDuration) {
+    return new ReconcileProgress(true, true, requeueAfterDuration);
+  }
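Spelled out for quick reference (read directly off the factory methods above and the two remaining ones below; illustration only):

    // proceed()                     -> completed=false, requeue=true,  after AppReconcileIntervalSeconds
    // completeAndDefaultRequeue()   -> completed=true,  requeue=true,  after AppReconcileIntervalSeconds
    // completeAndRequeueAfter(d)    -> completed=true,  requeue=true,  after d
    // completeAndImmediateRequeue() -> completed=true,  requeue=true,  after Duration.ZERO
    // completeAndNoRequeue()        -> completed=true,  requeue=false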
-    public static ReconcileProgress completeAndImmediateRequeue() {
-        return new ReconcileProgress(true, true, Duration.ZERO);
-    }
+  public static ReconcileProgress completeAndImmediateRequeue() {
+    return new ReconcileProgress(true, true, Duration.ZERO);
+  }
 
-    public static ReconcileProgress completeAndNoRequeue() {
-        return new ReconcileProgress(true, false, Duration.ZERO);
-    }
+  public static ReconcileProgress completeAndNoRequeue() {
+    return new ReconcileProgress(true, false, Duration.ZERO);
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
index 7a927cb6..6ea9eece 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
@@ -18,15 +18,6 @@
 package org.apache.spark.kubernetes.operator.reconciler;
 
-import io.fabric8.kubernetes.api.model.PodTemplateSpec;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.spark.kubernetes.operator.ApplicationClientWorker;
-import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
-import org.apache.spark.kubernetes.operator.SparkApplication;
-import org.apache.spark.kubernetes.operator.decorators.DriverDecorator;
-import org.apache.spark.kubernetes.operator.utils.ModelUtils;
-
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -37,6 +28,16 @@
 import java.util.Map;
 import java.util.Optional;
 
+import io.fabric8.kubernetes.api.model.PodTemplateSpec;
+import io.fabric8.kubernetes.client.KubernetesClient;
+import lombok.extern.slf4j.Slf4j;
+
+import org.apache.spark.kubernetes.operator.ApplicationClientWorker;
+import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.decorators.DriverDecorator;
+import org.apache.spark.kubernetes.operator.utils.ModelUtils;
+
 import static org.apache.spark.kubernetes.operator.utils.ModelUtils.DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY;
 import static org.apache.spark.kubernetes.operator.utils.ModelUtils.EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY;
 import static org.apache.spark.kubernetes.operator.utils.ModelUtils.overrideDriverTemplate;
@@ -44,139 +45,139 @@
 @Slf4j
 public class SparkApplicationReconcileUtils {
-    public static boolean enableForceDelete(SparkApplication app) {
-        long timeoutThreshold = app.getSpec().getApplicationTolerations()
-                .getApplicationTimeoutConfig().getForceTerminationGracePeriodMillis();
-        Instant lastTransitionTime =
-                Instant.parse(app.getStatus().getCurrentState().getLastTransitionTime());
-        return lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now());
-    }
+  public static boolean enableForceDelete(SparkApplication app) {
+    long timeoutThreshold = app.getSpec().getApplicationTolerations()
+        .getApplicationTimeoutConfig().getForceTerminationGracePeriodMillis();
+    Instant lastTransitionTime =
+        Instant.parse(app.getStatus().getCurrentState().getLastTransitionTime());
+    return lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now());
+  }
 
-    public static ApplicationResourceSpec buildResourceSpec(final SparkApplication app,
-                                                            final KubernetesClient client) {
-        Map<String, String> confOverrides = 
overrideMetadataForSecondaryResources(app); - ApplicationResourceSpec resourceSpec = - ApplicationClientWorker.getResourceSpec(app, client, confOverrides); - cleanUpTempResourcesForApp(app, confOverrides); - DriverDecorator decorator = new DriverDecorator(app); - decorator.decorate(resourceSpec.getConfiguredPod()); - return resourceSpec; - } + public static ApplicationResourceSpec buildResourceSpec(final SparkApplication app, + final KubernetesClient client) { + Map confOverrides = overrideMetadataForSecondaryResources(app); + ApplicationResourceSpec resourceSpec = + ApplicationClientWorker.getResourceSpec(app, client, confOverrides); + cleanUpTempResourcesForApp(app, confOverrides); + DriverDecorator decorator = new DriverDecorator(app); + decorator.decorate(resourceSpec.getConfiguredPod()); + return resourceSpec; + } - private static Map overrideMetadataForSecondaryResources( - final SparkApplication app) { - Map confOverrides = new HashMap<>(); - SparkReconcilerUtils.sparkAppResourceLabels(app).forEach((k, v) -> { - confOverrides.put("spark.kubernetes.driver.label." + k, v); - confOverrides.put("spark.kubernetes.driver.service.label." + k, v); - confOverrides.put("spark.kubernetes.executor.label." + k, v); - }); - confOverrides.put("spark.kubernetes.namespace", app.getMetadata().getNamespace()); - if (app.getSpec().getSparkConf().containsKey("spark.app.name")) { - confOverrides.put("spark.app.name", app.getMetadata().getName()); - } - // FIXME: avoid this file flushing - confOverrides.putAll(getOrCreateLocalFileForDriverSpec(app, confOverrides)); - confOverrides.putAll(getOrCreateLocalFileForExecutorSpec(app, confOverrides)); - return confOverrides; + private static Map overrideMetadataForSecondaryResources( + final SparkApplication app) { + Map confOverrides = new HashMap<>(); + SparkReconcilerUtils.sparkAppResourceLabels(app).forEach((k, v) -> { + confOverrides.put("spark.kubernetes.driver.label." + k, v); + confOverrides.put("spark.kubernetes.driver.service.label." + k, v); + confOverrides.put("spark.kubernetes.executor.label." 
+ k, v); + }); + confOverrides.put("spark.kubernetes.namespace", app.getMetadata().getNamespace()); + if (app.getSpec().getSparkConf().containsKey("spark.app.name")) { + confOverrides.put("spark.app.name", app.getMetadata().getName()); } + // FIXME: avoid this file flushing + confOverrides.putAll(getOrCreateLocalFileForDriverSpec(app, confOverrides)); + confOverrides.putAll(getOrCreateLocalFileForExecutorSpec(app, confOverrides)); + return confOverrides; + } - private static void cleanUpTempResourcesForApp(final SparkApplication app, - Map confOverrides) { - if (overrideDriverTemplate(app.getSpec())) { - deleteLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY); - } - if (overrideExecutorTemplate(app.getSpec())) { - deleteLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY); - } + private static void cleanUpTempResourcesForApp(final SparkApplication app, + Map confOverrides) { + if (overrideDriverTemplate(app.getSpec())) { + deleteLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY); + } + if (overrideExecutorTemplate(app.getSpec())) { + deleteLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY); } + } - private static Optional getLocalFileFromPathKey(Map confOverrides, - String pathKey) { - if (confOverrides.containsKey(pathKey)) { - String filePath = confOverrides.get(pathKey); - if (filePath.startsWith("local") || filePath.startsWith("file") || - filePath.startsWith("/")) { - return Optional.of(new File(filePath)); - } - } - return Optional.empty(); + private static Optional getLocalFileFromPathKey(Map confOverrides, + String pathKey) { + if (confOverrides.containsKey(pathKey)) { + String filePath = confOverrides.get(pathKey); + if (filePath.startsWith("local") || filePath.startsWith("file") || + filePath.startsWith("/")) { + return Optional.of(new File(filePath)); + } } + return Optional.empty(); + } - private static void deleteLocalFileFromPathKey(Map confOverrides, - String pathKey) { - Optional localFile = Optional.empty(); - boolean deleted = false; - try { - localFile = getLocalFileFromPathKey(confOverrides, pathKey); - if (localFile.isPresent() && localFile.get().exists() && localFile.get().isFile()) { - deleted = localFile.get().delete(); - } else { - log.warn("Local temp file not found at {}", pathKey); - } - } catch (Throwable t) { - log.error("Failed to delete temp file. Attempting delete upon exit.", t); - } finally { - if (!deleted && localFile.isPresent() && localFile.get().exists()) { - localFile.get().deleteOnExit(); - } - } + private static void deleteLocalFileFromPathKey(Map confOverrides, + String pathKey) { + Optional localFile = Optional.empty(); + boolean deleted = false; + try { + localFile = getLocalFileFromPathKey(confOverrides, pathKey); + if (localFile.isPresent() && localFile.get().exists() && localFile.get().isFile()) { + deleted = localFile.get().delete(); + } else { + log.warn("Local temp file not found at {}", pathKey); + } + } catch (Throwable t) { + log.error("Failed to delete temp file. 
Attempting delete upon exit.", t); + } finally { + if (!deleted && localFile.isPresent() && localFile.get().exists()) { + localFile.get().deleteOnExit(); + } } + } - private static Map getOrCreateLocalFileForDriverSpec( - final SparkApplication app, - final Map confOverrides) { - if (overrideDriverTemplate(app.getSpec())) { - Optional localFile = - getLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY); - if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) { - String filePath = createLocalFileForPodTemplateSpec( - app.getSpec().getDriverSpec().getPodTemplateSpec(), - app.getMetadata().getUid() + "-driver-"); - return Collections.singletonMap(DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY, filePath); - } - } - return Collections.emptyMap(); + private static Map getOrCreateLocalFileForDriverSpec( + final SparkApplication app, + final Map confOverrides) { + if (overrideDriverTemplate(app.getSpec())) { + Optional localFile = + getLocalFileFromPathKey(confOverrides, DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY); + if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) { + String filePath = createLocalFileForPodTemplateSpec( + app.getSpec().getDriverSpec().getPodTemplateSpec(), + app.getMetadata().getUid() + "-driver-"); + return Collections.singletonMap(DRIVER_SPARK_TEMPLATE_FILE_PROP_KEY, filePath); + } } + return Collections.emptyMap(); + } - private static Map getOrCreateLocalFileForExecutorSpec( - final SparkApplication app, - final Map confOverrides) { - if (overrideExecutorTemplate(app.getSpec())) { - Optional localFile = - getLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY); - if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) { - String filePath = createLocalFileForPodTemplateSpec( - app.getSpec().getExecutorSpec().getPodTemplateSpec(), - app.getMetadata().getUid() + "-executor-"); - return Collections.singletonMap(EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY, filePath); - } - } - return Collections.emptyMap(); + private static Map getOrCreateLocalFileForExecutorSpec( + final SparkApplication app, + final Map confOverrides) { + if (overrideExecutorTemplate(app.getSpec())) { + Optional localFile = + getLocalFileFromPathKey(confOverrides, EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY); + if (localFile.isEmpty() || !localFile.get().exists() || !localFile.get().isFile()) { + String filePath = createLocalFileForPodTemplateSpec( + app.getSpec().getExecutorSpec().getPodTemplateSpec(), + app.getMetadata().getUid() + "-executor-"); + return Collections.singletonMap(EXECUTOR_SPARK_TEMPLATE_FILE_PROP_KEY, filePath); + } } + return Collections.emptyMap(); + } - /** - * Flush driver pod template spec to a local file - * - * @return temp file path - */ - private static String createLocalFileForPodTemplateSpec(final PodTemplateSpec podTemplateSpec, - final String tempFilePrefix) { - try { - File tmpFile = File.createTempFile(tempFilePrefix, ".json"); - FileOutputStream fileStream = new FileOutputStream(tmpFile); - OutputStreamWriter writer = new OutputStreamWriter(fileStream, "UTF-8"); - writer.write( - ModelUtils.asJsonString(ModelUtils.getPodFromTemplateSpec(podTemplateSpec))); - writer.close(); - String path = tmpFile.getAbsolutePath(); - if (log.isDebugEnabled()) { - log.debug("Temp file wrote to {}", tmpFile.getAbsolutePath()); - } - return path; - } catch (IOException e) { - throw new RuntimeException(e); - } + /** + * Flush driver pod template spec to a local file + * + * @return temp file 
path
+   */
+  private static String createLocalFileForPodTemplateSpec(final PodTemplateSpec podTemplateSpec,
+                                                          final String tempFilePrefix) {
+    try {
+      File tmpFile = File.createTempFile(tempFilePrefix, ".json");
+      // try-with-resources closes the stream even when the write fails.
+      try (OutputStreamWriter writer =
+               new OutputStreamWriter(new FileOutputStream(tmpFile), "UTF-8")) {
+        writer.write(
+            ModelUtils.asJsonString(ModelUtils.getPodFromTemplateSpec(podTemplateSpec)));
+      }
+      String path = tmpFile.getAbsolutePath();
+      if (log.isDebugEnabled()) {
+        log.debug("Temp file written to {}", tmpFile.getAbsolutePath());
+      }
+      return path;
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
 }
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java
index 3e7193ec..67562790 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java
@@ -18,6 +18,11 @@
 package org.apache.spark.kubernetes.operator.reconciler;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
 import io.fabric8.kubernetes.api.model.Pod;
 import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
 import io.javaoperatorsdk.operator.api.reconciler.Cleaner;
@@ -35,6 +40,7 @@
 import io.javaoperatorsdk.operator.processing.event.source.informer.Mappers;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
+
 import org.apache.spark.kubernetes.operator.Constants;
 import org.apache.spark.kubernetes.operator.SparkApplication;
 import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext;
@@ -44,9 +50,9 @@
 import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverStartObserver;
 import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverTimeoutObserver;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppCleanUpStep;
-import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppResourceObserveStep;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppInitStep;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppReconcileStep;
+import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppResourceObserveStep;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppRunningStep;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppTerminatedStep;
 import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppValidateStep;
@@ -55,11 +61,6 @@
 import org.apache.spark.kubernetes.operator.utils.LoggingUtils;
 import org.apache.spark.kubernetes.operator.utils.StatusRecorder;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
 import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue;
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.commonResourceLabelsStr;
 
@@ -72,154 +73,155 @@
 @Slf4j
 @RequiredArgsConstructor
 public class SparkApplicationReconciler
-        implements Reconciler<SparkApplication>,
-        ErrorStatusHandler<SparkApplication>,
-        EventSourceInitializer<SparkApplication>,
-        Cleaner<SparkApplication> {
-    private final StatusRecorder statusRecorder;
-    private final SentinelManager<SparkApplication> 
sentinelManager; + implements Reconciler, + ErrorStatusHandler, + EventSourceInitializer, + Cleaner { + private final StatusRecorder statusRecorder; + private final SentinelManager sentinelManager; - @Override - public UpdateControl reconcile(SparkApplication sparkApplication, - Context context) - throws Exception { - LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); - try { - trackedMDC.set(sparkApplication); - if (sentinelManager.handleSentinelResourceReconciliation(sparkApplication, - context.getClient())) { - return UpdateControl.noUpdate(); - } - log.debug("Start reconciliation."); - statusRecorder.updateStatusFromCache(sparkApplication); - SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); - List reconcileSteps = getReconcileSteps(sparkApplication); - for (AppReconcileStep step : reconcileSteps) { - ReconcileProgress progress = step.reconcile(ctx, statusRecorder); - if (progress.isCompleted()) { - return SparkReconcilerUtils.toUpdateControl(sparkApplication, progress); - } - } - return SparkReconcilerUtils.toUpdateControl(sparkApplication, - completeAndDefaultRequeue()); - - } finally { - log.debug("Reconciliation completed."); - trackedMDC.reset(); + @Override + public UpdateControl reconcile(SparkApplication sparkApplication, + Context context) + throws Exception { + LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); + try { + trackedMDC.set(sparkApplication); + if (sentinelManager.handleSentinelResourceReconciliation(sparkApplication, + context.getClient())) { + return UpdateControl.noUpdate(); + } + log.debug("Start reconciliation."); + statusRecorder.updateStatusFromCache(sparkApplication); + SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); + List reconcileSteps = getReconcileSteps(sparkApplication); + for (AppReconcileStep step : reconcileSteps) { + ReconcileProgress progress = step.reconcile(ctx, statusRecorder); + if (progress.isCompleted()) { + return SparkReconcilerUtils.toUpdateControl(sparkApplication, progress); } + } + return SparkReconcilerUtils.toUpdateControl(sparkApplication, + completeAndDefaultRequeue()); + + } finally { + log.debug("Reconciliation completed."); + trackedMDC.reset(); } + } - @Override - public ErrorStatusUpdateControl updateErrorStatus( - SparkApplication sparkApplication, - Context context, - Exception e) { - LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); - try { - trackedMDC.set(sparkApplication); - context.getRetryInfo().ifPresent(retryInfo -> { - if (log.isErrorEnabled()) { - log.error("Failed attempt: {}, last attempt: {}", retryInfo.getAttemptCount(), - retryInfo.isLastAttempt()); - } - }); - return ErrorStatusUpdateControl.noStatusUpdate(); - } finally { - trackedMDC.reset(); + @Override + public ErrorStatusUpdateControl updateErrorStatus( + SparkApplication sparkApplication, + Context context, + Exception e) { + LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); + try { + trackedMDC.set(sparkApplication); + context.getRetryInfo().ifPresent(retryInfo -> { + if (log.isErrorEnabled()) { + log.error("Failed attempt: {}, last attempt: {}", retryInfo.getAttemptCount(), + retryInfo.isLastAttempt()); } + }); + return ErrorStatusUpdateControl.noStatusUpdate(); + } finally { + trackedMDC.reset(); } + } - @Override - public Map prepareEventSources( - EventSourceContext context) { - var podEventSource = - new InformerEventSource<>(InformerConfiguration.from(Pod.class, context) - .withSecondaryToPrimaryMapper( 
- Mappers.fromLabel(Constants.LABEL_SPARK_APPLICATION_NAME)) - .withLabelSelector(commonResourceLabelsStr()) - .build(), context); - return EventSourceInitializer.nameEventSources(podEventSource); - } + @Override + public Map prepareEventSources( + EventSourceContext context) { + var podEventSource = + new InformerEventSource<>(InformerConfiguration.from(Pod.class, context) + .withSecondaryToPrimaryMapper( + Mappers.fromLabel(Constants.LABEL_SPARK_APPLICATION_NAME)) + .withLabelSelector(commonResourceLabelsStr()) + .build(), context); + return EventSourceInitializer.nameEventSources(podEventSource); + } - protected List getReconcileSteps(final SparkApplication app) { - List steps = new ArrayList<>(); - steps.add(new AppValidateStep()); - steps.add(new AppTerminatedStep()); - switch (app.getStatus().getCurrentState().getCurrentStateSummary()) { - case SUBMITTED: - case SCHEDULED_TO_RESTART: - steps.add(new AppInitStep()); - break; - case DRIVER_REQUESTED: - case DRIVER_STARTED: - steps.add(new AppResourceObserveStep( - List.of(new AppDriverStartObserver(), new AppDriverReadyObserver()))); - steps.add(new AppResourceObserveStep( - Collections.singletonList(new AppDriverRunningObserver()))); - steps.add(new AppResourceObserveStep( - Collections.singletonList(new AppDriverTimeoutObserver()))); - break; - case DRIVER_READY: - case INITIALIZED_BELOW_THRESHOLD_EXECUTORS: - case RUNNING_HEALTHY: - case RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS: - steps.add(new AppRunningStep()); - steps.add(new AppResourceObserveStep( - Collections.singletonList(new AppDriverRunningObserver()))); - steps.add(new AppResourceObserveStep( - Collections.singletonList(new AppDriverTimeoutObserver()))); - break; - case SPARK_SESSION_INITIALIZATION_TIMED_OUT: - case DRIVER_LAUNCH_TIMED_OUT: - case EXECUTORS_LAUNCH_TIMED_OUT: - case SUCCEEDED: - case DRIVER_EVICTED: - case FAILED: - case SCHEDULING_FAILURE: - steps.add(new AppCleanUpStep()); - break; - default: - steps.add(new UnknownStateStep()); - break; - } - return steps; + protected List getReconcileSteps(final SparkApplication app) { + List steps = new ArrayList<>(); + steps.add(new AppValidateStep()); + steps.add(new AppTerminatedStep()); + switch (app.getStatus().getCurrentState().getCurrentStateSummary()) { + case SUBMITTED: + case SCHEDULED_TO_RESTART: + steps.add(new AppInitStep()); + break; + case DRIVER_REQUESTED: + case DRIVER_STARTED: + steps.add(new AppResourceObserveStep( + List.of(new AppDriverStartObserver(), new AppDriverReadyObserver()))); + steps.add(new AppResourceObserveStep( + Collections.singletonList(new AppDriverRunningObserver()))); + steps.add(new AppResourceObserveStep( + Collections.singletonList(new AppDriverTimeoutObserver()))); + break; + case DRIVER_READY: + case INITIALIZED_BELOW_THRESHOLD_EXECUTORS: + case RUNNING_HEALTHY: + case RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS: + steps.add(new AppRunningStep()); + steps.add(new AppResourceObserveStep( + Collections.singletonList(new AppDriverRunningObserver()))); + steps.add(new AppResourceObserveStep( + Collections.singletonList(new AppDriverTimeoutObserver()))); + break; + case SPARK_SESSION_INITIALIZATION_TIMED_OUT: + case DRIVER_LAUNCH_TIMED_OUT: + case EXECUTORS_LAUNCH_TIMED_OUT: + case SUCCEEDED: + case DRIVER_EVICTED: + case FAILED: + case SCHEDULING_FAILURE: + steps.add(new AppCleanUpStep()); + break; + default: + steps.add(new UnknownStateStep()); + break; } + return steps; + } - /** - * Best-effort graceful termination upon delete. 
- * @param sparkApplication the resource that is marked for deletion - * @param context the context with which the operation is executed - * @return DeleteControl, with requeue if needed - */ - @Override - public DeleteControl cleanup(SparkApplication sparkApplication, - Context context) { - LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); - DeleteControl deleteControl = DeleteControl.defaultDelete(); - try { - trackedMDC.set(sparkApplication); - log.info("Cleaning up resources for SparkApp."); - SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); - List cleanupSteps = new ArrayList<>(); - cleanupSteps.add(new AppValidateStep()); - cleanupSteps.add(new AppTerminatedStep()); - cleanupSteps.add(new AppCleanUpStep(ApplicationStatusUtils::appCancelled)); - for (AppReconcileStep step : cleanupSteps) { - ReconcileProgress progress = step.reconcile(ctx, statusRecorder); - if (progress.isCompleted()) { - if (progress.isRequeue()) { - return DeleteControl.noFinalizerRemoval().rescheduleAfter( - progress.getRequeueAfterDuration()); - } else { - break; - } - } - } - } finally { - log.info("Cleanup completed"); - trackedMDC.reset(); + /** + * Best-effort graceful termination upon delete. + * + * @param sparkApplication the resource that is marked for deletion + * @param context the context with which the operation is executed + * @return DeleteControl, with requeue if needed + */ + @Override + public DeleteControl cleanup(SparkApplication sparkApplication, + Context context) { + LoggingUtils.TrackedMDC trackedMDC = new LoggingUtils.TrackedMDC(); + DeleteControl deleteControl = DeleteControl.defaultDelete(); + try { + trackedMDC.set(sparkApplication); + log.info("Cleaning up resources for SparkApp."); + SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); + List cleanupSteps = new ArrayList<>(); + cleanupSteps.add(new AppValidateStep()); + cleanupSteps.add(new AppTerminatedStep()); + cleanupSteps.add(new AppCleanUpStep(ApplicationStatusUtils::appCancelled)); + for (AppReconcileStep step : cleanupSteps) { + ReconcileProgress progress = step.reconcile(ctx, statusRecorder); + if (progress.isCompleted()) { + if (progress.isRequeue()) { + return DeleteControl.noFinalizerRemoval().rescheduleAfter( + progress.getRequeueAfterDuration()); + } else { + break; + } } - statusRecorder.removeCachedStatus(sparkApplication); - return deleteControl; + } + } finally { + log.info("Cleanup completed"); + trackedMDC.reset(); } + statusRecorder.removeCachedStatus(sparkApplication); + return deleteControl; + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java index 83bc051d..3233a375 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkReconcilerUtils.java @@ -18,6 +18,16 @@ package org.apache.spark.kubernetes.operator.reconciler; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import 
io.fabric8.kubernetes.api.model.DeletionPropagation; @@ -30,20 +40,11 @@ import io.javaoperatorsdk.operator.api.reconciler.UpdateControl; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; + import org.apache.spark.kubernetes.operator.BaseResource; import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.SparkApplication; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - import static org.apache.spark.kubernetes.operator.Constants.LABEL_RESOURCE_NAME; import static org.apache.spark.kubernetes.operator.Constants.LABEL_SPARK_OPERATOR_NAME; import static org.apache.spark.kubernetes.operator.Constants.LABEL_SPARK_ROLE_DRIVER_VALUE; @@ -58,195 +59,195 @@ @Slf4j public class SparkReconcilerUtils { - private static final ObjectMapper objectMapper = new ObjectMapper(); - - public static Map commonOperatorResourceLabels() { - Map labels = new HashMap<>(); - labels.put(LABEL_RESOURCE_NAME, OperatorAppName.getValue()); - return labels; - } - - public static Map defaultOperatorConfigLabels() { - Map labels = new HashMap<>(commonOperatorResourceLabels()); - labels.put("app.kubernetes.io/component", "operator-dynamic-config-overrides"); - return labels; - } - - public static Map commonManagedResourceLabels() { - Map labels = new HashMap<>(); - labels.put(LABEL_SPARK_OPERATOR_NAME, OperatorAppName.getValue()); - return labels; - } - - public static Map sparkAppResourceLabels(final SparkApplication app) { - return sparkAppResourceLabels(app.getMetadata().getName()); - } - - public static Map sparkAppResourceLabels(final String appName) { - Map labels = commonManagedResourceLabels(); - labels.put(Constants.LABEL_SPARK_APPLICATION_NAME, appName); - return labels; - } - - public static Map driverLabels(final SparkApplication sparkApplication) { - Map labels = sparkAppResourceLabels(sparkApplication); - labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_DRIVER_VALUE); - return labels; - } - - public static Map executorLabels(final SparkApplication sparkApplication) { - Map labels = sparkAppResourceLabels(sparkApplication); - labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_EXECUTOR_VALUE); - return labels; - } - - public static Set getWatchedNamespaces() { - String namespaces = OperatorWatchedNamespaces.getValue(); - if (StringUtils.isNotEmpty(namespaces)) { - return Arrays.stream(namespaces.split(",")).map(String::trim) - .collect(Collectors.toSet()); - } - return Collections.emptySet(); - } - - /** - * Labels to be applied to all created resources, as a comma-separated string - * - * @return labels string - */ - public static String commonResourceLabelsStr() { - return labelsAsStr(commonManagedResourceLabels()); - } - - public static String labelsAsStr(Map labels) { - return labels - .entrySet() - .stream() - .map(e -> String.join("=", e.getKey(), e.getValue())) - .collect(Collectors.joining(",")); - } - - public static > UpdateControl toUpdateControl( - O resource, ReconcileProgress reconcileProgress) { - // reconciler already handled resource and status update, skip update at lower level - UpdateControl updateControl = UpdateControl.noUpdate(); - if (reconcileProgress.isRequeue()) { - return updateControl.rescheduleAfter(reconcileProgress.getRequeueAfterDuration()); - } else { - return updateControl; - } - } 
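To make the label helpers above concrete (key and value strings here are hypothetical; the real ones come from Constants and OperatorAppName):

    // labelsAsStr joins entries as comma-separated key=value pairs, e.g.
    //   {spark-operator-name=spark-kubernetes-operator, spark-app-name=pi}
    //     -> "spark-operator-name=spark-kubernetes-operator,spark-app-name=pi"
    // commonResourceLabelsStr() hands exactly this format to the pod informer's
    // withLabelSelector(...) in SparkApplicationReconciler.prepareEventSources.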
- - public static > DeleteControl toDeleteControl( - O resource, ReconcileProgress reconcileProgress) { - if (reconcileProgress.isRequeue()) { - return DeleteControl.noFinalizerRemoval().rescheduleAfter( - reconcileProgress.getRequeueAfterDuration()); - } else { - return DeleteControl.defaultDelete(); - } - } - - public static Optional getOrCreateSecondaryResource( - final KubernetesClient client, - final T resource) { - Optional current = getResource(client, resource); - if (current.isEmpty()) { - // Adding retry logic to overcome a k8s bug: - // https://github.com/kubernetes/kubernetes/issues/67761 - long maxAttempts = DriverCreateMaxAttempts.getValue(); - long attemptCount = 1; - while (true) { - try { - current = Optional.ofNullable(client.resource(resource).create()); - break; - } catch (KubernetesClientException e) { - if (log.isErrorEnabled()) { - log.error( - "Failed to request resource with responseCode={} " + - "attemptCount={}/{}", - e.getCode(), attemptCount, maxAttempts); - } - // retry only on 409 Conflict - if (e.getCode() != 409) { - throw e; - } else { - if (isConflictForExistingResource(e)) { - current = getResource(client, resource); - if (current.isPresent()) { - return current; - } - } - if (++attemptCount > maxAttempts) { - log.error("Max Retries exceeded while trying to create resource"); - throw e; - } - } - } - } - } - return current; - } - - public static void addOwnerReferenceSecondaryResource(final KubernetesClient client, - final List resources, - final HasMetadata owner) { - - resources.forEach(r -> { - ObjectMeta metaData = new ObjectMetaBuilder(r.getMetadata()) - .addToOwnerReferences(buildOwnerReferenceTo(owner)) - .build(); - r.setMetadata(metaData); - }); - client.resourceList(resources).forceConflicts().serverSideApply(); - } - - public static Optional getResource(final KubernetesClient client, - final T desired) { - T resource = null; + private static final ObjectMapper objectMapper = new ObjectMapper(); + + public static Map commonOperatorResourceLabels() { + Map labels = new HashMap<>(); + labels.put(LABEL_RESOURCE_NAME, OperatorAppName.getValue()); + return labels; + } + + public static Map defaultOperatorConfigLabels() { + Map labels = new HashMap<>(commonOperatorResourceLabels()); + labels.put("app.kubernetes.io/component", "operator-dynamic-config-overrides"); + return labels; + } + + public static Map commonManagedResourceLabels() { + Map labels = new HashMap<>(); + labels.put(LABEL_SPARK_OPERATOR_NAME, OperatorAppName.getValue()); + return labels; + } + + public static Map sparkAppResourceLabels(final SparkApplication app) { + return sparkAppResourceLabels(app.getMetadata().getName()); + } + + public static Map sparkAppResourceLabels(final String appName) { + Map labels = commonManagedResourceLabels(); + labels.put(Constants.LABEL_SPARK_APPLICATION_NAME, appName); + return labels; + } + + public static Map driverLabels(final SparkApplication sparkApplication) { + Map labels = sparkAppResourceLabels(sparkApplication); + labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_DRIVER_VALUE); + return labels; + } + + public static Map executorLabels(final SparkApplication sparkApplication) { + Map labels = sparkAppResourceLabels(sparkApplication); + labels.put(Constants.LABEL_SPARK_ROLE_NAME, LABEL_SPARK_ROLE_EXECUTOR_VALUE); + return labels; + } + + public static Set getWatchedNamespaces() { + String namespaces = OperatorWatchedNamespaces.getValue(); + if (StringUtils.isNotEmpty(namespaces)) { + return 
Arrays.stream(namespaces.split(",")).map(String::trim) + .collect(Collectors.toSet()); + } + return Collections.emptySet(); + } + + /** + * Labels to be applied to all created resources, as a comma-separated string + * + * @return labels string + */ + public static String commonResourceLabelsStr() { + return labelsAsStr(commonManagedResourceLabels()); + } + + public static String labelsAsStr(Map labels) { + return labels + .entrySet() + .stream() + .map(e -> String.join("=", e.getKey(), e.getValue())) + .collect(Collectors.joining(",")); + } + + public static > UpdateControl toUpdateControl( + O resource, ReconcileProgress reconcileProgress) { + // reconciler already handled resource and status update, skip update at lower level + UpdateControl updateControl = UpdateControl.noUpdate(); + if (reconcileProgress.isRequeue()) { + return updateControl.rescheduleAfter(reconcileProgress.getRequeueAfterDuration()); + } else { + return updateControl; + } + } + + public static > DeleteControl toDeleteControl( + O resource, ReconcileProgress reconcileProgress) { + if (reconcileProgress.isRequeue()) { + return DeleteControl.noFinalizerRemoval().rescheduleAfter( + reconcileProgress.getRequeueAfterDuration()); + } else { + return DeleteControl.defaultDelete(); + } + } + + public static Optional getOrCreateSecondaryResource( + final KubernetesClient client, + final T resource) { + Optional current = getResource(client, resource); + if (current.isEmpty()) { + // Adding retry logic to overcome a k8s bug: + // https://github.com/kubernetes/kubernetes/issues/67761 + long maxAttempts = DriverCreateMaxAttempts.getValue(); + long attemptCount = 1; + while (true) { try { - resource = client.resource(desired).get(); + current = Optional.ofNullable(client.resource(resource).create()); + break; } catch (KubernetesClientException e) { - if (e.getCode() == 404) { - return Optional.empty(); - } - } - return Optional.ofNullable(resource); - } - - public static void deleteResourceIfExists(final KubernetesClient client, - final T resource, - boolean forceDelete) { - try { - if (forceDelete) { - client.resource(resource) - .withGracePeriod(0L) - .delete(); - } else { - client.resource(resource) - .withPropagationPolicy(DeletionPropagation.FOREGROUND) - .withTimeout(ForegroundRequestTimeoutSeconds.getValue(), TimeUnit.SECONDS) - .delete(); + if (log.isErrorEnabled()) { + log.error( + "Failed to request resource with responseCode={} " + + "attemptCount={}/{}", + e.getCode(), attemptCount, maxAttempts); + } + // retry only on 409 Conflict + if (e.getCode() != 409) { + throw e; + } else { + if (isConflictForExistingResource(e)) { + current = getResource(client, resource); + if (current.isPresent()) { + return current; + } } - } catch (KubernetesClientException e) { - if (e.getCode() != 404) { - throw e; - } else { - log.info("Pod to delete does not exist, proceeding..."); + if (++attemptCount > maxAttempts) { + log.error("Max Retries exceeded while trying to create resource"); + throw e; } + } } - } - - public static T clone(T object) { - if (object == null) { - return null; - } - try { - return (T) - objectMapper.readValue( - objectMapper.writeValueAsString(object), object.getClass()); - } catch (JsonProcessingException e) { - throw new IllegalStateException(e); - } - } + } + } + return current; + } + + public static void addOwnerReferenceSecondaryResource(final KubernetesClient client, + final List resources, + final HasMetadata owner) { + + resources.forEach(r -> { + ObjectMeta metaData = new 
ObjectMetaBuilder(r.getMetadata()) + .addToOwnerReferences(buildOwnerReferenceTo(owner)) + .build(); + r.setMetadata(metaData); + }); + client.resourceList(resources).forceConflicts().serverSideApply(); + } + + public static Optional getResource(final KubernetesClient client, + final T desired) { + T resource = null; + try { + resource = client.resource(desired).get(); + } catch (KubernetesClientException e) { + if (e.getCode() == 404) { + return Optional.empty(); + } + } + return Optional.ofNullable(resource); + } + + public static void deleteResourceIfExists(final KubernetesClient client, + final T resource, + boolean forceDelete) { + try { + if (forceDelete) { + client.resource(resource) + .withGracePeriod(0L) + .delete(); + } else { + client.resource(resource) + .withPropagationPolicy(DeletionPropagation.FOREGROUND) + .withTimeout(ForegroundRequestTimeoutSeconds.getValue(), TimeUnit.SECONDS) + .delete(); + } + } catch (KubernetesClientException e) { + if (e.getCode() != 404) { + throw e; + } else { + log.info("Pod to delete does not exist, proceeding..."); + } + } + } + + public static T clone(T object) { + if (object == null) { + return null; + } + try { + return (T) + objectMapper.readValue( + objectMapper.writeValueAsString(object), object.getClass()); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java index a443d104..ea123457 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverReadyObserver.java @@ -18,7 +18,10 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.status.ApplicationState; @@ -26,24 +29,22 @@ import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.PodUtils; -import java.util.Optional; - /** * Observes whether driver is ready */ public class AppDriverReadyObserver extends BaseAppDriverObserver { - @Override - public Optional observe(Pod driver, - ApplicationSpec spec, - ApplicationStatus currentStatus) { - if (ApplicationStateSummary.DRIVER_READY.ordinal() - <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) { - return Optional.empty(); - } - if (PodUtils.isPodReady(driver)) { - return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_READY, - Constants.DriverReady)); - } - return observeDriverTermination(driver, true, spec); + @Override + public Optional observe(Pod driver, + ApplicationSpec spec, + ApplicationStatus currentStatus) { + if (ApplicationStateSummary.DRIVER_READY.ordinal() + <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) { + return Optional.empty(); + } + if (PodUtils.isPodReady(driver)) { + return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_READY, + Constants.DriverReady)); } + return observeDriverTermination(driver, true, spec); + } } diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java index 02f6e0ad..8cba5854 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverRunningObserver.java @@ -18,21 +18,22 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import java.util.Optional; - /** * Observes whether driver reaches running state (in other words, whether its at least scheduled) */ public class AppDriverRunningObserver extends BaseAppDriverObserver { - @Override - public Optional observe(Pod driver, - ApplicationSpec spec, - ApplicationStatus currentStatus) { - return observeDriverTermination(driver, true, spec); - } + @Override + public Optional observe(Pod driver, + ApplicationSpec spec, + ApplicationStatus currentStatus) { + return observeDriverTermination(driver, true, spec); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java index 839adaca..567d0a1c 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverStartObserver.java @@ -18,7 +18,10 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.status.ApplicationState; @@ -26,21 +29,19 @@ import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.PodUtils; -import java.util.Optional; - public class AppDriverStartObserver extends BaseAppDriverObserver { - @Override - public Optional observe(Pod driver, - ApplicationSpec spec, - ApplicationStatus currentStatus) { - if (ApplicationStateSummary.DRIVER_STARTED.ordinal() - <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) { - return Optional.empty(); - } - if (PodUtils.isPodStarted(driver, spec)) { - return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_STARTED, - Constants.DriverRunning)); - } - return observeDriverTermination(driver, false, spec); + @Override + public Optional observe(Pod driver, + ApplicationSpec spec, + ApplicationStatus currentStatus) { + if (ApplicationStateSummary.DRIVER_STARTED.ordinal() + <= currentStatus.getCurrentState().getCurrentStateSummary().ordinal()) { + return Optional.empty(); + } + if (PodUtils.isPodStarted(driver, spec)) { + return Optional.of(new ApplicationState(ApplicationStateSummary.DRIVER_STARTED, + Constants.DriverRunning)); } + return observeDriverTermination(driver, false, spec); + } } diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java index 34a0383c..0b2cffe6 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java @@ -18,73 +18,74 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.time.Instant; +import java.util.Optional; +import java.util.function.Supplier; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.spec.ApplicationTimeoutConfig; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils; -import java.time.Instant; -import java.util.Optional; -import java.util.function.Supplier; - /** * Observes driver status and time-out as configured in app spec */ public class AppDriverTimeoutObserver extends BaseAppDriverObserver { - /** - * Operator may proactively terminate application if it has stay in certain state for a while. - * This helps to avoid resource deadlock when app cannot proceed. - * Such states include - * - DRIVER_REQUESTED -> goes to DRIVER_LAUNCH_TIMED_OUT if driver pod cannot be scheduled or - * cannot start running - * - DRIVER_STARTED -> goes to SPARK_SESSION_INITIALIZATION_TIMED_OUT if Spark session cannot - * be initialized - * - DRIVER_READY / EXECUTOR_REQUESTED / EXECUTOR_SCHEDULED / - * INITIALIZED_BELOW_THRESHOLD_EXECUTORS - * -> go to EXECUTORS_LAUNCH_TIMED_OUT if app cannot acquire at least minimal executors in - * given time - * Operator will NOT proactively stop the app if it has acquired enough executors and later - * lose them. User may build additional layers to alert and act on such scenario. - * Timeout check would be performed at the end of reconcile - and it would be performed only - * if there's no other updates to be performed in the same reconcile action. 
-     */
-    @Override
-    public Optional<ApplicationState> observe(Pod driver,
-                                              ApplicationSpec spec,
-                                              ApplicationStatus currentStatus) {
-        Instant lastTransitionTime =
-                Instant.parse(currentStatus.getCurrentState().getLastTransitionTime());
-        long timeoutThreshold;
-        Supplier<ApplicationState> supplier;
-        ApplicationTimeoutConfig timeoutConfig =
-                spec.getApplicationTolerations().getApplicationTimeoutConfig();
-        switch (currentStatus.getCurrentState().getCurrentStateSummary()) {
-            case DRIVER_REQUESTED:
-                timeoutThreshold = timeoutConfig.getDriverStartTimeoutMillis();
-                supplier = ApplicationStatusUtils::driverLaunchTimedOut;
-                break;
-            case DRIVER_STARTED:
-                timeoutThreshold = timeoutConfig.getSparkSessionStartTimeoutMillis();
-                supplier = ApplicationStatusUtils::driverReadyTimedOut;
-                break;
-            case DRIVER_READY:
-            case INITIALIZED_BELOW_THRESHOLD_EXECUTORS:
-                timeoutThreshold = timeoutConfig.getExecutorStartTimeoutMillis();
-                supplier = ApplicationStatusUtils::executorLaunchTimedOut;
-                break;
-            default:
-                // No timeout check needed for other states
-                return Optional.empty();
-        }
-        if (timeoutThreshold > 0L &&
-                lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now())) {
-            ApplicationState state = supplier.get();
-            state.setLastObservedDriverStatus(driver.getStatus());
-            return Optional.of(state);
-        }
+  /**
+   * Operator may proactively terminate an application if it has stayed in a certain state
+   * for a while. This helps to avoid resource deadlock when the app cannot proceed.
+   * Such states include
+   * - DRIVER_REQUESTED -> goes to DRIVER_LAUNCH_TIMED_OUT if the driver pod cannot be
+   * scheduled or cannot start running
+   * - DRIVER_STARTED -> goes to SPARK_SESSION_INITIALIZATION_TIMED_OUT if the Spark session
+   * cannot be initialized
+   * - DRIVER_READY / EXECUTOR_REQUESTED / EXECUTOR_SCHEDULED /
+   * INITIALIZED_BELOW_THRESHOLD_EXECUTORS
+   * -> go to EXECUTORS_LAUNCH_TIMED_OUT if the app cannot acquire at least the minimal
+   * number of executors in the given time
+   * Operator will NOT proactively stop the app if it has acquired enough executors and later
+   * loses them. Users may build additional layers to alert and act on such scenarios.
+   * The timeout check is performed at the end of reconcile - and only if there are no other
+   * updates to be performed in the same reconcile action.
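+   *
+   * Illustrative sketch only (timestamps are the ISO-8601 strings recorded in
+   * ApplicationState#getLastTransitionTime): the check below boils down to
+   * <pre>{@code
+   * boolean timedOut(String lastTransitionTime, long thresholdMillis) {
+   *   // a non-positive threshold disables the proactive timeout
+   *   return thresholdMillis > 0L
+   *       && Instant.parse(lastTransitionTime)
+   *              .plusMillis(thresholdMillis)
+   *              .isBefore(Instant.now());
+   * }
+   * }</pre>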
+ */ + @Override + public Optional observe(Pod driver, + ApplicationSpec spec, + ApplicationStatus currentStatus) { + Instant lastTransitionTime = + Instant.parse(currentStatus.getCurrentState().getLastTransitionTime()); + long timeoutThreshold; + Supplier supplier; + ApplicationTimeoutConfig timeoutConfig = + spec.getApplicationTolerations().getApplicationTimeoutConfig(); + switch (currentStatus.getCurrentState().getCurrentStateSummary()) { + case DRIVER_REQUESTED: + timeoutThreshold = timeoutConfig.getDriverStartTimeoutMillis(); + supplier = ApplicationStatusUtils::driverLaunchTimedOut; + break; + case DRIVER_STARTED: + timeoutThreshold = timeoutConfig.getSparkSessionStartTimeoutMillis(); + supplier = ApplicationStatusUtils::driverReadyTimedOut; + break; + case DRIVER_READY: + case INITIALIZED_BELOW_THRESHOLD_EXECUTORS: + timeoutThreshold = timeoutConfig.getExecutorStartTimeoutMillis(); + supplier = ApplicationStatusUtils::executorLaunchTimedOut; + break; + default: + // No timeout check needed for other states return Optional.empty(); } + if (timeoutThreshold > 0L && + lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now())) { + ApplicationState state = supplier.get(); + state.setLastObservedDriverStatus(driver.getStatus()); + return Optional.of(state); + } + return Optional.empty(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java index 21d1059d..9d04e5c8 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseAppDriverObserver.java @@ -18,9 +18,14 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + import io.fabric8.kubernetes.api.model.ContainerStatus; import io.fabric8.kubernetes.api.model.Pod; import lombok.extern.slf4j.Slf4j; + import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary; import org.apache.spark.kubernetes.operator.status.ApplicationState; @@ -29,10 +34,6 @@ import org.apache.spark.kubernetes.operator.utils.PodPhase; import org.apache.spark.kubernetes.operator.utils.PodUtils; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - import static org.apache.spark.kubernetes.operator.Constants.DriverCompletedMessage; import static org.apache.spark.kubernetes.operator.Constants.DriverFailedInitContainersMessage; import static org.apache.spark.kubernetes.operator.Constants.DriverFailedMessage; @@ -46,100 +47,100 @@ */ @Slf4j public abstract class BaseAppDriverObserver extends - BaseSecondaryResourceObserver { + BaseSecondaryResourceObserver { - /** - * Check whether the driver pod (and thus the application) has actually terminated - * This would be determined by status of containers - and only by the containers with name - * matches given filter - * e.g. you can use "s -> 'true'" to evaluate all containers - * Driver is considered as 'failed', if any init container failed, or any of the matched - * container(s) has - * 1. failed (isTerminated, non-zero) - * 2. restarted - * 3. 
(corner case) exited 0 without SparkContext / SparkSession initialization
-     * Driver is considered as 'succeeded', if
-     * 1. The pod is succeeded phase, or
-     * 2. The container(s) has exited 0 after SparkContext / SparkSession initialization
-     *
-     * @param driverPod   the driverPod
-     * @param driverReady whether SparkContext / SparkSession has ever been initialized for this
-     *                    pod
-     * @return the ApplicationState to be updated if pod is terminated. Returning empty if pod
-     * is still running
-     */
-    protected Optional<ApplicationState> observeDriverTermination(final Pod driverPod,
-                                                                  final boolean driverReady,
-                                                                  final ApplicationSpec spec) {
-        if (driverPod.getStatus() == null
-                || driverPod.getStatus().getContainerStatuses() == null
-                || driverPod.getStatus().getContainerStatuses().isEmpty()) {
-            log.warn("Cannot determine driver pod status, the pod may in pending state.");
-            return Optional.empty();
-        }
+  /**
+   * Check whether the driver pod (and thus the application) has actually terminated.
+   * This is determined by the status of the containers - and only by the containers whose
+   * names match the given filter,
+   * e.g. you can use "s -> 'true'" to evaluate all containers.
+   * Driver is considered 'failed' if any init container failed, or any of the matched
+   * container(s) has
+   * 1. failed (isTerminated, non-zero)
+   * 2. restarted
+   * 3. (corner case) exited 0 without SparkContext / SparkSession initialization
+   * Driver is considered 'succeeded' if
+   * 1. The pod is in succeeded phase, or
+   * 2. The container(s) have exited 0 after SparkContext / SparkSession initialization
+   *
+   * @param driverPod   the driver pod
+   * @param driverReady whether SparkContext / SparkSession has ever been initialized for this
+   *                    pod
+   * @return the ApplicationState to be updated if pod is terminated.
Returning empty if pod + * is still running + */ + protected Optional observeDriverTermination(final Pod driverPod, + final boolean driverReady, + final ApplicationSpec spec) { + if (driverPod.getStatus() == null + || driverPod.getStatus().getContainerStatuses() == null + || driverPod.getStatus().getContainerStatuses().isEmpty()) { + log.warn("Cannot determine driver pod status, the pod may in pending state."); + return Optional.empty(); + } - if (PodPhase.FAILED.equals(PodPhase.getPhase(driverPod))) { - ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED, - DriverFailedMessage); - if ("Evicted".equalsIgnoreCase(driverPod.getStatus().getReason())) { - applicationState = new ApplicationState(ApplicationStateSummary.DRIVER_EVICTED, - DriverFailedMessage); - } - applicationState.setLastObservedDriverStatus(driverPod.getStatus()); - return Optional.of(applicationState); - } + if (PodPhase.FAILED.equals(PodPhase.getPhase(driverPod))) { + ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED, + DriverFailedMessage); + if ("Evicted".equalsIgnoreCase(driverPod.getStatus().getReason())) { + applicationState = new ApplicationState(ApplicationStateSummary.DRIVER_EVICTED, + DriverFailedMessage); + } + applicationState.setLastObservedDriverStatus(driverPod.getStatus()); + return Optional.of(applicationState); + } - if (PodPhase.SUCCEEDED.equals(PodPhase.getPhase(driverPod))) { - ApplicationState state; - if (driverReady) { - state = new ApplicationState(ApplicationStateSummary.SUCCEEDED, - DriverCompletedMessage); - } else { - state = new ApplicationState(ApplicationStateSummary.FAILED, - DriverTerminatedBeforeInitializationMessage); - state.setLastObservedDriverStatus(driverPod.getStatus()); - } - return Optional.of(state); - } + if (PodPhase.SUCCEEDED.equals(PodPhase.getPhase(driverPod))) { + ApplicationState state; + if (driverReady) { + state = new ApplicationState(ApplicationStateSummary.SUCCEEDED, + DriverCompletedMessage); + } else { + state = new ApplicationState(ApplicationStateSummary.FAILED, + DriverTerminatedBeforeInitializationMessage); + state.setLastObservedDriverStatus(driverPod.getStatus()); + } + return Optional.of(state); + } - List initContainerStatusList = - driverPod.getStatus().getInitContainerStatuses(); - if (initContainerStatusList != null - && initContainerStatusList.parallelStream().anyMatch(PodUtils::isContainerFailed)) { - ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED, - DriverFailedInitContainersMessage); - applicationState.setLastObservedDriverStatus(driverPod.getStatus()); - return Optional.of(applicationState); - } - List containerStatusList = driverPod.getStatus().getContainerStatuses(); - List terminatedCriticalContainers = containerStatusList.parallelStream() - .filter(c -> isDriverMainContainer(spec, c.getName())) - .filter(PodUtils::isContainerExited) - .collect(Collectors.toList()); - if (!terminatedCriticalContainers.isEmpty()) { - ApplicationState applicationState; - if (terminatedCriticalContainers.parallelStream() - .anyMatch(PodUtils::isContainerFailed)) { - applicationState = - new ApplicationState(ApplicationStateSummary.FAILED, DriverFailedMessage); - } else { - applicationState = new ApplicationState(ApplicationStateSummary.SUCCEEDED, - DriverSucceededMessage); - } - applicationState.setLastObservedDriverStatus(driverPod.getStatus()); - return Optional.of(applicationState); - } - if (containerStatusList.parallelStream() - .filter(c -> 
isDriverMainContainer(spec, c.getName())) - .anyMatch(PodUtils::isContainerRestarted)) { - ApplicationState state = - new ApplicationState(ApplicationStateSummary.FAILED, DriverRestartedMessage); - state.setLastObservedDriverStatus(driverPod.getStatus()); - return Optional.of(state); - } - return Optional.empty(); + List initContainerStatusList = + driverPod.getStatus().getInitContainerStatuses(); + if (initContainerStatusList != null + && initContainerStatusList.parallelStream().anyMatch(PodUtils::isContainerFailed)) { + ApplicationState applicationState = new ApplicationState(ApplicationStateSummary.FAILED, + DriverFailedInitContainersMessage); + applicationState.setLastObservedDriverStatus(driverPod.getStatus()); + return Optional.of(applicationState); + } + List containerStatusList = driverPod.getStatus().getContainerStatuses(); + List terminatedCriticalContainers = containerStatusList.parallelStream() + .filter(c -> isDriverMainContainer(spec, c.getName())) + .filter(PodUtils::isContainerExited) + .collect(Collectors.toList()); + if (!terminatedCriticalContainers.isEmpty()) { + ApplicationState applicationState; + if (terminatedCriticalContainers.parallelStream() + .anyMatch(PodUtils::isContainerFailed)) { + applicationState = + new ApplicationState(ApplicationStateSummary.FAILED, DriverFailedMessage); + } else { + applicationState = new ApplicationState(ApplicationStateSummary.SUCCEEDED, + DriverSucceededMessage); + } + applicationState.setLastObservedDriverStatus(driverPod.getStatus()); + return Optional.of(applicationState); + } + if (containerStatusList.parallelStream() + .filter(c -> isDriverMainContainer(spec, c.getName())) + .anyMatch(PodUtils::isContainerRestarted)) { + ApplicationState state = + new ApplicationState(ApplicationStateSummary.FAILED, DriverRestartedMessage); + state.setLastObservedDriverStatus(driverPod.getStatus()); + return Optional.of(state); } + return Optional.empty(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java index 991fe4f7..a8666e4d 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/BaseSecondaryResourceObserver.java @@ -18,26 +18,27 @@ package org.apache.spark.kubernetes.operator.reconciler.observers; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.HasMetadata; + import org.apache.spark.kubernetes.operator.spec.BaseSpec; import org.apache.spark.kubernetes.operator.status.BaseAttemptSummary; import org.apache.spark.kubernetes.operator.status.BaseState; import org.apache.spark.kubernetes.operator.status.BaseStatus; -import java.util.Optional; - /** * Observe given secondary resource, return state to be updated if applicable * These observers does not act on secondary resource. 
They only observe secondary resource * status and update owner SparkApplication status if needed */ public abstract class BaseSecondaryResourceObserver, - SPEC extends BaseSpec, - STATUS extends BaseStatus, - SR extends HasMetadata> { - public abstract Optional observe(SR secondaryResource, - SPEC spec, - STATUS currentStatus); + AS extends BaseAttemptSummary, + STATE extends BaseState, + SPEC extends BaseSpec, + STATUS extends BaseStatus, + SR extends HasMetadata> { + public abstract Optional observe(SR secondaryResource, + SPEC spec, + STATUS currentStatus); } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java index 3790bdac..75fce0ba 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java @@ -18,12 +18,19 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; + import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.Pod; import lombok.AllArgsConstructor; import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; + import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; @@ -36,12 +43,6 @@ import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.function.Supplier; - /** * Cleanup all secondary resources when application is deleted, or at the end of each attempt * Update Application status to indicate whether another attempt would be made @@ -50,99 +51,99 @@ @NoArgsConstructor @Slf4j public class AppCleanUpStep extends AppReconcileStep { - private Supplier cleanUpSuccessStateSupplier; - - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - ApplicationStatus currentStatus = context.getSparkApplication().getStatus(); - ApplicationTolerations tolerations = - context.getSparkApplication().getSpec().getApplicationTolerations(); - String stateMessage = null; - if (!tolerations.getDeleteOnTermination()) { - if (tolerations.getRestartConfig() != null - && !RestartPolicy.Never.equals( - tolerations.getRestartConfig().getRestartPolicy())) { - stateMessage = - "Application is configured to restart, resources created in current " + - "attempt would be force released."; - log.warn(stateMessage); - } else { - ApplicationStatus updatedStatus = currentStatus.appendNewState( - new ApplicationState( - ApplicationStateSummary.TERMINATED_WITHOUT_RELEASE_RESOURCES, - "Application is terminated without releasing resources " + - "as configured.")); - long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() - .getTerminationRequeuePeriodMillis(); - return updateStateAndProceed(context, statusRecorder, updatedStatus, - requeueAfterMillis); - } - } - List 
resourcesToRemove = new ArrayList<>(); - if (ApplicationStateSummary.SCHEDULING_FAILURE.equals( - currentStatus.getCurrentState().getCurrentStateSummary())) { - // if app failed at scheduling, re-compute all spec and delete as they may not be fully - // owned by driver - try { - resourcesToRemove.addAll(context.getDriverPreResourcesSpec()); - resourcesToRemove.add(context.getDriverPodSpec()); - resourcesToRemove.addAll(context.getDriverResourcesSpec()); - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("Failed to build resources for application.", e); - } - ApplicationStatus updatedStatus = currentStatus.appendNewState( - new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, - "Cannot build Spark spec for given application, " + - "consider all resources as released.")); - long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() - .getTerminationRequeuePeriodMillis(); + private Supplier cleanUpSuccessStateSupplier; - return updateStateAndProceed(context, statusRecorder, updatedStatus, - requeueAfterMillis); - } - } else { - Optional driver = context.getDriverPod(); - driver.ifPresent(resourcesToRemove::add); - } - boolean forceDelete = - SparkApplicationReconcileUtils.enableForceDelete(context.getSparkApplication()); - for (HasMetadata resource : resourcesToRemove) { - SparkReconcilerUtils.deleteResourceIfExists(context.getClient(), resource, forceDelete); - } - ApplicationStatus updatedStatus; - if (cleanUpSuccessStateSupplier != null) { - ApplicationState state = cleanUpSuccessStateSupplier.get(); - if (StringUtils.isNotEmpty(stateMessage)) { - state.setMessage(stateMessage); - } - updatedStatus = currentStatus.appendNewState(state); - long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() - .getTerminationRequeuePeriodMillis(); - return updateStateAndProceed(context, statusRecorder, updatedStatus, - requeueAfterMillis); - } else { - updatedStatus = - currentStatus.terminateOrRestart(tolerations.getRestartConfig(), stateMessage, - SparkOperatorConf.TrimAttemptStateTransitionHistory.getValue()); - long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() - .getTerminationRequeuePeriodMillis(); - if (ApplicationStateSummary.SCHEDULED_TO_RESTART.equals(updatedStatus.getCurrentState() - .getCurrentStateSummary())) { - requeueAfterMillis = tolerations.getRestartConfig().getRestartBackoffMillis(); - } - return updateStateAndProceed(context, statusRecorder, updatedStatus, - requeueAfterMillis); + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationStatus currentStatus = context.getSparkApplication().getStatus(); + ApplicationTolerations tolerations = + context.getSparkApplication().getSpec().getApplicationTolerations(); + String stateMessage = null; + if (!tolerations.getDeleteOnTermination()) { + if (tolerations.getRestartConfig() != null + && !RestartPolicy.Never.equals( + tolerations.getRestartConfig().getRestartPolicy())) { + stateMessage = + "Application is configured to restart, resources created in current " + + "attempt would be force released."; + log.warn(stateMessage); + } else { + ApplicationStatus updatedStatus = currentStatus.appendNewState( + new ApplicationState( + ApplicationStateSummary.TERMINATED_WITHOUT_RELEASE_RESOURCES, + "Application is terminated without releasing resources " + + "as configured.")); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + return 
updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } + } + List resourcesToRemove = new ArrayList<>(); + if (ApplicationStateSummary.SCHEDULING_FAILURE.equals( + currentStatus.getCurrentState().getCurrentStateSummary())) { + // if app failed at scheduling, re-compute all spec and delete as they may not be fully + // owned by driver + try { + resourcesToRemove.addAll(context.getDriverPreResourcesSpec()); + resourcesToRemove.add(context.getDriverPodSpec()); + resourcesToRemove.addAll(context.getDriverResourcesSpec()); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to build resources for application.", e); } + ApplicationStatus updatedStatus = currentStatus.appendNewState( + new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, + "Cannot build Spark spec for given application, " + + "consider all resources as released.")); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } + } else { + Optional driver = context.getDriverPod(); + driver.ifPresent(resourcesToRemove::add); } - - private ReconcileProgress updateStateAndProceed(SparkApplicationContext context, - StatusRecorder statusRecorder, - ApplicationStatus updatedStatus, - long requeueAfterMillis) { - statusRecorder.persistStatus(context, updatedStatus); - return ReconcileProgress.completeAndRequeueAfter(Duration.ofMillis(requeueAfterMillis)); + boolean forceDelete = + SparkApplicationReconcileUtils.enableForceDelete(context.getSparkApplication()); + for (HasMetadata resource : resourcesToRemove) { + SparkReconcilerUtils.deleteResourceIfExists(context.getClient(), resource, forceDelete); + } + ApplicationStatus updatedStatus; + if (cleanUpSuccessStateSupplier != null) { + ApplicationState state = cleanUpSuccessStateSupplier.get(); + if (StringUtils.isNotEmpty(stateMessage)) { + state.setMessage(stateMessage); + } + updatedStatus = currentStatus.appendNewState(state); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); + } else { + updatedStatus = + currentStatus.terminateOrRestart(tolerations.getRestartConfig(), stateMessage, + SparkOperatorConf.TrimAttemptStateTransitionHistory.getValue()); + long requeueAfterMillis = tolerations.getApplicationTimeoutConfig() + .getTerminationRequeuePeriodMillis(); + if (ApplicationStateSummary.SCHEDULED_TO_RESTART.equals(updatedStatus.getCurrentState() + .getCurrentStateSummary())) { + requeueAfterMillis = tolerations.getRestartConfig().getRestartBackoffMillis(); + } + return updateStateAndProceed(context, statusRecorder, updatedStatus, + requeueAfterMillis); } + + } + + private ReconcileProgress updateStateAndProceed(SparkApplicationContext context, + StatusRecorder statusRecorder, + ApplicationStatus updatedStatus, + long requeueAfterMillis) { + statusRecorder.persistStatus(context, updatedStatus); + return ReconcileProgress.completeAndRequeueAfter(Duration.ofMillis(requeueAfterMillis)); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java index 15946a1d..38bb7d62 100644 --- 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java @@ -18,9 +18,16 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.Pod; import lombok.extern.slf4j.Slf4j; + import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.SparkApplication; import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; @@ -32,16 +39,10 @@ import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - import static org.apache.spark.kubernetes.operator.Constants.ScheduleFailureMessage; +import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; -import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; import static org.apache.spark.kubernetes.operator.utils.SparkExceptionUtils.buildGeneralErrorMessage; /** @@ -49,89 +50,89 @@ */ @Slf4j public class AppInitStep extends AppReconcileStep { - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - ApplicationState currentState = context.getSparkApplication().getStatus().getCurrentState(); - if (!currentState.getCurrentStateSummary().isInitializing()) { - return proceed(); - } - SparkApplication app = context.getSparkApplication(); - if (app.getStatus().getPreviousAttemptSummary() != null) { - Instant lastTransitionTime = Instant.parse(currentState.getLastTransitionTime()); - Instant restartTime = lastTransitionTime.plusMillis( - app.getSpec().getApplicationTolerations().getRestartConfig() - .getRestartBackoffMillis()); - Instant now = Instant.now(); - if (restartTime.isAfter(now)) { - return ReconcileProgress.completeAndRequeueAfter( - Duration.between(now, restartTime)); - } + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationState currentState = context.getSparkApplication().getStatus().getCurrentState(); + if (!currentState.getCurrentStateSummary().isInitializing()) { + return proceed(); + } + SparkApplication app = context.getSparkApplication(); + if (app.getStatus().getPreviousAttemptSummary() != null) { + Instant lastTransitionTime = Instant.parse(currentState.getLastTransitionTime()); + Instant restartTime = lastTransitionTime.plusMillis( + app.getSpec().getApplicationTolerations().getRestartConfig() + .getRestartBackoffMillis()); + Instant now = Instant.now(); + if (restartTime.isAfter(now)) { + return ReconcileProgress.completeAndRequeueAfter( + Duration.between(now, restartTime)); + } + } + try { + List createdPreResources = new ArrayList<>(); + for (HasMetadata resource : context.getDriverPreResourcesSpec()) { + Optional createdResource = + 
SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + resource); + if (createdResource.isPresent()) { + createdPreResources.add(createdResource.get()); + } else { + updateStatusForCreationFailure(context, resource, statusRecorder); + return completeAndImmediateRequeue(); } - try { - List createdPreResources = new ArrayList<>(); - for (HasMetadata resource : context.getDriverPreResourcesSpec()) { - Optional createdResource = - SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), - resource); - if (createdResource.isPresent()) { - createdPreResources.add(createdResource.get()); - } else { - updateStatusForCreationFailure(context, resource, statusRecorder); - return completeAndImmediateRequeue(); - } - } - Optional driverPod = - SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), - context.getDriverPodSpec()); - if (driverPod.isPresent()) { - DriverResourceDecorator decorator = new DriverResourceDecorator(driverPod.get()); - createdPreResources.forEach(decorator::decorate); - context.getClient().resourceList(createdPreResources).forceConflicts() - .serverSideApply(); - List driverResources = context.getDriverResourcesSpec(); - driverResources.forEach(decorator::decorate); - for (HasMetadata resource : driverResources) { - Optional createdResource = - SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), - resource); - if (createdResource.isEmpty()) { - updateStatusForCreationFailure(context, resource, statusRecorder); - return completeAndImmediateRequeue(); - } - } - } - ApplicationStatus updatedStatus = context.getSparkApplication().getStatus() - .appendNewState(new ApplicationState(ApplicationStateSummary.DRIVER_REQUESTED, - Constants.DriverRequestedMessage)); - statusRecorder.persistStatus(context, updatedStatus); - return completeAndDefaultRequeue(); - } catch (Exception e) { - if (log.isErrorEnabled()) { - log.error("Failed to request driver resource.", e); - } - String errorMessage = ScheduleFailureMessage + - " StackTrace: " + - buildGeneralErrorMessage(e); - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() - .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, - errorMessage))); + } + Optional driverPod = + SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + context.getDriverPodSpec()); + if (driverPod.isPresent()) { + DriverResourceDecorator decorator = new DriverResourceDecorator(driverPod.get()); + createdPreResources.forEach(decorator::decorate); + context.getClient().resourceList(createdPreResources).forceConflicts() + .serverSideApply(); + List driverResources = context.getDriverResourcesSpec(); + driverResources.forEach(decorator::decorate); + for (HasMetadata resource : driverResources) { + Optional createdResource = + SparkReconcilerUtils.getOrCreateSecondaryResource(context.getClient(), + resource); + if (createdResource.isEmpty()) { + updateStatusForCreationFailure(context, resource, statusRecorder); return completeAndImmediateRequeue(); + } } + } + ApplicationStatus updatedStatus = context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.DRIVER_REQUESTED, + Constants.DriverRequestedMessage)); + statusRecorder.persistStatus(context, updatedStatus); + return completeAndDefaultRequeue(); + } catch (Exception e) { + if (log.isErrorEnabled()) { + log.error("Failed to request driver resource.", e); + } + String errorMessage = ScheduleFailureMessage + + " StackTrace: " + + 
buildGeneralErrorMessage(e); + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, + errorMessage))); + return completeAndImmediateRequeue(); } + } - private void updateStatusForCreationFailure(SparkApplicationContext context, - HasMetadata resourceSpec, - StatusRecorder statusRecorder) { - if (log.isErrorEnabled()) { - log.error("Failed all attempts to request driver resource {}.", - resourceSpec.getMetadata()); - } - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() - .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, - "Failed to request resource for driver with kind: " - + resourceSpec.getKind() - + ", name: " - + resourceSpec.getMetadata().getName()))); + private void updateStatusForCreationFailure(SparkApplicationContext context, + HasMetadata resourceSpec, + StatusRecorder statusRecorder) { + if (log.isErrorEnabled()) { + log.error("Failed all attempts to request driver resource {}.", + resourceSpec.getMetadata()); } + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, + "Failed to request resource for driver with kind: " + + resourceSpec.getKind() + + ", name: " + + resourceSpec.getMetadata().getName()))); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java index 6a1a5eef..8e792901 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java @@ -18,7 +18,12 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.SparkApplication; import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; @@ -27,10 +32,6 @@ import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; import static org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils.driverUnexpectedRemoved; @@ -39,35 +40,35 @@ * Basic reconcile step for application */ public abstract class AppReconcileStep { - public abstract ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder); + public abstract ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder); - protected ReconcileProgress observeDriver(final SparkApplicationContext context, - final StatusRecorder statusRecorder, - final List observers) { - Optional driverPodOptional = context.getDriverPod(); - SparkApplication app = context.getSparkApplication(); - ApplicationStatus currentStatus = 
app.getStatus(); - if (driverPodOptional.isPresent()) { - List stateUpdates = observers.stream() - .map(o -> o.observe(driverPodOptional.get(), app.getSpec(), app.getStatus())) - .filter(Optional::isPresent) - .map(Optional::get) - .collect(Collectors.toList()); - if (stateUpdates.isEmpty()) { - return proceed(); - } else { - for (ApplicationState state : stateUpdates) { - currentStatus = currentStatus.appendNewState(state); - } - statusRecorder.persistStatus(context, currentStatus); - return completeAndImmediateRequeue(); - } - } else { - ApplicationStatus updatedStatus = - currentStatus.appendNewState(driverUnexpectedRemoved()); - statusRecorder.persistStatus(context, updatedStatus); - return completeAndImmediateRequeue(); + protected ReconcileProgress observeDriver(final SparkApplicationContext context, + final StatusRecorder statusRecorder, + final List observers) { + Optional driverPodOptional = context.getDriverPod(); + SparkApplication app = context.getSparkApplication(); + ApplicationStatus currentStatus = app.getStatus(); + if (driverPodOptional.isPresent()) { + List stateUpdates = observers.stream() + .map(o -> o.observe(driverPodOptional.get(), app.getSpec(), app.getStatus())) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + if (stateUpdates.isEmpty()) { + return proceed(); + } else { + for (ApplicationState state : stateUpdates) { + currentStatus = currentStatus.appendNewState(state); } + statusRecorder.persistStatus(context, currentStatus); + return completeAndImmediateRequeue(); + } + } else { + ApplicationStatus updatedStatus = + currentStatus.appendNewState(driverUnexpectedRemoved()); + statusRecorder.persistStatus(context, updatedStatus); + return completeAndImmediateRequeue(); } + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java index d303bfca..b61662bb 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java @@ -18,25 +18,26 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.util.List; + import lombok.RequiredArgsConstructor; + import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.reconciler.observers.BaseAppDriverObserver; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.util.List; - /** * Observes secondary resource and update app status if needed */ @RequiredArgsConstructor public class AppResourceObserveStep extends AppReconcileStep { - private final List observers; + private final List observers; - @Override - public ReconcileProgress reconcile(final SparkApplicationContext context, - final StatusRecorder statusRecorder) { - return observeDriver(context, statusRecorder, observers); - } + @Override + public ReconcileProgress reconcile(final SparkApplicationContext context, + final StatusRecorder statusRecorder) { + return observeDriver(context, statusRecorder, observers); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java 
b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java index 16a076f8..651cb1ba 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java @@ -18,7 +18,11 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.util.Collections; +import java.util.Set; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; @@ -29,67 +33,64 @@ import org.apache.spark.kubernetes.operator.utils.PodUtils; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.util.Collections; -import java.util.Set; - import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; /** * Observe whether app acquires enough executors as configured in spec */ public class AppRunningStep extends AppReconcileStep { - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - InstanceConfig instanceConfig = - context.getSparkApplication().getSpec().getApplicationTolerations() - .getInstanceConfig(); - ApplicationStateSummary prevStateSummary = - context.getSparkApplication().getStatus().getCurrentState() - .getCurrentStateSummary(); - ApplicationStateSummary proposedStateSummary; - String stateMessage = - context.getSparkApplication().getStatus().getCurrentState().getMessage(); - if (instanceConfig == null - || instanceConfig.getInitExecutors() == 0L - || (!prevStateSummary.isStarting() && instanceConfig.getMinExecutors() == 0L)) { - proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; - stateMessage = Constants.RunningHealthyMessage; + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + InstanceConfig instanceConfig = + context.getSparkApplication().getSpec().getApplicationTolerations() + .getInstanceConfig(); + ApplicationStateSummary prevStateSummary = + context.getSparkApplication().getStatus().getCurrentState() + .getCurrentStateSummary(); + ApplicationStateSummary proposedStateSummary; + String stateMessage = + context.getSparkApplication().getStatus().getCurrentState().getMessage(); + if (instanceConfig == null + || instanceConfig.getInitExecutors() == 0L + || (!prevStateSummary.isStarting() && instanceConfig.getMinExecutors() == 0L)) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; + } else { + Set executors = context.getExecutorsForApplication(); + long runningExecutors = executors.stream() + .filter(PodUtils::isPodReady) + .count(); + if (prevStateSummary.isStarting()) { + if (runningExecutors >= instanceConfig.getInitExecutors()) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; + } else if (runningExecutors > 0L) { + proposedStateSummary = + ApplicationStateSummary.INITIALIZED_BELOW_THRESHOLD_EXECUTORS; + stateMessage = Constants.InitializedWithBelowThresholdExecutorsMessage; } else { - Set executors = context.getExecutorsForApplication(); - long runningExecutors = executors.stream() - .filter(PodUtils::isPodReady) - 
.count(); - if (prevStateSummary.isStarting()) { - if (runningExecutors >= instanceConfig.getInitExecutors()) { - proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; - stateMessage = Constants.RunningHealthyMessage; - } else if (runningExecutors > 0L) { - proposedStateSummary = - ApplicationStateSummary.INITIALIZED_BELOW_THRESHOLD_EXECUTORS; - stateMessage = Constants.InitializedWithBelowThresholdExecutorsMessage; - } else { - // keep previous state for 0 executor - proposedStateSummary = prevStateSummary; - } - } else { - if (runningExecutors >= instanceConfig.getMinExecutors()) { - proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; - stateMessage = Constants.RunningHealthyMessage; - } else { - proposedStateSummary = - ApplicationStateSummary.RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS; - stateMessage = Constants.RunningWithBelowThresholdExecutorsMessage; - } - } + // keep previous state for 0 executor + proposedStateSummary = prevStateSummary; } - if (!proposedStateSummary.equals(prevStateSummary)) { - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() - .appendNewState(new ApplicationState(proposedStateSummary, stateMessage))); - return completeAndDefaultRequeue(); + } else { + if (runningExecutors >= instanceConfig.getMinExecutors()) { + proposedStateSummary = ApplicationStateSummary.RUNNING_HEALTHY; + stateMessage = Constants.RunningHealthyMessage; } else { - return observeDriver(context, statusRecorder, - Collections.singletonList(new AppDriverRunningObserver())); + proposedStateSummary = + ApplicationStateSummary.RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS; + stateMessage = Constants.RunningWithBelowThresholdExecutorsMessage; } + } + } + if (!proposedStateSummary.equals(prevStateSummary)) { + statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + .appendNewState(new ApplicationState(proposedStateSummary, stateMessage))); + return completeAndDefaultRequeue(); + } else { + return observeDriver(context, statusRecorder, + Collections.singletonList(new AppDriverRunningObserver())); } + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java index ed253d82..d5d91868 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java @@ -28,14 +28,14 @@ * Observes whether app is already terminated. If so, end the reconcile. 
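+ * <p>
+ * Illustrative sketch only ({@code isCompleted()} is an assumed accessor on
+ * ReconcileProgress; the real orchestration lives in SparkApplicationReconciler):
+ * once an app is terminated, this step ends the chain of reconcile steps, e.g.
+ * <pre>{@code
+ * for (AppReconcileStep step : steps) {
+ *   ReconcileProgress progress = step.reconcile(context, statusRecorder);
+ *   if (progress.isCompleted()) {
+ *     return progress; // completeAndNoRequeue() when already terminated
+ *   }
+ * }
+ * }</pre>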
*/ public class AppTerminatedStep extends AppReconcileStep { - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - if (context.getSparkApplication().getStatus().getCurrentState().getCurrentStateSummary() - .isTerminated()) { - statusRecorder.removeCachedStatus(context.getSparkApplication()); - return ReconcileProgress.completeAndNoRequeue(); - } - return proceed(); + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + if (context.getSparkApplication().getStatus().getCurrentState().getCurrentStateSummary() + .isTerminated()) { + statusRecorder.removeCachedStatus(context.getSparkApplication()); + return ReconcileProgress.completeAndNoRequeue(); } + return proceed(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java index 1bbb3012..afd0c755 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java @@ -19,6 +19,7 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; import lombok.extern.slf4j.Slf4j; + import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.spec.DeploymentMode; @@ -36,20 +37,20 @@ */ @Slf4j public class AppValidateStep extends AppReconcileStep { - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - if (!isValidApplicationStatus(context.getSparkApplication())) { - log.warn("Spark application found with empty status. Resetting to initial state."); - statusRecorder.persistStatus(context, new ApplicationStatus()); - } - if (DeploymentMode.ClientMode.equals(context.getSparkApplication().getSpec())) { - ApplicationState failure = new ApplicationState(ApplicationStateSummary.FAILED, - "Client mode is not supported yet."); - statusRecorder.persistStatus(context, - context.getSparkApplication().getStatus().appendNewState(failure)); - return completeAndImmediateRequeue(); - } - return proceed(); + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + if (!isValidApplicationStatus(context.getSparkApplication())) { + log.warn("Spark application found with empty status. 
Resetting to initial state."); + statusRecorder.persistStatus(context, new ApplicationStatus()); + } + if (DeploymentMode.ClientMode.equals(context.getSparkApplication().getSpec().getDeploymentMode())) { + ApplicationState failure = new ApplicationState(ApplicationStateSummary.FAILED, + "Client mode is not supported yet."); + statusRecorder.persistStatus(context, + context.getSparkApplication().getStatus().appendNewState(failure)); + return completeAndImmediateRequeue(); } + return proceed(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java index becc747a..37200e02 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java @@ -18,30 +18,31 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.Pod; + import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.utils.StatusRecorder; -import java.util.Optional; - import static org.apache.spark.kubernetes.operator.Constants.UnknownStateMessage; /** * Abnormal state handler */ public class UnknownStateStep extends AppReconcileStep { - @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - ApplicationState state = - new ApplicationState(ApplicationStateSummary.FAILED, UnknownStateMessage); - Optional<Pod> driver = context.getDriverPod(); - driver.ifPresent(pod -> state.setLastObservedDriverStatus(pod.getStatus())); - statusRecorder.persistStatus(context, - context.getSparkApplication().getStatus().appendNewState(state)); - return ReconcileProgress.completeAndImmediateRequeue(); - } + @Override + public ReconcileProgress reconcile(SparkApplicationContext context, + StatusRecorder statusRecorder) { + ApplicationState state = + new ApplicationState(ApplicationStateSummary.FAILED, UnknownStateMessage); + Optional<Pod> driver = context.getDriverPod(); + driver.ifPresent(pod -> state.setLastObservedDriverStatus(pod.getStatus())); + statusRecorder.persistStatus(context, + context.getSparkApplication().getStatus().appendNewState(state)); + return ReconcileProgress.completeAndImmediateRequeue(); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java index dcd1bafe..7817ad2f 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java @@ -28,45 +28,45 @@ */ public class ApplicationStatusUtils { - public static boolean isValidApplicationStatus(SparkApplication app) { - // null check - return app.getStatus() != null - && app.getStatus().getCurrentState() != null - && app.getStatus().getCurrentState().getCurrentStateSummary() != null; - } + public static boolean
isValidApplicationStatus(SparkApplication app) { + // null check + return app.getStatus() != null + && app.getStatus().getCurrentState() != null + && app.getStatus().getCurrentState().getCurrentStateSummary() != null; + } - public static ApplicationState driverUnexpectedRemoved() { - return new ApplicationState(ApplicationStateSummary.FAILED, - Constants.DriverUnexpectedRemovedMessage); - } + public static ApplicationState driverUnexpectedRemoved() { + return new ApplicationState(ApplicationStateSummary.FAILED, + Constants.DriverUnexpectedRemovedMessage); + } - public static ApplicationState driverLaunchTimedOut() { - return new ApplicationState(ApplicationStateSummary.DRIVER_LAUNCH_TIMED_OUT, - Constants.DriverLaunchTimeoutMessage); - } + public static ApplicationState driverLaunchTimedOut() { + return new ApplicationState(ApplicationStateSummary.DRIVER_LAUNCH_TIMED_OUT, + Constants.DriverLaunchTimeoutMessage); + } - public static ApplicationState driverReadyTimedOut() { - return new ApplicationState(ApplicationStateSummary.SPARK_SESSION_INITIALIZATION_TIMED_OUT, - Constants.DriverLaunchTimeoutMessage); - } + public static ApplicationState driverReadyTimedOut() { + return new ApplicationState(ApplicationStateSummary.SPARK_SESSION_INITIALIZATION_TIMED_OUT, + Constants.DriverLaunchTimeoutMessage); + } - public static ApplicationState executorLaunchTimedOut() { - return new ApplicationState(ApplicationStateSummary.EXECUTORS_LAUNCH_TIMED_OUT, - Constants.ExecutorLaunchTimeoutMessage); - } + public static ApplicationState executorLaunchTimedOut() { + return new ApplicationState(ApplicationStateSummary.EXECUTORS_LAUNCH_TIMED_OUT, + Constants.ExecutorLaunchTimeoutMessage); + } - public static ApplicationState appCancelled() { - return new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, - Constants.AppCancelledMessage); - } + public static ApplicationState appCancelled() { + return new ApplicationState(ApplicationStateSummary.RESOURCE_RELEASED, + Constants.AppCancelledMessage); + } - public static boolean hasReachedState(SparkApplication application, - ApplicationState stateToCheck) { - if (!isValidApplicationStatus(application)) { - return false; - } - return application.getStatus().getStateTransitionHistory().keySet().parallelStream() - .anyMatch(stateId -> stateToCheck.equals( - application.getStatus().getStateTransitionHistory().get(stateId))); + public static boolean hasReachedState(SparkApplication application, + ApplicationState stateToCheck) { + if (!isValidApplicationStatus(application)) { + return false; } + return application.getStatus().getStateTransitionHistory().keySet().parallelStream() + .anyMatch(stateId -> stateToCheck.equals( + application.getStatus().getStateTransitionHistory().get(stateId))); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java index 80f5d621..868359fe 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java @@ -18,58 +18,59 @@ package org.apache.spark.kubernetes.operator.utils; -import org.apache.commons.lang3.StringUtils; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.slf4j.MDC; - import java.util.HashSet; import java.util.Set; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.lang3.StringUtils; +import 
org.slf4j.MDC; + +import org.apache.spark.kubernetes.operator.SparkApplication; + public class LoggingUtils { - public static final class TrackedMDC { - public static final String NamespaceKey = "app_namespace"; - public static final String NameKey = "app_name"; - public static final String UuidKey = "app_uuid"; - public static final String GenerationKey = "app_generation"; - private final ReentrantLock lock = new ReentrantLock(); - private Set<String> keys = new HashSet<>(); + public static final class TrackedMDC { + public static final String NamespaceKey = "app_namespace"; + public static final String NameKey = "app_name"; + public static final String UuidKey = "app_uuid"; + public static final String GenerationKey = "app_generation"; + private final ReentrantLock lock = new ReentrantLock(); + private Set<String> keys = new HashSet<>(); - public void set(final SparkApplication application) { - if (application != null && application.getMetadata() != null) { - try { - lock.lock(); - if (StringUtils.isNotEmpty(application.getMetadata().getNamespace())) { - MDC.put(NamespaceKey, application.getMetadata().getNamespace()); - keys.add(NamespaceKey); - } - if (StringUtils.isNotEmpty(application.getMetadata().getName())) { - MDC.put(NameKey, application.getMetadata().getName()); - keys.add(NameKey); - } - if (StringUtils.isNotEmpty(application.getMetadata().getUid())) { - MDC.put(UuidKey, application.getMetadata().getUid()); - keys.add(UuidKey); - } - MDC.put(GenerationKey, - String.valueOf(application.getMetadata().getGeneration())); - keys.add(GenerationKey); - } finally { - lock.unlock(); - } - } + public void set(final SparkApplication application) { + if (application != null && application.getMetadata() != null) { + try { + lock.lock(); + if (StringUtils.isNotEmpty(application.getMetadata().getNamespace())) { + MDC.put(NamespaceKey, application.getMetadata().getNamespace()); + keys.add(NamespaceKey); + } + if (StringUtils.isNotEmpty(application.getMetadata().getName())) { + MDC.put(NameKey, application.getMetadata().getName()); + keys.add(NameKey); + } + if (StringUtils.isNotEmpty(application.getMetadata().getUid())) { + MDC.put(UuidKey, application.getMetadata().getUid()); + keys.add(UuidKey); + } + MDC.put(GenerationKey, + String.valueOf(application.getMetadata().getGeneration())); + keys.add(GenerationKey); + } finally { + lock.unlock(); } + } + } - public void reset() { - try { - lock.lock(); - for (String mdcKey : keys) { - MDC.remove(mdcKey); - } - keys.clear(); - } finally { - lock.unlock(); - } + public void reset() { + try { + lock.lock(); + for (String mdcKey : keys) { + MDC.remove(mdcKey); } + keys.clear(); + } finally { + lock.unlock(); + } } + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java index a430928e..64ce47a7 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodPhase.java @@ -21,29 +21,29 @@ import io.fabric8.kubernetes.api.model.Pod; public enum PodPhase { - // hope this is provided by k8s client in future - PENDING("pending"), - RUNNING("running"), - FAILED("failed"), - SUCCEEDED("succeeded"), - TERMINATING("terminating"), - UNKNOWN("unknown"); + // hope this is provided by the k8s client in the future + PENDING("pending"), + RUNNING("running"), + FAILED("failed"), + SUCCEEDED("succeeded"), + TERMINATING("terminating"), +
UNKNOWN("unknown"); - private final String description; + private final String description; - PodPhase(String description) { - this.description = description; - } + PodPhase(String description) { + this.description = description; + } - public static PodPhase getPhase(final Pod pod) { - if (pod != null && pod.getStatus() != null) { - for (PodPhase podPhase : values()) { - if (podPhase.description.equalsIgnoreCase(pod.getStatus().getPhase())) { - return podPhase; - } - } + public static PodPhase getPhase(final Pod pod) { + if (pod != null && pod.getStatus() != null) { + for (PodPhase podPhase : values()) { + if (podPhase.description.equalsIgnoreCase(pod.getStatus().getPhase())) { + return podPhase; } - return UNKNOWN; + } } + return UNKNOWN; + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java index 233b50ce..41a1e622 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/PodUtils.java @@ -18,64 +18,65 @@ package org.apache.spark.kubernetes.operator.utils; +import java.util.List; + import io.fabric8.kubernetes.api.model.ContainerStatus; import io.fabric8.kubernetes.api.model.Pod; -import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; -import java.util.List; +import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; import static org.apache.spark.kubernetes.operator.utils.ModelUtils.isDriverMainContainer; public class PodUtils { - public static boolean isPodReady(final Pod pod) { - if (pod == null || pod.getStatus() == null - || pod.getStatus().getConditions() == null - || pod.getStatus().getConditions().isEmpty()) { - return false; - } - return pod.getStatus().getConditions().parallelStream() - .anyMatch(condition -> "ready".equalsIgnoreCase(condition.getType()) - && "true".equalsIgnoreCase(condition.getStatus())); + public static boolean isPodReady(final Pod pod) { + if (pod == null || pod.getStatus() == null + || pod.getStatus().getConditions() == null + || pod.getStatus().getConditions().isEmpty()) { + return false; } + return pod.getStatus().getConditions().parallelStream() + .anyMatch(condition -> "ready".equalsIgnoreCase(condition.getType()) + && "true".equalsIgnoreCase(condition.getStatus())); + } - public static boolean isPodStarted(final Pod driver, - final ApplicationSpec spec) { - // Consider pod as 'started' if any of Spark container is started and ready - if (driver == null || driver.getStatus() == null - || driver.getStatus().getContainerStatuses() == null - || driver.getStatus().getContainerStatuses().isEmpty()) { - return false; - } - - List<ContainerStatus> containerStatusList = driver.getStatus().getContainerStatuses(); + public static boolean isPodStarted(final Pod driver, + final ApplicationSpec spec) { + // Consider the pod 'started' if any of the Spark containers is started and ready + if (driver == null || driver.getStatus() == null + || driver.getStatus().getContainerStatuses() == null + || driver.getStatus().getContainerStatuses().isEmpty()) { + return false; + } - // If there's only one container in given pod, evaluate it - // Otherwise, use the provided name as filter.
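PodUtils.isPodReady above keys off the standard Kubernetes "Ready" pod condition, compared case-insensitively. A quick sketch that builds a pod carrying that condition with the fabric8 builder DSL and exercises the check (assuming the fabric8 kubernetes model classes on the classpath):

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;

public class PodReadyDemo {
    public static void main(String[] args) {
        // A pod whose status carries the standard Ready=True condition.
        Pod readyPod = new PodBuilder()
                .withNewMetadata().withName("exec-1").endMetadata()
                .withNewStatus()
                .addNewCondition().withType("Ready").withStatus("True").endCondition()
                .endStatus()
                .build();
        // Matches the case-insensitive condition check in PodUtils.isPodReady.
        System.out.println(PodUtils.isPodReady(readyPod)); // true
        System.out.println(PodUtils.isPodReady(null));     // false: nothing to inspect
    }
}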
- if (containerStatusList.size() == 1) { - return containerStatusList.get(0).getReady(); - } + List<ContainerStatus> containerStatusList = driver.getStatus().getContainerStatuses(); - return containerStatusList - .stream() - .filter(c -> isDriverMainContainer(spec, c.getName())) - .anyMatch(ContainerStatus::getReady); + // If there's only one container in the given pod, evaluate it + // Otherwise, use the provided name as a filter. + if (containerStatusList.size() == 1) { + return containerStatusList.get(0).getReady(); } - public static boolean isContainerExited(final ContainerStatus containerStatus) { - return containerStatus != null - && containerStatus.getState() != null - && containerStatus.getState().getTerminated() != null; - } + return containerStatusList + .stream() + .filter(c -> isDriverMainContainer(spec, c.getName())) + .anyMatch(ContainerStatus::getReady); + } - public static boolean isContainerRestarted(final ContainerStatus containerStatus) { - return containerStatus != null - && containerStatus.getRestartCount() > 0; - } + public static boolean isContainerExited(final ContainerStatus containerStatus) { + return containerStatus != null + && containerStatus.getState() != null + && containerStatus.getState().getTerminated() != null; + } - public static boolean isContainerFailed(final ContainerStatus containerStatus) { - return isContainerExited(containerStatus) - && containerStatus.getState().getTerminated().getExitCode() > 0; - } + public static boolean isContainerRestarted(final ContainerStatus containerStatus) { + return containerStatus != null + && containerStatus.getRestartCount() > 0; + } + + public static boolean isContainerFailed(final ContainerStatus containerStatus) { + return isContainerExited(containerStatus) + && containerStatus.getState().getTerminated().getExitCode() > 0; + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java index 61e18e5a..dd6e5308 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ProbeUtil.java @@ -17,38 +17,38 @@ package org.apache.spark.kubernetes.operator.utils; -import com.sun.net.httpserver.HttpExchange; -import io.javaoperatorsdk.operator.Operator; -import lombok.extern.slf4j.Slf4j; - import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Optional; +import com.sun.net.httpserver.HttpExchange; +import io.javaoperatorsdk.operator.Operator; +import lombok.extern.slf4j.Slf4j; + @Slf4j public class ProbeUtil { - public static void sendMessage(HttpExchange httpExchange, int code, String message) - throws IOException { - try (var outputStream = httpExchange.getResponseBody()) { - var bytes = message.getBytes(StandardCharsets.UTF_8); - httpExchange.sendResponseHeaders(code, bytes.length); - outputStream.write(bytes); - outputStream.flush(); - } + public static void sendMessage(HttpExchange httpExchange, int code, String message) + throws IOException { + try (var outputStream = httpExchange.getResponseBody()) { + var bytes = message.getBytes(StandardCharsets.UTF_8); + httpExchange.sendResponseHeaders(code, bytes.length); + outputStream.write(bytes); + outputStream.flush(); } + } - public static Optional<Boolean> areOperatorsStarted(List<Operator> operators) { - return operators.stream().map(operator -> { - var runtimeInfo = operator.getRuntimeInfo(); - if (runtimeInfo
!= null) { - if (!operator.getRuntimeInfo().isStarted()) { - log.error("Operator is not running"); - return false; - } - return true; - } - return false; - }).reduce((a, b) -> a && b); - } + public static Optional<Boolean> areOperatorsStarted(List<Operator> operators) { + return operators.stream().map(operator -> { + var runtimeInfo = operator.getRuntimeInfo(); + if (runtimeInfo != null) { + if (!operator.getRuntimeInfo().isStarted()) { + log.error("Operator is not running"); + return false; + } + return true; + } + return false; + }).reduce((a, b) -> a && b); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java index 7ba037de..a814d4e1 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkExceptionUtils.java @@ -23,16 +23,16 @@ import org.apache.commons.lang3.exception.ExceptionUtils; public class SparkExceptionUtils { - public static boolean isConflictForExistingResource(KubernetesClientException e) { - return e != null && - e.getCode() == 409 && - e.getStatus() != null && - StringUtils.isNotEmpty(e.getStatus().toString()) && - e.getStatus().toString().toLowerCase().contains("alreadyexists"); - } + public static boolean isConflictForExistingResource(KubernetesClientException e) { + return e != null && + e.getCode() == 409 && + e.getStatus() != null && + StringUtils.isNotEmpty(e.getStatus().toString()) && + e.getStatus().toString().toLowerCase().contains("alreadyexists"); + } - public static String buildGeneralErrorMessage(Exception e) { - return ExceptionUtils.getStackTrace(e); - } + public static String buildGeneralErrorMessage(Exception e) { + return ExceptionUtils.getStackTrace(e); + } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java index 6fef4752..56b055b7 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java @@ -18,6 +18,10 @@ package org.apache.spark.kubernetes.operator.utils; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; @@ -26,15 +30,12 @@ import io.javaoperatorsdk.operator.processing.event.ResourceID; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; + import org.apache.spark.kubernetes.operator.SparkApplication; import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; - import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchFailureBackoffSeconds; import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchMaxRetry; @@ -43,158 +44,158 @@ * Note - this is inspired by * Flink Operator Status Recorder * - * Enables additional
(extendable) observers for Spark App status. - * Cache & version locking might be removed in future version as batch app does not expect - * spec change after submitted. + * Enables additional (extendable) observers for Spark App status. + * Cache & version locking might be removed in a future version as batch apps do not expect + * spec changes after submission. */ @Slf4j public class StatusRecorder { - protected final List<ApplicationStatusListener> appStatusListeners; - protected final ObjectMapper objectMapper = new ObjectMapper(); - protected final ConcurrentHashMap<ResourceID, ObjectNode> statusCache; - - public StatusRecorder(List<ApplicationStatusListener> appStatusListeners) { - this.appStatusListeners = appStatusListeners; - this.statusCache = new ConcurrentHashMap<>(); + protected final List<ApplicationStatusListener> appStatusListeners; + protected final ObjectMapper objectMapper = new ObjectMapper(); + protected final ConcurrentHashMap<ResourceID, ObjectNode> statusCache; + + public StatusRecorder(List<ApplicationStatusListener> appStatusListeners) { + this.appStatusListeners = appStatusListeners; + this.statusCache = new ConcurrentHashMap<>(); + } + + /** + * Update the status of the provided kubernetes resource on the k8s cluster. We use patch + * together with null resourceVersion to try to guarantee that the status update succeeds even + * if the underlying resource spec was updated in the meantime. This is necessary for the correct + * operator behavior. + * + * @param resource Resource for which status update should be performed + */ + @SneakyThrows + private void patchAndCacheStatus(SparkApplication resource, KubernetesClient client) { + ObjectNode newStatusNode = + objectMapper.convertValue(resource.getStatus(), ObjectNode.class); + ResourceID resourceId = ResourceID.fromResource(resource); + ObjectNode previousStatusNode = statusCache.get(resourceId); + + if (newStatusNode.equals(previousStatusNode)) { + log.debug("No status change."); + return; } - /** - * Update the status of the provided kubernetes resource on the k8s cluster. We use patch - * together with null resourceVersion to try to guarantee that the status update succeeds even - * if the underlying resource spec was update in the meantime. This is necessary for the correct - * operator behavior.
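The bounded retry loop inside patchAndCacheStatus is a generic pattern: attempt the patch, back off on a client exception, stop on success, and surface the last error once the attempts are exhausted. A self-contained sketch of that pattern (retryWithBackoff and RetryDemo are illustrative names, not the operator's API):

import java.util.concurrent.TimeUnit;

public final class RetryDemo {
    /** Runs {@code action} up to {@code maxAttempts} times, sleeping between failures. */
    static void retryWithBackoff(Runnable action, int maxAttempts, long backoffSeconds)
            throws InterruptedException {
        RuntimeException last = null;
        for (int i = 0; i < maxAttempts; i++) {
            try {
                action.run();
                return; // success: stop retrying, mirroring the break in the loop above
            } catch (RuntimeException e) {
                last = e;
                TimeUnit.SECONDS.sleep(backoffSeconds); // back off before the next attempt
            }
        }
        throw last; // exhausted all attempts: rethrow the most recent failure
    }

    public static void main(String[] args) throws InterruptedException {
        int[] calls = {0};
        retryWithBackoff(() -> {
            if (++calls[0] < 3) throw new IllegalStateException("transient failure");
        }, 5, 0);
        System.out.println("succeeded after " + calls[0] + " attempts"); // 3
    }
}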
- * - * @param resource Resource for which status update should be performed - */ - @SneakyThrows - private void patchAndCacheStatus(SparkApplication resource, KubernetesClient client) { - ObjectNode newStatusNode = - objectMapper.convertValue(resource.getStatus(), ObjectNode.class); - ResourceID resourceId = ResourceID.fromResource(resource); - ObjectNode previousStatusNode = statusCache.get(resourceId); - - if (newStatusNode.equals(previousStatusNode)) { - log.debug("No status change."); - return; - } - - ApplicationStatus prevStatus = - objectMapper.convertValue(previousStatusNode, ApplicationStatus.class); - - Exception err = null; - for (long i = 0; i < StatusPatchMaxRetry.getValue(); i++) { - // We retry the status update 3 times to avoid some intermittent connectivity errors - try { - replaceStatus(resource, prevStatus, client); - err = null; - } catch (KubernetesClientException e) { - log.error("Error while patching status, retrying {}/3...", (i + 1), e); - Thread.sleep( - TimeUnit.SECONDS.toMillis(StatusPatchFailureBackoffSeconds.getValue())); - err = e; - } - } - - if (err != null) { - throw err; - } - - statusCache.put(resourceId, newStatusNode); - appStatusListeners.forEach(listener -> { - listener.listenStatus(resource, prevStatus, resource.getStatus()); - }); + ApplicationStatus prevStatus = + objectMapper.convertValue(previousStatusNode, ApplicationStatus.class); + + Exception err = null; + for (long i = 0; i < StatusPatchMaxRetry.getValue(); i++) { + // Retry the status update up to StatusPatchMaxRetry times to ride over intermittent connectivity errors + try { + replaceStatus(resource, prevStatus, client); + err = null; + break; // success, stop retrying + } catch (KubernetesClientException e) { + log.error("Error while patching status, retrying {}/{}...", (i + 1), StatusPatchMaxRetry.getValue(), e); + Thread.sleep( + TimeUnit.SECONDS.toMillis(StatusPatchFailureBackoffSeconds.getValue())); + err = e; + } } - public void persistStatus(SparkApplicationContext context, - ApplicationStatus newStatus) { - context.getSparkApplication().setStatus(newStatus); - patchAndCacheStatus(context.getSparkApplication(), context.getClient()); + if (err != null) { + throw err; } - private void replaceStatus(SparkApplication resource, ApplicationStatus prevStatus, - KubernetesClient client) - throws JsonProcessingException { - int retries = 0; - while (true) { - try { - var updated = client.resource(resource).lockResourceVersion().updateStatus(); - - // If we successfully replaced the status, update the resource version so we know - // what to lock next in the same reconciliation loop - resource.getMetadata() - .setResourceVersion(updated.getMetadata().getResourceVersion()); - return; - } catch (KubernetesClientException kce) { - // 409 is the error code for conflicts resulting from the locking - if (kce.getCode() == 409) { - var currentVersion = resource.getMetadata().getResourceVersion(); - log.debug( - "Could not apply status update for resource version {}", - currentVersion); - - var latest = client.resource(resource).get(); - var latestVersion = latest.getMetadata().getResourceVersion(); - - if (latestVersion.equals(currentVersion)) { - // This should not happen as long as the client works consistently - log.error("Unable to fetch latest resource version"); - throw kce; - } - - if (latest.getStatus().equals(prevStatus)) { - if (retries++ < 3) { - log.debug( - "Retrying status update for latest version {}", latestVersion); - resource.getMetadata().setResourceVersion(latestVersion); - } else { - // If we cannot get the latest version in 3 tries we throw the error to - //
retry with delay - throw kce; - } - } else { - throw new RuntimeException( - "Status have been modified externally in version " - + latestVersion - + " Previous: " - + objectMapper.writeValueAsString(prevStatus) - + " Latest: " - + objectMapper.writeValueAsString(latest.getStatus()), kce); - } - } else { - // We simply throw non conflict errors, to trigger retry with delay - throw kce; - } + statusCache.put(resourceId, newStatusNode); + appStatusListeners.forEach(listener -> { + listener.listenStatus(resource, prevStatus, resource.getStatus()); + }); + } + + public void persistStatus(SparkApplicationContext context, + ApplicationStatus newStatus) { + context.getSparkApplication().setStatus(newStatus); + patchAndCacheStatus(context.getSparkApplication(), context.getClient()); + } + + private void replaceStatus(SparkApplication resource, ApplicationStatus prevStatus, + KubernetesClient client) + throws JsonProcessingException { + int retries = 0; + while (true) { + try { + var updated = client.resource(resource).lockResourceVersion().updateStatus(); + + // If we successfully replaced the status, update the resource version so we know + // what to lock next in the same reconciliation loop + resource.getMetadata() + .setResourceVersion(updated.getMetadata().getResourceVersion()); + return; + } catch (KubernetesClientException kce) { + // 409 is the error code for conflicts resulting from the locking + if (kce.getCode() == 409) { + var currentVersion = resource.getMetadata().getResourceVersion(); + log.debug( + "Could not apply status update for resource version {}", + currentVersion); + + var latest = client.resource(resource).get(); + var latestVersion = latest.getMetadata().getResourceVersion(); + + if (latestVersion.equals(currentVersion)) { + // This should not happen as long as the client works consistently + log.error("Unable to fetch latest resource version"); + throw kce; + } + + if (latest.getStatus().equals(prevStatus)) { + if (retries++ < 3) { + log.debug( + "Retrying status update for latest version {}", latestVersion); + resource.getMetadata().setResourceVersion(latestVersion); + } else { + // If we cannot get the latest version in 3 tries we throw the error to + // retry with delay + throw kce; } - } - } - - /** - * Update the custom resource status based on the in-memory cached to ensure that any status - * updates that we made previously are always visible in the reconciliation loop. This is - * required due to our custom status patching logic. - * - *

If the cache doesn't have a status stored, we do no update. This happens when the operator - * reconciles a resource for the first time after a restart. - * - * @param resource Resource for which the status should be updated from the cache - */ - public void updateStatusFromCache(SparkApplication resource) { - var key = ResourceID.fromResource(resource); - var cachedStatus = statusCache.get(key); - if (cachedStatus != null) { - resource.setStatus( - objectMapper.convertValue( - cachedStatus, resource.getStatus().getClass())); + } else { + throw new RuntimeException( + "Status has been modified externally in version " + + latestVersion + + " Previous: " + + objectMapper.writeValueAsString(prevStatus) + + " Latest: " + + objectMapper.writeValueAsString(latest.getStatus()), kce); + } } else { - // Initialize cache with current status copy - statusCache.put(key, objectMapper.convertValue(resource.getStatus(), ObjectNode.class)); + // We simply throw non-conflict errors, to trigger retry with delay + throw kce; } + } } - - /** - * Remove cached status - */ - public void removeCachedStatus(SparkApplication resource) { - statusCache.remove(ResourceID.fromResource(resource)); + } + + /** + * Update the custom resource status based on the in-memory cache to ensure that any status + * updates that we made previously are always visible in the reconciliation loop. This is + * required due to our custom status patching logic. + * + *

If the cache doesn't have a status stored, we do not update. This happens when the operator + * reconciles a resource for the first time after a restart. + * + * @param resource Resource for which the status should be updated from the cache + */ + public void updateStatusFromCache(SparkApplication resource) { + var key = ResourceID.fromResource(resource); + var cachedStatus = statusCache.get(key); + if (cachedStatus != null) { + resource.setStatus( + objectMapper.convertValue( + cachedStatus, resource.getStatus().getClass())); + } else { + // Initialize cache with current status copy + statusCache.put(key, objectMapper.convertValue(resource.getStatus(), ObjectNode.class)); } + } + + /** + * Remove cached status + */ + public void removeCachedStatus(SparkApplication resource) { + statusCache.remove(ResourceID.fromResource(resource)); + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java index 09b84c18..2ef3683d 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/ConfigOptionTest.java @@ -18,164 +18,164 @@ package org.apache.spark.kubernetes.operator.config; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - import java.util.HashMap; import java.util.Map; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + class ConfigOptionTest { - @Test - void testResolveValueWithoutOverride() { - byte defaultByteValue = 9; - short defaultShortValue = 9; - long defaultLongValue = 9; - int defaultIntValue = 9; - float defaultFloatValue = 9.0f; - double defaultDoubleValue = 9.0; - boolean defaultBooleanValue = false; - String defaultStringValue = "bar"; - ConfigOption<String> testStrConf = ConfigOption.<String>builder() - .key("foo") - .typeParameterClass(String.class) - .description("foo foo.") - .defaultValue(defaultStringValue) - .build(); - ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() - .key("fooint") - .typeParameterClass(Integer.class) - .description("foo foo.") - .defaultValue(defaultIntValue) - .build(); - ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() - .key("fooshort") - .typeParameterClass(Short.class) - .description("foo foo.") - .defaultValue(defaultShortValue) - .build(); - ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() - .key("foolong") - .typeParameterClass(Long.class) - .description("foo foo.") - .defaultValue(defaultLongValue) - .build(); - ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() - .key("foobool") - .typeParameterClass(Boolean.class) - .description("foo foo.") - .defaultValue(defaultBooleanValue) - .build(); - ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() - .key("foofloat") - .typeParameterClass(Float.class) - .description("foo foo.") - .defaultValue(defaultFloatValue) - .build(); - ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() - .key("foodouble") - .typeParameterClass(Double.class) - .description("foo foo.") - .defaultValue(defaultDoubleValue) - .build(); - ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() - .key("foobyte") - .typeParameterClass(Byte.class) - .description("foo foo.") - .defaultValue(defaultByteValue) - .build(); - Assertions.assertEquals(defaultStringValue, testStrConf.getValue()); - Assertions.assertEquals(defaultIntValue, testIntConf.getValue()); - Assertions.assertEquals(defaultLongValue,
testLongConf.getValue()); - Assertions.assertEquals(defaultBooleanValue, testBooleanConf.getValue()); - Assertions.assertEquals(defaultFloatValue, testFloatConf.getValue()); - Assertions.assertEquals(defaultByteValue, testByteConf.getValue()); - Assertions.assertEquals(defaultShortValue, testShortConf.getValue()); - Assertions.assertEquals(defaultDoubleValue, testDoubleConf.getValue()); - } + @Test + void testResolveValueWithoutOverride() { + byte defaultByteValue = 9; + short defaultShortValue = 9; + long defaultLongValue = 9; + int defaultIntValue = 9; + float defaultFloatValue = 9.0f; + double defaultDoubleValue = 9.0; + boolean defaultBooleanValue = false; + String defaultStringValue = "bar"; + ConfigOption<String> testStrConf = ConfigOption.<String>builder() + .key("foo") + .typeParameterClass(String.class) + .description("foo foo.") + .defaultValue(defaultStringValue) + .build(); + ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() + .key("fooint") + .typeParameterClass(Integer.class) + .description("foo foo.") + .defaultValue(defaultIntValue) + .build(); + ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() + .key("fooshort") + .typeParameterClass(Short.class) + .description("foo foo.") + .defaultValue(defaultShortValue) + .build(); + ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() + .key("foolong") + .typeParameterClass(Long.class) + .description("foo foo.") + .defaultValue(defaultLongValue) + .build(); + ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() + .key("foobool") + .typeParameterClass(Boolean.class) + .description("foo foo.") + .defaultValue(defaultBooleanValue) + .build(); + ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() + .key("foofloat") + .typeParameterClass(Float.class) + .description("foo foo.") + .defaultValue(defaultFloatValue) + .build(); + ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() + .key("foodouble") + .typeParameterClass(Double.class) + .description("foo foo.") + .defaultValue(defaultDoubleValue) + .build(); + ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() + .key("foobyte") + .typeParameterClass(Byte.class) + .description("foo foo.") + .defaultValue(defaultByteValue) + .build(); + Assertions.assertEquals(defaultStringValue, testStrConf.getValue()); + Assertions.assertEquals(defaultIntValue, testIntConf.getValue()); + Assertions.assertEquals(defaultLongValue, testLongConf.getValue()); + Assertions.assertEquals(defaultBooleanValue, testBooleanConf.getValue()); + Assertions.assertEquals(defaultFloatValue, testFloatConf.getValue()); + Assertions.assertEquals(defaultByteValue, testByteConf.getValue()); + Assertions.assertEquals(defaultShortValue, testShortConf.getValue()); + Assertions.assertEquals(defaultDoubleValue, testDoubleConf.getValue()); + } - @Test - void testResolveValueWithOverride() { - byte overrideByteValue = 10; - short overrideShortValue = 10; - long overrideLongValue = 10; - int overrideIntValue = 10; - float overrideFloatValue = 10.0f; - double overrideDoubleValue = 10.0; - boolean overrideBooleanValue = true; - String overrideStringValue = "barbar"; - byte defaultByteValue = 9; - short defaultShortValue = 9; - long defaultLongValue = 9; - int defaultIntValue = 9; - float defaultFloatValue = 9.0f; - double defaultDoubleValue = 9.0; - boolean defaultBooleanValue = false; - String defaultStringValue = "bar"; - Map<String, String> configOverride = new HashMap<>(); - configOverride.put("foobyte", "10"); - configOverride.put("fooshort", "10"); - configOverride.put("foolong", "10"); - configOverride.put("fooint", "10"); - configOverride.put("foofloat", "10.0");
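Both tests hinge on one resolution rule: getValue() returns the typed default until the conf manager supplies a string override, which is then parsed into the option's type. A toy stand-in making that rule explicit (ToyOption is hypothetical, not the operator's ConfigOption):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Toy stand-in for ConfigOption: a typed default plus a string-to-T parser for overrides.
final class ToyOption<T> {
    private final String key;
    private final T defaultValue;
    private final Function<String, T> parser;

    ToyOption(String key, T defaultValue, Function<String, T> parser) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
    }

    T getValue(Map<String, String> overrides) {
        String raw = overrides.get(key);
        return raw == null ? defaultValue : parser.apply(raw); // override wins when present
    }

    public static void main(String[] args) {
        Map<String, String> overrides = new HashMap<>();
        ToyOption<Integer> opt = new ToyOption<>("fooint", 9, Integer::valueOf);
        System.out.println(opt.getValue(overrides)); // 9  (default)
        overrides.put("fooint", "10");
        System.out.println(opt.getValue(overrides)); // 10 (parsed override)
    }
}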
- configOverride.put("foodouble", "10.0"); - configOverride.put("foobool", "true"); - configOverride.put("foo", "barbar"); - SparkOperatorConfManager.INSTANCE.refresh(configOverride); - ConfigOption<String> testStrConf = ConfigOption.<String>builder() - .key("foo") - .typeParameterClass(String.class) - .description("foo foo.") - .defaultValue(defaultStringValue) - .build(); - ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() - .key("fooint") - .typeParameterClass(Integer.class) - .description("foo foo.") - .defaultValue(defaultIntValue) - .build(); - ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() - .key("fooshort") - .typeParameterClass(Short.class) - .description("foo foo.") - .defaultValue(defaultShortValue) - .build(); - ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() - .key("foolong") - .typeParameterClass(Long.class) - .description("foo foo.") - .defaultValue(defaultLongValue) - .build(); - ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() - .key("foobool") - .typeParameterClass(Boolean.class) - .description("foo foo.") - .defaultValue(defaultBooleanValue) - .build(); - ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() - .key("foofloat") - .typeParameterClass(Float.class) - .description("foo foo.") - .defaultValue(defaultFloatValue) - .build(); - ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() - .key("foodouble") - .typeParameterClass(Double.class) - .description("foo foo.") - .defaultValue(defaultDoubleValue) - .build(); - ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() - .key("foobyte") - .typeParameterClass(Byte.class) - .description("foo foo.") - .defaultValue(defaultByteValue) - .build(); - Assertions.assertEquals(overrideStringValue, testStrConf.getValue()); - Assertions.assertEquals(overrideIntValue, testIntConf.getValue()); - Assertions.assertEquals(overrideLongValue, testLongConf.getValue()); - Assertions.assertEquals(overrideBooleanValue, testBooleanConf.getValue()); - Assertions.assertEquals(overrideFloatValue, testFloatConf.getValue()); - Assertions.assertEquals(overrideByteValue, testByteConf.getValue()); - Assertions.assertEquals(overrideShortValue, testShortConf.getValue()); - Assertions.assertEquals(overrideDoubleValue, testDoubleConf.getValue()); - } + @Test + void testResolveValueWithOverride() { + byte overrideByteValue = 10; + short overrideShortValue = 10; + long overrideLongValue = 10; + int overrideIntValue = 10; + float overrideFloatValue = 10.0f; + double overrideDoubleValue = 10.0; + boolean overrideBooleanValue = true; + String overrideStringValue = "barbar"; + byte defaultByteValue = 9; + short defaultShortValue = 9; + long defaultLongValue = 9; + int defaultIntValue = 9; + float defaultFloatValue = 9.0f; + double defaultDoubleValue = 9.0; + boolean defaultBooleanValue = false; + String defaultStringValue = "bar"; + Map<String, String> configOverride = new HashMap<>(); + configOverride.put("foobyte", "10"); + configOverride.put("fooshort", "10"); + configOverride.put("foolong", "10"); + configOverride.put("fooint", "10"); + configOverride.put("foofloat", "10.0"); + configOverride.put("foodouble", "10.0"); + configOverride.put("foobool", "true"); + configOverride.put("foo", "barbar"); + SparkOperatorConfManager.INSTANCE.refresh(configOverride); + ConfigOption<String> testStrConf = ConfigOption.<String>builder() + .key("foo") + .typeParameterClass(String.class) + .description("foo foo.") + .defaultValue(defaultStringValue) + .build(); + ConfigOption<Integer> testIntConf = ConfigOption.<Integer>builder() + .key("fooint") + .typeParameterClass(Integer.class) + .description("foo foo.")
.defaultValue(defaultIntValue) + .build(); + ConfigOption<Short> testShortConf = ConfigOption.<Short>builder() + .key("fooshort") + .typeParameterClass(Short.class) + .description("foo foo.") + .defaultValue(defaultShortValue) + .build(); + ConfigOption<Long> testLongConf = ConfigOption.<Long>builder() + .key("foolong") + .typeParameterClass(Long.class) + .description("foo foo.") + .defaultValue(defaultLongValue) + .build(); + ConfigOption<Boolean> testBooleanConf = ConfigOption.<Boolean>builder() + .key("foobool") + .typeParameterClass(Boolean.class) + .description("foo foo.") + .defaultValue(defaultBooleanValue) + .build(); + ConfigOption<Float> testFloatConf = ConfigOption.<Float>builder() + .key("foofloat") + .typeParameterClass(Float.class) + .description("foo foo.") + .defaultValue(defaultFloatValue) + .build(); + ConfigOption<Double> testDoubleConf = ConfigOption.<Double>builder() + .key("foodouble") + .typeParameterClass(Double.class) + .description("foo foo.") + .defaultValue(defaultDoubleValue) + .build(); + ConfigOption<Byte> testByteConf = ConfigOption.<Byte>builder() + .key("foobyte") + .typeParameterClass(Byte.class) + .description("foo foo.") + .defaultValue(defaultByteValue) + .build(); + Assertions.assertEquals(overrideStringValue, testStrConf.getValue()); + Assertions.assertEquals(overrideIntValue, testIntConf.getValue()); + Assertions.assertEquals(overrideLongValue, testLongConf.getValue()); + Assertions.assertEquals(overrideBooleanValue, testBooleanConf.getValue()); + Assertions.assertEquals(overrideFloatValue, testFloatConf.getValue()); + Assertions.assertEquals(overrideByteValue, testByteConf.getValue()); + Assertions.assertEquals(overrideShortValue, testShortConf.getValue()); + Assertions.assertEquals(overrideDoubleValue, testDoubleConf.getValue()); + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java index 82859732..3bd77b2e 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConfManagerTest.java @@ -18,55 +18,55 @@ package org.apache.spark.kubernetes.operator.config; +import java.io.IOException; +import java.util.Collections; + import org.apache.commons.lang3.StringUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import java.io.IOException; -import java.util.Collections; - class SparkOperatorConfManagerTest { - @Test - void testLoadPropertiesFromInitFile() throws IOException { - String propBackUp = System.getProperty("spark.operator.base.property.file.name"); - try { - String propsFilePath = SparkOperatorConfManagerTest.class.getClassLoader() - .getResource("spark-operator.properties").getPath(); - System.setProperty("spark.operator.base.property.file.name", propsFilePath); - SparkOperatorConfManager confManager = new SparkOperatorConfManager(); - Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); - } finally { - if (StringUtils.isNotEmpty(propBackUp)) { - System.setProperty("spark.operator.base.property.file.name", propBackUp); - } else { - System.clearProperty("spark.operator.base.property.file.name"); - } - } + @Test + void testLoadPropertiesFromInitFile() throws IOException { + String propBackUp = System.getProperty("spark.operator.base.property.file.name"); + try { + String propsFilePath = SparkOperatorConfManagerTest.class.getClassLoader() +
.getResource("spark-operator.properties").getPath(); + System.setProperty("spark.operator.base.property.file.name", propsFilePath); + SparkOperatorConfManager confManager = new SparkOperatorConfManager(); + Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); + } finally { + if (StringUtils.isNotEmpty(propBackUp)) { + System.setProperty("spark.operator.base.property.file.name", propBackUp); + } else { + System.clearProperty("spark.operator.base.property.file.name"); + } } + } - @Test - void testOverrideProperties() { - String propBackUp = System.getProperty("spark.operator.foo"); - System.setProperty("spark.operator.foo", "bar"); - try { - SparkOperatorConfManager confManager = new SparkOperatorConfManager(); - Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); - Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); + @Test + void testOverrideProperties() { + String propBackUp = System.getProperty("spark.operator.foo"); + System.setProperty("spark.operator.foo", "bar"); + try { + SparkOperatorConfManager confManager = new SparkOperatorConfManager(); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("bar", confManager.getValue("spark.operator.foo")); - confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbar")); - Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); - Assertions.assertEquals("barbar", confManager.getValue("spark.operator.foo")); + confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbar")); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("barbar", confManager.getValue("spark.operator.foo")); - confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbarbar")); - Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); - Assertions.assertEquals("barbarbar", confManager.getValue("spark.operator.foo")); + confManager.refresh(Collections.singletonMap("spark.operator.foo", "barbarbar")); + Assertions.assertEquals("bar", confManager.getInitialValue("spark.operator.foo")); + Assertions.assertEquals("barbarbar", confManager.getValue("spark.operator.foo")); - } finally { - if (StringUtils.isNotEmpty(propBackUp)) { - System.setProperty("spark.operator.foo", propBackUp); - } else { - System.clearProperty("spark.operator.foo"); - } - } + } finally { + if (StringUtils.isNotEmpty(propBackUp)) { + System.setProperty("spark.operator.foo", propBackUp); + } else { + System.clearProperty("spark.operator.foo"); + } } + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java index 452d07d5..59976fc6 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/health/SentinelManagerTest.java @@ -17,6 +17,15 @@ package org.apache.spark.kubernetes.operator.health; +import javax.validation.constraints.NotNull; +import java.time.Duration; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + import io.fabric8.kubernetes.api.model.KubernetesResourceList; import io.fabric8.kubernetes.client.KubernetesClient; 
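The conf manager test above exercises a two-layer lookup: values captured at startup stay visible through getInitialValue(), while refresh() only affects what getValue() returns. A compact sketch of such a layered store (LayeredConf is a toy class, not the operator's implementation):

import java.util.HashMap;
import java.util.Map;

// Toy two-layer config store: startup values stay frozen, refreshes overlay them.
final class LayeredConf {
    private final Map<String, String> initial = new HashMap<>();
    private final Map<String, String> overrides = new HashMap<>();

    LayeredConf(Map<String, String> startup) { initial.putAll(startup); }

    void refresh(Map<String, String> updates) { overrides.putAll(updates); }

    String getInitialValue(String key) { return initial.get(key); }

    // Overrides win; fall back to the frozen startup value.
    String getValue(String key) { return overrides.getOrDefault(key, initial.get(key)); }

    public static void main(String[] args) {
        LayeredConf conf = new LayeredConf(Map.of("spark.operator.foo", "bar"));
        conf.refresh(Map.of("spark.operator.foo", "barbar"));
        System.out.println(conf.getInitialValue("spark.operator.foo")); // bar
        System.out.println(conf.getValue("spark.operator.foo"));        // barbar
    }
}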
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; @@ -24,9 +33,6 @@ import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer; import io.javaoperatorsdk.operator.processing.event.ResourceID; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager; -import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; @@ -35,14 +41,9 @@ import org.junit.jupiter.api.TestMethodOrder; import org.mockito.MockedStatic; -import javax.validation.constraints.NotNull; -import java.time.Duration; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.config.SparkOperatorConfManager; +import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL; import static org.apache.spark.kubernetes.operator.Constants.SPARK_CONF_SENTINEL_DUMMY_FIELD; @@ -54,150 +55,150 @@ @EnableKubernetesMockClient(crud = true) @TestMethodOrder(MethodOrderer.OrderAnnotation.class) class SentinelManagerTest { - public static final String DEFAULT = "default"; - public static final String SPARK_DEMO = "spark-demo"; - @NotNull - KubernetesClient kubernetesClient; - @NotNull - KubernetesMockServer server; - public static final int SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS = 10; - - @BeforeAll - static void beforeAll() { - Map<String, String> overrideValue = - Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), - Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS) - .toString()); - SparkOperatorConfManager.INSTANCE.refresh(overrideValue); - } - - @Test - @Order(1) - void testIsSentinelResource() { - SparkApplication sparkApplication = new SparkApplication(); - var lableMap = sparkApplication.getMetadata().getLabels(); - lableMap.put(SENTINEL_LABEL, "true"); - Set<String> namespaces = new HashSet<>(); - sparkApplication.getMetadata().setNamespace("spark-test"); - namespaces.add("spark-test"); - try (MockedStatic<SparkReconcilerUtils> mockUtils = - mockStatic(SparkReconcilerUtils.class)) { - mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); - Assertions.assertTrue(SentinelManager.isSentinelResource(sparkApplication)); - } + public static final String DEFAULT = "default"; + public static final String SPARK_DEMO = "spark-demo"; + @NotNull + KubernetesClient kubernetesClient; + @NotNull + KubernetesMockServer server; + public static final int SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS = 10; + + @BeforeAll + static void beforeAll() { + Map<String, String> overrideValue = + Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), + Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS) + .toString()); + SparkOperatorConfManager.INSTANCE.refresh(overrideValue); + } + + @Test + @Order(1) + void testIsSentinelResource() { + SparkApplication sparkApplication = new SparkApplication(); + var labelMap = sparkApplication.getMetadata().getLabels(); + labelMap.put(SENTINEL_LABEL, "true"); + Set<String> namespaces = new HashSet<>(); + sparkApplication.getMetadata().setNamespace("spark-test");
namespaces.add("spark-test"); + try (MockedStatic<SparkReconcilerUtils> mockUtils = + mockStatic(SparkReconcilerUtils.class)) { + mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces); + Assertions.assertTrue(SentinelManager.isSentinelResource(sparkApplication)); } - } - - @Test - @Order(3) - void testHandleSentinelResourceReconciliation() throws InterruptedException { - // Reduce the SENTINEL_RESOURCE_RECONCILIATION_DELAY time to 0 - SparkOperatorConfManager.INSTANCE.refresh( - Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), "10")); - - // Before Spark Reconciler Started - var mockDeployment = createMockDeployment(DEFAULT); - kubernetesClient.resource(SparkReconcilerUtils.clone(mockDeployment)).create(); - var crList = kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list(); - var sparkApplication = crList.getItems().get(0); - var generation = sparkApplication.getMetadata().getGeneration(); - Assertions.assertEquals(generation, 1L); - - // Spark Reconciler Handle Sentinel Resources at the first time - SentinelManager sentinelManager = new SentinelManager(); - sentinelManager.handleSentinelResourceReconciliation(sparkApplication, kubernetesClient); - var crList2 = - kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list(); - var sparkApplication2 = crList2.getItems().get(0); - var sparkConf2 = new HashMap<>(sparkApplication2.getSpec().getSparkConf()); - var generation2 = sparkApplication2.getMetadata().getGeneration(); - - Assertions.assertEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD), "1"); - Assertions.assertEquals(generation2, 2L); - SentinelManager.SentinelResourceState state2 = - (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources() - .get(ResourceID.fromResource(mockDeployment)); - var previousGeneration2 = state2.previousGeneration; - Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy()); - Assertions.assertEquals(previousGeneration2, 1L); - - Thread.sleep( - Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS * 2).toMillis()); - var crList3 = kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list() - .getItems(); - var sparkApplication3 = crList3.get(0); - var sparkConf3 = new HashMap<>(sparkApplication3.getSpec().getSparkConf()); - // Spark Sentinel Applications' s k8s generation should change - Assertions.assertNotEquals(sparkApplication3.getMetadata().getGeneration(), generation2); - // Spark conf SPARK_CONF_SENTINEL_DUMMY_FIELD values should increase - Assertions.assertNotEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD), - sparkConf3.get(SPARK_CONF_SENTINEL_DUMMY_FIELD)); - SentinelManager.SentinelResourceState state3 = - (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources() - .get(ResourceID.fromResource(mockDeployment)); - Assertions.assertEquals(state3.previousGeneration, previousGeneration2); - // Given the 2 * SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS, the reconcile method is - // not called to handleSentinelResourceReconciliation to update - Assertions.assertFalse(sentinelManager.allSentinelsAreHealthy()); - - sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient); - sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient); - boolean isHealthy; - long currentTimeInMills = System.currentTimeMillis(); - do { - isHealthy = sentinelManager.allSentinelsAreHealthy(); - } while (!isHealthy && notTimedOut(currentTimeInMills,
-
-    @Test
-    @Order(2)
-    void sentinelManagerShouldReportHealthyWhenWatchedNamespaceIsReduced()
-        throws InterruptedException {
-        Set<String> namespaces = new HashSet<>();
-        namespaces.add(DEFAULT);
-        namespaces.add(SPARK_DEMO);
-
-        try (MockedStatic<SparkReconcilerUtils> mockUtils =
-                 mockStatic(SparkReconcilerUtils.class)) {
-            mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces);
-            SentinelManager sentinelManager = new SentinelManager();
-            NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>,
-                Resource<SparkApplication>> cr1 =
-                kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT);
-            NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>,
-                Resource<SparkApplication>> cr2 =
-                kubernetesClient.resources(SparkApplication.class).inNamespace(SPARK_DEMO);
-
-            var mockDeployment1 = createMockDeployment(DEFAULT);
-            var mockDeployment2 = createMockDeployment(SPARK_DEMO);
-            cr1.create(mockDeployment1);
-            cr2.create(mockDeployment2);
-
-            var crList1 =
-                kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list();
-            var crList2 =
-                kubernetesClient.resources(SparkApplication.class)
-                    .inNamespace(SPARK_DEMO).list();
-            var sparkApplication1 = crList1.getItems().get(0);
-            var sparkApplication2 = crList2.getItems().get(0);
-            sentinelManager.handleSentinelResourceReconciliation(sparkApplication1,
-                kubernetesClient);
-            sentinelManager.handleSentinelResourceReconciliation(sparkApplication2,
-                kubernetesClient);
-            Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 2,
-                "Sentinel Manager should watch on resources in two namespaces");
-            Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(),
-                "Sentinel Manager should report healthy");
-            namespaces.remove(SPARK_DEMO);
-            Thread.sleep(Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS)
-                .toMillis());
-            Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(),
-                "Sentinel Manager should report healthy after one namespace is " +
-                    "removed from the watch");
-            Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 1,
-                "Sentinel Manager should only watch on one namespace");
-        }
-    }
+
+  @Test
+  @Order(3)
+  void testHandleSentinelResourceReconciliation() throws InterruptedException {
+    // Reduce the SENTINEL_RESOURCE_RECONCILIATION_DELAY time to 0
+    SparkOperatorConfManager.INSTANCE.refresh(
+        Collections.singletonMap(SENTINEL_RESOURCE_RECONCILIATION_DELAY.getKey(), "10"));
+
+    // Before Spark Reconciler Started
+    var mockDeployment = createMockDeployment(DEFAULT);
+    kubernetesClient.resource(SparkReconcilerUtils.clone(mockDeployment)).create();
+    var crList = kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list();
+    var sparkApplication = crList.getItems().get(0);
+    var generation = sparkApplication.getMetadata().getGeneration();
+    Assertions.assertEquals(generation, 1L);
+
+    // Spark Reconciler Handle Sentinel Resources at the first time
+    SentinelManager sentinelManager = new SentinelManager();
+    sentinelManager.handleSentinelResourceReconciliation(sparkApplication, kubernetesClient);
+    var crList2 =
+        kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list();
+    var sparkApplication2 = crList2.getItems().get(0);
+    var sparkConf2 = new HashMap<>(sparkApplication2.getSpec().getSparkConf());
+    var generation2 = sparkApplication2.getMetadata().getGeneration();
+
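    /* The assertions below pin down the heartbeat mechanism this test exercises:
     * each sentinel pass rewrites SPARK_CONF_SENTINEL_DUMMY_FIELD in spec.sparkConf,
     * so the API server bumps metadata.generation, and health checking reduces to
     * "has the generation advanced since the last check?". A sketch of the update,
     * assuming the field value is a numeric counter kept as a string (an assumption
     * drawn from the "1" / "should increase" assertions, not the actual code):
     *
     *   Map<String, String> conf = new HashMap<>(app.getSpec().getSparkConf());
     *   long next = Long.parseLong(conf.getOrDefault(SPARK_CONF_SENTINEL_DUMMY_FIELD, "0")) + 1;
     *   conf.put(SPARK_CONF_SENTINEL_DUMMY_FIELD, String.valueOf(next));
     *   app.getSpec().setSparkConf(conf);
     */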
+    Assertions.assertEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD), "1");
+    Assertions.assertEquals(generation2, 2L);
+    SentinelManager.SentinelResourceState state2 =
+        (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources()
+            .get(ResourceID.fromResource(mockDeployment));
+    var previousGeneration2 = state2.previousGeneration;
+    Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy());
+    Assertions.assertEquals(previousGeneration2, 1L);
+
+    Thread.sleep(
+        Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS * 2).toMillis());
+    var crList3 = kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list()
+        .getItems();
+    var sparkApplication3 = crList3.get(0);
+    var sparkConf3 = new HashMap<>(sparkApplication3.getSpec().getSparkConf());
+    // Spark Sentinel Applications' s k8s generation should change
+    Assertions.assertNotEquals(sparkApplication3.getMetadata().getGeneration(), generation2);
+    // Spark conf SPARK_CONF_SENTINEL_DUMMY_FIELD values should increase
+    Assertions.assertNotEquals(sparkConf2.get(SPARK_CONF_SENTINEL_DUMMY_FIELD),
+        sparkConf3.get(SPARK_CONF_SENTINEL_DUMMY_FIELD));
+    SentinelManager.SentinelResourceState state3 =
+        (SentinelManager.SentinelResourceState) sentinelManager.getSentinelResources()
+            .get(ResourceID.fromResource(mockDeployment));
+    Assertions.assertEquals(state3.previousGeneration, previousGeneration2);
+    // Given the 2 * SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS, the reconcile method is
+    // not called to handleSentinelResourceReconciliation to update
+    Assertions.assertFalse(sentinelManager.allSentinelsAreHealthy());
+
+    sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient);
+    sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, kubernetesClient);
+    boolean isHealthy;
+    long currentTimeInMills = System.currentTimeMillis();
+    do {
+      isHealthy = sentinelManager.allSentinelsAreHealthy();
+    } while (!isHealthy && notTimedOut(currentTimeInMills, TimeUnit.MILLISECONDS.convert(
+        Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS))));
+    Assertions.assertTrue(isHealthy);
+    kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).delete();
+  }
+
+  @Test
+  @Order(2)
+  void sentinelManagerShouldReportHealthyWhenWatchedNamespaceIsReduced()
+      throws InterruptedException {
+    Set<String> namespaces = new HashSet<>();
+    namespaces.add(DEFAULT);
+    namespaces.add(SPARK_DEMO);
+
+    try (MockedStatic<SparkReconcilerUtils> mockUtils =
+             mockStatic(SparkReconcilerUtils.class)) {
+      mockUtils.when(SparkReconcilerUtils::getWatchedNamespaces).thenReturn(namespaces);
+      SentinelManager sentinelManager = new SentinelManager();
+      NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>,
+          Resource<SparkApplication>> cr1 =
+          kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT);
+      NonNamespaceOperation<SparkApplication, KubernetesResourceList<SparkApplication>,
+          Resource<SparkApplication>> cr2 =
+          kubernetesClient.resources(SparkApplication.class).inNamespace(SPARK_DEMO);
+
+      var mockDeployment1 = createMockDeployment(DEFAULT);
+      var mockDeployment2 = createMockDeployment(SPARK_DEMO);
+      cr1.create(mockDeployment1);
+      cr2.create(mockDeployment2);
+
+      var crList1 =
+          kubernetesClient.resources(SparkApplication.class).inNamespace(DEFAULT).list();
+      var crList2 =
+          kubernetesClient.resources(SparkApplication.class)
+              .inNamespace(SPARK_DEMO).list();
+      var sparkApplication1 = crList1.getItems().get(0);
+      var sparkApplication2 = crList2.getItems().get(0);
+      sentinelManager.handleSentinelResourceReconciliation(sparkApplication1,
+          kubernetesClient);
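      /* The rest of this test asserts one sentinel entry per watched namespace,
       * keyed by ResourceID, and that entries for namespaces dropped from the watch
       * set are pruned by the next health check. Conceptually the pruning amounts to
       * (an illustrative guess at the mechanism, not the actual implementation):
       *
       *   sentinels.keySet().removeIf(id ->
       *       !SparkReconcilerUtils.getWatchedNamespaces()
       *           .contains(id.getNamespace().orElse("")));
       */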
sentinelManager.handleSentinelResourceReconciliation(sparkApplication2, + kubernetesClient); + Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 2, + "Sentinel Manager should watch on resources in two namespaces"); + Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(), + "Sentinel Manager should report healthy"); + namespaces.remove(SPARK_DEMO); + Thread.sleep(Duration.ofSeconds(SENTINEL_RESOURCE_RECONCILIATION_DELAY_SECONDS) + .toMillis()); + Assertions.assertTrue(sentinelManager.allSentinelsAreHealthy(), + "Sentinel Manager should report healthy after one namespace is " + + "removed from the watch"); + Assertions.assertEquals(sentinelManager.getSentinelResources().size(), 1, + "Sentinel Manager should only watch on one namespace"); } + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java index 58b4e151..c30f81ba 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemFactoryTest.java @@ -18,35 +18,35 @@ package org.apache.spark.kubernetes.operator.metrics; +import java.util.Properties; + import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import java.util.Properties; - import static org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory.parseSinkProperties; import static org.junit.Assert.assertThrows; class MetricsSystemFactoryTest { - @Test - void testMetricsSystemFailFastWithNoClassFullName() { - Properties properties = new Properties(); - properties.put("sink.mocksink.period", "10"); - properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink"); - RuntimeException e = - assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); - Assertions.assertEquals( - "mocksink provides properties, but does not provide full class name", - e.getMessage()); - } + @Test + void testMetricsSystemFailFastWithNoClassFullName() { + Properties properties = new Properties(); + properties.put("sink.mocksink.period", "10"); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink"); + RuntimeException e = + assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); + Assertions.assertEquals( + "mocksink provides properties, but does not provide full class name", + e.getMessage()); + } - @Test - void testMetricsSystemFailFastWithNotFoundClassName() { - Properties properties = new Properties(); - properties.put("sink.console.class", "org.apache.spark.metrics.sink.FooSink"); - RuntimeException e = - assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); - Assertions.assertEquals("Fail to find class org.apache.spark.metrics.sink.FooSink", - e.getMessage()); - } + @Test + void testMetricsSystemFailFastWithNotFoundClassName() { + Properties properties = new Properties(); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.FooSink"); + RuntimeException e = + assertThrows(RuntimeException.class, () -> parseSinkProperties(properties)); + Assertions.assertEquals("Fail to find class org.apache.spark.metrics.sink.FooSink", + e.getMessage()); + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java 
b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java
index d9381e11..bbc12d9e 100644
--- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java
+++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/MetricsSystemTest.java
@@ -18,55 +18,56 @@
 package org.apache.spark.kubernetes.operator.metrics;
-import org.apache.spark.kubernetes.operator.metrics.sink.MockSink;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
+
+import org.apache.spark.kubernetes.operator.metrics.sink.MockSink;
 import org.apache.spark.metrics.sink.Sink;
 import org.apache.spark.metrics.source.Source;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
 class MetricsSystemTest {
-    @Test
-    void testMetricsSystemWithResourcesAdd() {
-        MetricsSystem metricsSystem = new MetricsSystem();
-        List<Source> sourcesList = metricsSystem.getSources();
-        List<Sink> sinks = metricsSystem.getSinks();
-        metricsSystem.start();
-        Assertions.assertEquals(1, sourcesList.size());
-        // Default no sink added
-        Assertions.assertEquals(0, sinks.size());
-        Assertions.assertFalse(metricsSystem.getRegistry().getMetrics().isEmpty());
-        metricsSystem.stop();
-        Assertions.assertTrue(metricsSystem.getRegistry().getMetrics().isEmpty());
-    }
+  @Test
+  void testMetricsSystemWithResourcesAdd() {
+    MetricsSystem metricsSystem = new MetricsSystem();
+    List<Source> sourcesList = metricsSystem.getSources();
+    List<Sink> sinks = metricsSystem.getSinks();
+    metricsSystem.start();
+    Assertions.assertEquals(1, sourcesList.size());
+    // Default no sink added
+    Assertions.assertEquals(0, sinks.size());
+    Assertions.assertFalse(metricsSystem.getRegistry().getMetrics().isEmpty());
+    metricsSystem.stop();
+    Assertions.assertTrue(metricsSystem.getRegistry().getMetrics().isEmpty());
+  }
-    @Test
-    void testMetricsSystemWithCustomizedSink() {
-        Properties properties = new Properties();
-        properties.put("sink.mocksink.class",
-            "org.apache.spark.kubernetes.operator.metrics.sink.MockSink");
-        properties.put("sink.mocksink.period", "10");
-        MetricsSystem metricsSystem = new MetricsSystem(properties);
-        metricsSystem.start();
-        Sink mockSink = metricsSystem.getSinks().get(0);
-        metricsSystem.stop();
-        MockSink sink = (MockSink) mockSink;
-        Assertions.assertEquals(sink.getPollPeriod(), 10);
-        Assertions.assertEquals(sink.getTimeUnit(), TimeUnit.SECONDS);
-    }
+  @Test
+  void testMetricsSystemWithCustomizedSink() {
+    Properties properties = new Properties();
+    properties.put("sink.mocksink.class",
+        "org.apache.spark.kubernetes.operator.metrics.sink.MockSink");
+    properties.put("sink.mocksink.period", "10");
+    MetricsSystem metricsSystem = new MetricsSystem(properties);
+    metricsSystem.start();
+    Sink mockSink = metricsSystem.getSinks().get(0);
+    metricsSystem.stop();
+    MockSink sink = (MockSink) mockSink;
+    Assertions.assertEquals(sink.getPollPeriod(), 10);
+    Assertions.assertEquals(sink.getTimeUnit(), TimeUnit.SECONDS);
+  }
-    @Test
-    void testMetricsSystemWithTwoSinkConfigurations() {
-        Properties properties = new Properties();
-        properties.put("sink.mocksink.class",
-            "org.apache.spark.kubernetes.operator.metrics.sink.MockSink");
-        properties.put("sink.mocksink.period", "10");
-        properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink");
-        MetricsSystem metricsSystem = new MetricsSystem(properties);
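        /* All three tests drive sinks through the "sink.<name>.<key>" property
         * convention; judging from the fail-fast messages asserted in
         * MetricsSystemFactoryTest, MetricsSystemFactory.parseSinkProperties groups
         * keys by <name> and requires a "class" entry per sink. A minimal
         * illustration of the convention (values here are examples only):
         *
         *   Properties props = new Properties();
         *   props.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink");
         *   props.put("sink.console.period", "10");  // poll period
         *   MetricsSystem ms = new MetricsSystem(props);
         */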
metricsSystem.start(); - Assertions.assertEquals(2, metricsSystem.getSinks().size()); - } + @Test + void testMetricsSystemWithTwoSinkConfigurations() { + Properties properties = new Properties(); + properties.put("sink.mocksink.class", + "org.apache.spark.kubernetes.operator.metrics.sink.MockSink"); + properties.put("sink.mocksink.period", "10"); + properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink"); + MetricsSystem metricsSystem = new MetricsSystem(properties); + metricsSystem.start(); + Assertions.assertEquals(2, metricsSystem.getSinks().size()); + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java index 97e874d0..1909bf17 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/sink/MockSink.java @@ -18,51 +18,51 @@ package org.apache.spark.kubernetes.operator.metrics.sink; -import org.apache.spark.metrics.sink.Sink; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Properties; import java.util.concurrent.TimeUnit; import com.codahale.metrics.MetricRegistry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.spark.metrics.sink.Sink; @SuppressWarnings("PMD") public class MockSink implements Sink { - private static final Logger logger = LoggerFactory.getLogger(MockSink.class); - private Properties properties; - private MetricRegistry metricRegistry; - public static final String DEFAULT_UNIT = "SECONDS"; - public static final int DEFAULT_PERIOD = 20; - public static final String KEY_PERIOD = "period"; - public static final String KEY_UNIT = "unit"; + private static final Logger logger = LoggerFactory.getLogger(MockSink.class); + private Properties properties; + private MetricRegistry metricRegistry; + public static final String DEFAULT_UNIT = "SECONDS"; + public static final int DEFAULT_PERIOD = 20; + public static final String KEY_PERIOD = "period"; + public static final String KEY_UNIT = "unit"; - public int getPollPeriod() { - return Integer.parseInt((String) properties.getOrDefault(KEY_PERIOD, DEFAULT_PERIOD)); - } + public int getPollPeriod() { + return Integer.parseInt((String) properties.getOrDefault(KEY_PERIOD, DEFAULT_PERIOD)); + } - public TimeUnit getTimeUnit() { - return TimeUnit.valueOf((String) properties.getOrDefault(KEY_UNIT, DEFAULT_UNIT)); - } + public TimeUnit getTimeUnit() { + return TimeUnit.valueOf((String) properties.getOrDefault(KEY_UNIT, DEFAULT_UNIT)); + } - public MockSink(Properties properties, MetricRegistry metricRegistry) { - logger.info("Current properties: {}", properties); - this.properties = properties; - this.metricRegistry = metricRegistry; - } + public MockSink(Properties properties, MetricRegistry metricRegistry) { + logger.info("Current properties: {}", properties); + this.properties = properties; + this.metricRegistry = metricRegistry; + } - @Override - public void start() { - logger.info("Mock sink started"); - } + @Override + public void start() { + logger.info("Mock sink started"); + } - @Override - public void stop() { - logger.info("Mock sink stopped"); - } + @Override + public void stop() { + logger.info("Mock sink stopped"); + } - @Override - public void report() { - logger.info("Mock sink reported"); - } + @Override + public void report() { + logger.info("Mock sink reported"); + 
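    /* MockSink is a no-op Sink used by MetricsSystemTest above to verify sink
     * wiring; tests configure it via properties and read back the parsed settings:
     *
     *   Properties p = new Properties();
     *   p.put("sink.mocksink.class",
     *       "org.apache.spark.kubernetes.operator.metrics.sink.MockSink");
     *   p.put("sink.mocksink.period", "10");  // -> getPollPeriod() == 10
     *   // default unit is SECONDS        -> getTimeUnit() == TimeUnit.SECONDS
     *
     * One caveat visible in the code above: getPollPeriod() falls back to the
     * Integer DEFAULT_PERIOD inside a (String) cast, which would throw
     * ClassCastException if "period" were ever omitted; the tests here always set
     * it explicitly. */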
} } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java index 0f1dae98..cf6c1643 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/KubernetesMetricsInterceptorTest.java @@ -18,6 +18,12 @@ package org.apache.spark.kubernetes.operator.metrics.source; +import javax.validation.constraints.NotNull; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import com.codahale.metrics.Meter; import com.codahale.metrics.Metric; import io.fabric8.kubernetes.api.model.ConfigMap; @@ -25,12 +31,6 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.client.KubernetesClientFactory; -import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; -import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory; -import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; -import org.apache.spark.metrics.source.Source; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.MethodOrderer; @@ -38,11 +38,12 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; -import javax.validation.constraints.NotNull; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.client.KubernetesClientFactory; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystem; +import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory; +import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; +import org.apache.spark.metrics.source.Source; import static org.junit.Assert.assertThrows; @@ -51,87 +52,87 @@ @SuppressWarnings("PMD") class KubernetesMetricsInterceptorTest { - @NotNull - KubernetesMockServer mockServer; - @NotNull - KubernetesClient kubernetesClient; + @NotNull + KubernetesMockServer mockServer; + @NotNull + KubernetesClient kubernetesClient; - @AfterEach - void cleanUp() { - mockServer.reset(); - } + @AfterEach + void cleanUp() { + mockServer.reset(); + } - @Test - @Order(1) - void testMetricsEnabled() { - MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); - KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, - kubernetesClient.getConfiguration()); - var sparkApplication = createSparkApplication(); - var configMap = createConfigMap(); - Source source = metricsSystem.getSources().get(0); - Map metrics = new HashMap<>(source.metricRegistry().getMetrics()); - Assertions.assertEquals(9, metrics.size()); - client.resource(sparkApplication).create(); - client.resource(configMap).get(); - Map metrics2 = new HashMap<>(source.metricRegistry().getMetrics()); - Assertions.assertEquals(17, metrics2.size()); - List expectedMetricsName = - Arrays.asList("http.response.201", "http.request.post", "sparkapplications.post", - 
"spark-test.sparkapplications.post", "spark-test.sparkapplications.post", - "configmaps.get", - "spark-system.configmaps.get", "2xx", "4xx"); - expectedMetricsName.stream().forEach(name -> { - Meter metric = (Meter) metrics2.get(name); - Assertions.assertEquals(metric.getCount(), 1); - }); - client.resource(sparkApplication).delete(); - } + @Test + @Order(1) + void testMetricsEnabled() { + MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); + KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, + kubernetesClient.getConfiguration()); + var sparkApplication = createSparkApplication(); + var configMap = createConfigMap(); + Source source = metricsSystem.getSources().get(0); + Map metrics = new HashMap<>(source.metricRegistry().getMetrics()); + Assertions.assertEquals(9, metrics.size()); + client.resource(sparkApplication).create(); + client.resource(configMap).get(); + Map metrics2 = new HashMap<>(source.metricRegistry().getMetrics()); + Assertions.assertEquals(17, metrics2.size()); + List expectedMetricsName = + Arrays.asList("http.response.201", "http.request.post", "sparkapplications.post", + "spark-test.sparkapplications.post", "spark-test.sparkapplications.post", + "configmaps.get", + "spark-system.configmaps.get", "2xx", "4xx"); + expectedMetricsName.stream().forEach(name -> { + Meter metric = (Meter) metrics2.get(name); + Assertions.assertEquals(metric.getCount(), 1); + }); + client.resource(sparkApplication).delete(); + } - @Test - @Order(2) - void testWhenKubernetesServerNotWorking() { - MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); - KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, - kubernetesClient.getConfiguration()); - int retry = client.getConfiguration().getRequestRetryBackoffLimit(); - mockServer.shutdown(); - var sparkApplication = createSparkApplication(); - assertThrows(Exception.class, () -> { - client.resource(sparkApplication).create(); - }); - Source source = metricsSystem.getSources().get(0); - Map map = source.metricRegistry().getMetrics(); - Assertions.assertEquals(21, map.size()); - Meter metric = (Meter) map.get("failed"); - Assertions.assertEquals(metric.getCount(), retry + 1); - } + @Test + @Order(2) + void testWhenKubernetesServerNotWorking() { + MetricsSystem metricsSystem = MetricsSystemFactory.createMetricsSystem(); + KubernetesClient client = KubernetesClientFactory.buildKubernetesClient(metricsSystem, + kubernetesClient.getConfiguration()); + int retry = client.getConfiguration().getRequestRetryBackoffLimit(); + mockServer.shutdown(); + var sparkApplication = createSparkApplication(); + assertThrows(Exception.class, () -> { + client.resource(sparkApplication).create(); + }); + Source source = metricsSystem.getSources().get(0); + Map map = source.metricRegistry().getMetrics(); + Assertions.assertEquals(21, map.size()); + Meter metric = (Meter) map.get("failed"); + Assertions.assertEquals(metric.getCount(), retry + 1); + } - private static SparkApplication createSparkApplication() { - ObjectMeta meta = new ObjectMeta(); - meta.setName("sample-spark-application"); - meta.setNamespace("spark-test"); - var sparkApplication = new SparkApplication(); - sparkApplication.setMetadata(meta); - ApplicationSpec applicationSpec = new ApplicationSpec(); - applicationSpec.setMainClass("org.apache.spark.examples.SparkPi"); - applicationSpec.setJars("local:///opt/spark/examples/jars/spark-examples.jar"); - applicationSpec.setSparkConf(Map.of( - 
"spark.executor.instances", "5", - "spark.kubernetes.container.image", "spark", - "spark.kubernetes.namespace", "spark-test", - "spark.kubernetes.authenticate.driver.serviceAccountName", "spark" - )); - sparkApplication.setSpec(applicationSpec); - return sparkApplication; - } + private static SparkApplication createSparkApplication() { + ObjectMeta meta = new ObjectMeta(); + meta.setName("sample-spark-application"); + meta.setNamespace("spark-test"); + var sparkApplication = new SparkApplication(); + sparkApplication.setMetadata(meta); + ApplicationSpec applicationSpec = new ApplicationSpec(); + applicationSpec.setMainClass("org.apache.spark.examples.SparkPi"); + applicationSpec.setJars("local:///opt/spark/examples/jars/spark-examples.jar"); + applicationSpec.setSparkConf(Map.of( + "spark.executor.instances", "5", + "spark.kubernetes.container.image", "spark", + "spark.kubernetes.namespace", "spark-test", + "spark.kubernetes.authenticate.driver.serviceAccountName", "spark" + )); + sparkApplication.setSpec(applicationSpec); + return sparkApplication; + } - private static ConfigMap createConfigMap() { - ObjectMeta meta = new ObjectMeta(); - meta.setName("spark-job-operator-configuration"); - meta.setNamespace("spark-system"); - var configMap = new ConfigMap(); - configMap.setMetadata(meta); - return configMap; - } + private static ConfigMap createConfigMap() { + ObjectMeta meta = new ObjectMeta(); + meta.setName("spark-job-operator-configuration"); + meta.setNamespace("spark-system"); + var configMap = new ConfigMap(); + configMap.setMetadata(meta); + return configMap; + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java index eb3ab541..14202360 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java @@ -18,6 +18,8 @@ package org.apache.spark.kubernetes.operator.metrics.source; +import java.util.Map; + import com.codahale.metrics.Metric; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.HasMetadata; @@ -29,172 +31,171 @@ import io.javaoperatorsdk.operator.processing.event.ResourceID; import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceAction; import io.javaoperatorsdk.operator.processing.event.source.controller.ResourceEvent; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Map; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler; class OperatorJosdkMetricsTest { - public static final String DEFAULT_NAMESPACE = "default"; - public static final String TEST_RESOURCE_NAME = "test1"; - private static final ResourceID resourceId = new ResourceID("spark-pi", "testns"); + public static final String DEFAULT_NAMESPACE = "default"; + public static final String TEST_RESOURCE_NAME = "test1"; + private static final ResourceID resourceId = new ResourceID("spark-pi", "testns"); + + private static final Map metadata = + Map.of(Constants.RESOURCE_GVK_KEY, 
GroupVersionKind.gvkFor(SparkApplication.class), + Constants.CONTROLLER_NAME, "test-controller-name"); + private static final String controllerName = SparkApplicationReconciler.class.getSimpleName(); + + private OperatorJosdkMetrics operatorMetrics; + + @BeforeEach + public void setup() { + operatorMetrics = + new OperatorJosdkMetrics(); + } + + @Test + void testTimeControllerExecution() throws Exception { + var successExecution = new TestingExecutionBase<>(); + operatorMetrics.timeControllerExecution(successExecution); + Map metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(4, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("sparkapplication.sparkapplicationreconciler.reconcile.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.success.both")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.success.both")); + + var failedExecution = new FooTestingExecutionBase<>(); + try { + operatorMetrics.timeControllerExecution(failedExecution); + } catch (Exception e) { + Assertions.assertEquals(e.getMessage(), "Foo exception"); + Assertions.assertEquals(8, metrics.size()); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.failure")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.sparkapplicationreconciler.reconcile.failure.exception" + + ".nosuchfieldexception")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure")); + Assertions.assertTrue(metrics.containsKey( + "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure." 
+ + "exception.nosuchfieldexception")); + } + } + + @Test + void testReconciliationFinished() { + operatorMetrics.finishedReconciliation(buildNamespacedResource(), metadata); + Map metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue(metrics.containsKey("configmap.default.reconciliation.finished")); + Assertions.assertTrue(metrics.containsKey("configmap.reconciliation.finished")); + } + + @Test + void testReconciliationExecutionStartedAndFinished() { + operatorMetrics.reconciliationExecutionStarted(buildNamespacedResource(), metadata); + Map metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("configmap.test-controller-name.reconciliations.executions")); + Assertions.assertTrue(metrics.containsKey( + "configmap.default.test-controller-name.reconciliations.executions")); + operatorMetrics.reconciliationExecutionFinished(buildNamespacedResource(), metadata); + Assertions.assertEquals(3, metrics.size()); + Assertions.assertTrue( + metrics.containsKey("configmap.test-controller-name.reconciliations.queue.size")); + } + + @Test + void testReceivedEvent() { + Event event = + new ResourceEvent(ResourceAction.ADDED, resourceId, buildNamespacedResource()); + operatorMetrics.receivedEvent(event, metadata); + Map metrics = operatorMetrics.metricRegistry().getMetrics(); + Assertions.assertEquals(2, metrics.size()); + Assertions.assertTrue(metrics.containsKey("sparkapplication.added.resource.event")); + Assertions.assertTrue(metrics.containsKey("sparkapplication.testns.added.resource.event")); + } + + private static class TestingExecutionBase implements Metrics.ControllerExecution { + @Override + public String controllerName() { + return controllerName; + } + + @Override + public String successTypeName(Object o) { + return "both"; + } - private static final Map metadata = - Map.of(Constants.RESOURCE_GVK_KEY, GroupVersionKind.gvkFor(SparkApplication.class), - Constants.CONTROLLER_NAME, "test-controller-name"); - private static final String controllerName = SparkApplicationReconciler.class.getSimpleName(); + @Override + public ResourceID resourceID() { + return resourceId; + } - private OperatorJosdkMetrics operatorMetrics; + @Override + public Map metadata() { + return metadata; + } - @BeforeEach - public void setup() { - operatorMetrics = - new OperatorJosdkMetrics(); + @Override + public String name() { + return "reconcile"; } - @Test - void testTimeControllerExecution() throws Exception { - var successExecution = new TestingExecutionBase<>(); - operatorMetrics.timeControllerExecution(successExecution); - Map metrics = operatorMetrics.metricRegistry().getMetrics(); - Assertions.assertEquals(4, metrics.size()); - Assertions.assertTrue( - metrics.containsKey("sparkapplication.sparkapplicationreconciler.reconcile.both")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.both")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.success.both")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.success.both")); - - var failedExecution = new FooTestingExecutionBase<>(); - try { - operatorMetrics.timeControllerExecution(failedExecution); - } catch (Exception e) { - Assertions.assertEquals(e.getMessage(), "Foo exception"); - Assertions.assertEquals(8, metrics.size()); - 
Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.failure")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.failure.exception" + - ".nosuchfieldexception")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure")); - Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure." + - "exception.nosuchfieldexception")); - } + @Override + public T execute() throws Exception { + Thread.sleep(1000); + return null; } + } - @Test - void testReconciliationFinished() { - operatorMetrics.finishedReconciliation(buildNamespacedResource(), metadata); - Map metrics = operatorMetrics.metricRegistry().getMetrics(); - Assertions.assertEquals(2, metrics.size()); - Assertions.assertTrue(metrics.containsKey("configmap.default.reconciliation.finished")); - Assertions.assertTrue(metrics.containsKey("configmap.reconciliation.finished")); + private static class FooTestingExecutionBase implements Metrics.ControllerExecution { + @Override + public String controllerName() { + return controllerName; } - @Test - void testReconciliationExecutionStartedAndFinished() { - operatorMetrics.reconciliationExecutionStarted(buildNamespacedResource(), metadata); - Map metrics = operatorMetrics.metricRegistry().getMetrics(); - Assertions.assertEquals(2, metrics.size()); - Assertions.assertTrue( - metrics.containsKey("configmap.test-controller-name.reconciliations.executions")); - Assertions.assertTrue(metrics.containsKey( - "configmap.default.test-controller-name.reconciliations.executions")); - operatorMetrics.reconciliationExecutionFinished(buildNamespacedResource(), metadata); - Assertions.assertEquals(3, metrics.size()); - Assertions.assertTrue( - metrics.containsKey("configmap.test-controller-name.reconciliations.queue.size")); + @Override + public String successTypeName(Object o) { + return "resource"; } - @Test - void testReceivedEvent() { - Event event = - new ResourceEvent(ResourceAction.ADDED, resourceId, buildNamespacedResource()); - operatorMetrics.receivedEvent(event, metadata); - Map metrics = operatorMetrics.metricRegistry().getMetrics(); - Assertions.assertEquals(2, metrics.size()); - Assertions.assertTrue(metrics.containsKey("sparkapplication.added.resource.event")); - Assertions.assertTrue(metrics.containsKey("sparkapplication.testns.added.resource.event")); + @Override + public ResourceID resourceID() { + return resourceId; } - private static class TestingExecutionBase implements Metrics.ControllerExecution { - @Override - public String controllerName() { - return controllerName; - } - - @Override - public String successTypeName(Object o) { - return "both"; - } - - @Override - public ResourceID resourceID() { - return resourceId; - } - - @Override - public Map metadata() { - return metadata; - } - - @Override - public String name() { - return "reconcile"; - } - - @Override - public T execute() throws Exception { - Thread.sleep(1000); - return null; - } + @Override + public Map metadata() { + return metadata; } - private static class FooTestingExecutionBase implements Metrics.ControllerExecution { - @Override - public String controllerName() { - return controllerName; - } - - @Override - public String successTypeName(Object o) { - return "resource"; - } - - @Override - public ResourceID resourceID() { - return resourceId; - } - - @Override - public Map metadata() { - return metadata; - } - - 
@Override - public String name() { - return "reconcile"; - } - - @Override - public T execute() throws Exception { - throw new NoSuchFieldException("Foo exception"); - } + @Override + public String name() { + return "reconcile"; } - private HasMetadata buildNamespacedResource() { - var cm = new ConfigMap(); - cm.setMetadata(new ObjectMetaBuilder() - .withName(TEST_RESOURCE_NAME) - .withNamespace(DEFAULT_NAMESPACE) - .build()); - return cm; + @Override + public T execute() throws Exception { + throw new NoSuchFieldException("Foo exception"); } + } + + private HasMetadata buildNamespacedResource() { + var cm = new ConfigMap(); + cm.setMetadata(new ObjectMetaBuilder() + .withName(TEST_RESOURCE_NAME) + .withNamespace(DEFAULT_NAMESPACE) + .build()); + return cm; + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java index 5c517634..2b84b5d1 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/HealthProbeTest.java @@ -17,6 +17,13 @@ package org.apache.spark.kubernetes.operator.probe; +import javax.validation.constraints.NotNull; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; import io.javaoperatorsdk.operator.Operator; @@ -25,17 +32,11 @@ import io.javaoperatorsdk.operator.health.InformerHealthIndicator; import io.javaoperatorsdk.operator.health.InformerWrappingEventSourceHealthIndicator; import io.javaoperatorsdk.operator.health.Status; -import org.apache.spark.kubernetes.operator.health.SentinelManager; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; -import javax.validation.constraints.NotNull; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.spark.kubernetes.operator.health.SentinelManager; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -44,154 +45,154 @@ @EnableKubernetesMockClient(crud = true) class HealthProbeTest { - public static Operator operator; - public static Operator sparkConfMonitor; - public static List operators; - @NotNull - KubernetesClient kubernetesClient; - private AtomicBoolean isRunning; - private AtomicBoolean isRunning2; - private Map> - unhealthyEventSources = new HashMap<>(); - private Map> - unhealthyEventSources2 = new HashMap<>(); - - @BeforeAll - public static void beforeAll() { - operator = mock(Operator.class); - sparkConfMonitor = mock(Operator.class); - operators = Arrays.asList(operator, sparkConfMonitor); - } - - @BeforeEach - public void beforeEach() { - isRunning = new AtomicBoolean(false); - isRunning2 = new AtomicBoolean(false); - var runtimeInfo = - new RuntimeInfo(new Operator( - overrider -> overrider.withKubernetesClient(kubernetesClient))) { - @Override - public boolean isStarted() { - return isRunning.get(); - } - - @Override - public Map> - unhealthyInformerWrappingEventSourceHealthIndicator() { - return unhealthyEventSources; - } - }; - - 
var runtimeInfo2 = - new RuntimeInfo(new Operator( - overrider -> overrider.withKubernetesClient(kubernetesClient))) { - @Override - public boolean isStarted() { - return isRunning2.get(); - } - - @Override - public Map> - unhealthyInformerWrappingEventSourceHealthIndicator() { - return unhealthyEventSources2; - } - }; - - when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); - when(sparkConfMonitor.getRuntimeInfo()).thenReturn(runtimeInfo2); - } - - @Test - void testHealthProbeWithInformerHealthWithMultiOperators() { - var healthyProbe = new HealthProbe(operators); - isRunning.set(true); - assertFalse(healthyProbe.isHealthy(), - "Healthy Probe should fail when the spark conf monitor operator is not running"); - isRunning2.set(true); - assertTrue(healthyProbe.isHealthy(), - "Healthy Probe should pass when both operators are running"); - - unhealthyEventSources2.put( - "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); - assertFalse(healthyProbe.isHealthy(), - "Healthy Probe should fail when monitor's informer health is not healthy"); - unhealthyEventSources2.clear(); - assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); - } - - @Test - void testHealthProbeWithInformerHealthWithSingleOperator() { - var healthyProbe = new HealthProbe(Arrays.asList(operator)); - assertFalse(healthyProbe.isHealthy(), - "Health Probe should fail when operator is not running"); - isRunning.set(true); - unhealthyEventSources.put( - "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); - assertFalse(healthyProbe.isHealthy(), - "Healthy Probe should fail when informer health is not healthy"); - unhealthyEventSources.clear(); - assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); - } - - @Test - void testHealthProbeWithSentinelHealthWithMultiOperators() { - var healthyProbe = new HealthProbe(operators); - SentinelManager sentinelManager = mock(SentinelManager.class); - healthyProbe.registerSentinelResourceManager(sentinelManager); - isRunning.set(true); - isRunning2.set(true); - when(sentinelManager.allSentinelsAreHealthy()).thenReturn(false); - assertFalse(healthyProbe.isHealthy(), - "Healthy Probe should fail when sentinels report failures"); - - when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); - assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); - } - - private static InformerWrappingEventSourceHealthIndicator informerHealthIndicator( - Map informerStatuses) { - Map informers = new HashMap<>(); - informerStatuses.forEach( - (n, s) -> - informers.put( - n, - new InformerHealthIndicator() { - @Override - public boolean hasSynced() { - return false; - } - - @Override - public boolean isWatching() { - return false; - } - - @Override - public boolean isRunning() { - return false; - } - - @Override - public Status getStatus() { - return s; - } - - @Override - public String getTargetNamespace() { - return null; - } - })); - - return new InformerWrappingEventSourceHealthIndicator() { - @Override - public Map informerHealthIndicators() { - return informers; - } - - @Override - public ResourceConfiguration getInformerConfiguration() { - return null; - } + public static Operator operator; + public static Operator sparkConfMonitor; + public static List operators; + @NotNull + KubernetesClient kubernetesClient; + private AtomicBoolean isRunning; + private AtomicBoolean isRunning2; + private Map> + unhealthyEventSources = new HashMap<>(); + private Map> + unhealthyEventSources2 = new HashMap<>(); + + @BeforeAll + 
public static void beforeAll() { + operator = mock(Operator.class); + sparkConfMonitor = mock(Operator.class); + operators = Arrays.asList(operator, sparkConfMonitor); + } + + @BeforeEach + public void beforeEach() { + isRunning = new AtomicBoolean(false); + isRunning2 = new AtomicBoolean(false); + var runtimeInfo = + new RuntimeInfo(new Operator( + overrider -> overrider.withKubernetesClient(kubernetesClient))) { + @Override + public boolean isStarted() { + return isRunning.get(); + } + + @Override + public Map> + unhealthyInformerWrappingEventSourceHealthIndicator() { + return unhealthyEventSources; + } }; - } + + var runtimeInfo2 = + new RuntimeInfo(new Operator( + overrider -> overrider.withKubernetesClient(kubernetesClient))) { + @Override + public boolean isStarted() { + return isRunning2.get(); + } + + @Override + public Map> + unhealthyInformerWrappingEventSourceHealthIndicator() { + return unhealthyEventSources2; + } + }; + + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(sparkConfMonitor.getRuntimeInfo()).thenReturn(runtimeInfo2); + } + + @Test + void testHealthProbeWithInformerHealthWithMultiOperators() { + var healthyProbe = new HealthProbe(operators); + isRunning.set(true); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when the spark conf monitor operator is not running"); + isRunning2.set(true); + assertTrue(healthyProbe.isHealthy(), + "Healthy Probe should pass when both operators are running"); + + unhealthyEventSources2.put( + "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when monitor's informer health is not healthy"); + unhealthyEventSources2.clear(); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + @Test + void testHealthProbeWithInformerHealthWithSingleOperator() { + var healthyProbe = new HealthProbe(Arrays.asList(operator)); + assertFalse(healthyProbe.isHealthy(), + "Health Probe should fail when operator is not running"); + isRunning.set(true); + unhealthyEventSources.put( + "c1", Map.of("e1", informerHealthIndicator(Map.of("i1", Status.UNHEALTHY)))); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when informer health is not healthy"); + unhealthyEventSources.clear(); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + @Test + void testHealthProbeWithSentinelHealthWithMultiOperators() { + var healthyProbe = new HealthProbe(operators); + SentinelManager sentinelManager = mock(SentinelManager.class); + healthyProbe.registerSentinelResourceManager(sentinelManager); + isRunning.set(true); + isRunning2.set(true); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(false); + assertFalse(healthyProbe.isHealthy(), + "Healthy Probe should fail when sentinels report failures"); + + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + assertTrue(healthyProbe.isHealthy(), "Healthy Probe should pass"); + } + + private static InformerWrappingEventSourceHealthIndicator informerHealthIndicator( + Map informerStatuses) { + Map informers = new HashMap<>(); + informerStatuses.forEach( + (n, s) -> + informers.put( + n, + new InformerHealthIndicator() { + @Override + public boolean hasSynced() { + return false; + } + + @Override + public boolean isWatching() { + return false; + } + + @Override + public boolean isRunning() { + return false; + } + + @Override + public Status getStatus() { + return s; + } + + @Override + public String getTargetNamespace() 
{ + return null; + } + })); + + return new InformerWrappingEventSourceHealthIndicator() { + @Override + public Map informerHealthIndicators() { + return informers; + } + + @Override + public ResourceConfiguration getInformerConfiguration() { + return null; + } + }; + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java index 3ce0875c..8b7e4674 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ProbeServiceTest.java @@ -17,17 +17,18 @@ package org.apache.spark.kubernetes.operator.probe; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Arrays; +import java.util.HashMap; + import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; import io.javaoperatorsdk.operator.Operator; import io.javaoperatorsdk.operator.RuntimeInfo; -import org.apache.spark.kubernetes.operator.health.SentinelManager; import org.junit.jupiter.api.Test; -import java.net.HttpURLConnection; -import java.net.URL; -import java.util.Arrays; -import java.util.HashMap; +import org.apache.spark.kubernetes.operator.health.SentinelManager; import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.OperatorProbePort; import static org.apache.spark.kubernetes.operator.probe.ProbeService.HEALTHZ; @@ -39,86 +40,86 @@ @SuppressWarnings("PMD.JUnitTestsShouldIncludeAssert") @EnableKubernetesMockClient class ProbeServiceTest { - @Test - void testHealthProbeEndpointWithStaticProperties() throws Exception { - Operator operator = mock(Operator.class); - RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); - when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); - when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); - SentinelManager sentinelManager = mock(SentinelManager.class); - when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( - new HashMap<>()); - when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); - ProbeService probeService = new ProbeService(Arrays.asList(operator), sentinelManager); - probeService.start(); - hitHealthyEndpoint(); - probeService.stop(); - } + @Test + void testHealthProbeEndpointWithStaticProperties() throws Exception { + Operator operator = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); + SentinelManager sentinelManager = mock(SentinelManager.class); + when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + ProbeService probeService = new ProbeService(Arrays.asList(operator), sentinelManager); + probeService.start(); + hitHealthyEndpoint(); + probeService.stop(); + } - @Test - void testHealthProbeEndpointWithDynamicProperties() throws Exception { - Operator operator = mock(Operator.class); - Operator operator1 = mock(Operator.class); - RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); - RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); - when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); - when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1); + @Test + void 
testHealthProbeEndpointWithDynamicProperties() throws Exception { + Operator operator = mock(Operator.class); + Operator operator1 = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1); - when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); - when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true); + when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); + when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true); - SentinelManager sentinelManager = mock(SentinelManager.class); - when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( - new HashMap<>()); - when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( - new HashMap<>()); - when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); - ProbeService probeService = - new ProbeService(Arrays.asList(operator, operator1), sentinelManager); - probeService.start(); - hitHealthyEndpoint(); - probeService.stop(); - } + SentinelManager sentinelManager = mock(SentinelManager.class); + when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(sentinelManager.allSentinelsAreHealthy()).thenReturn(true); + ProbeService probeService = + new ProbeService(Arrays.asList(operator, operator1), sentinelManager); + probeService.start(); + hitHealthyEndpoint(); + probeService.stop(); + } - @Test - void testReadinessProbeEndpointWithDynamicProperties() throws Exception { - Operator operator = mock(Operator.class); - Operator operator1 = mock(Operator.class); - RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); - RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); - when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); - when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1); + @Test + void testReadinessProbeEndpointWithDynamicProperties() throws Exception { + Operator operator = mock(Operator.class); + Operator operator1 = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo runtimeInfo1 = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(operator1.getRuntimeInfo()).thenReturn(runtimeInfo1); - when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); - when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true); + when(runtimeInfo.isStarted()).thenReturn(true).thenReturn(true); + when(runtimeInfo1.isStarted()).thenReturn(true).thenReturn(true); - SentinelManager sentinelManager = mock(SentinelManager.class); - KubernetesClient client = mock(KubernetesClient.class); - when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( - new HashMap<>()); - when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( - new HashMap<>()); - when(operator1.getKubernetesClient()).thenReturn(client); - ProbeService probeService = - new ProbeService(Arrays.asList(operator, operator1), sentinelManager); - probeService.start(); - hitStartedUpEndpoint(); - probeService.stop(); - } + SentinelManager sentinelManager = mock(SentinelManager.class); + KubernetesClient client = mock(KubernetesClient.class); + when(runtimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + 
new HashMap<>()); + when(runtimeInfo1.unhealthyInformerWrappingEventSourceHealthIndicator()).thenReturn( + new HashMap<>()); + when(operator1.getKubernetesClient()).thenReturn(client); + ProbeService probeService = + new ProbeService(Arrays.asList(operator, operator1), sentinelManager); + probeService.start(); + hitStartedUpEndpoint(); + probeService.stop(); + } - private void hitHealthyEndpoint() throws Exception { - URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + HEALTHZ); - HttpURLConnection connection = (HttpURLConnection) u.openConnection(); - connection.setConnectTimeout(100000); - connection.connect(); - assertEquals(connection.getResponseCode(), 200, "Health Probe should return 200"); - } + private void hitHealthyEndpoint() throws Exception { + URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + HEALTHZ); + HttpURLConnection connection = (HttpURLConnection) u.openConnection(); + connection.setConnectTimeout(100000); + connection.connect(); + assertEquals(connection.getResponseCode(), 200, "Health Probe should return 200"); + } - private void hitStartedUpEndpoint() throws Exception { - URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + READYZ); - HttpURLConnection connection = (HttpURLConnection) u.openConnection(); - connection.setConnectTimeout(100000); - connection.connect(); - assertEquals(connection.getResponseCode(), 200, "operators are not ready"); - } + private void hitStartedUpEndpoint() throws Exception { + URL u = new URL("http://localhost:" + OperatorProbePort.getValue() + READYZ); + HttpURLConnection connection = (HttpURLConnection) u.openConnection(); + connection.setConnectTimeout(100000); + connection.connect(); + assertEquals(connection.getResponseCode(), 200, "operators are not ready"); + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java index c03d9e2e..5ac5b409 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/probe/ReadinessProbeTest.java @@ -17,52 +17,52 @@ package org.apache.spark.kubernetes.operator.probe; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; + +import com.sun.net.httpserver.HttpExchange; import io.fabric8.kubernetes.client.KubernetesClient; import io.javaoperatorsdk.operator.Operator; import io.javaoperatorsdk.operator.RuntimeInfo; -import org.apache.spark.kubernetes.operator.utils.ProbeUtil; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.sun.net.httpserver.HttpExchange; import org.mockito.MockedStatic; import org.mockito.Mockito; -import java.io.IOException; -import java.io.OutputStream; -import java.util.Arrays; +import org.apache.spark.kubernetes.operator.utils.ProbeUtil; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @SuppressWarnings("PMD.JUnitTestsShouldIncludeAssert") class ReadinessProbeTest { - KubernetesClient client; - HttpExchange httpExchange; + KubernetesClient client; + HttpExchange httpExchange; - @BeforeEach - public void beforeEach() { - OutputStream outputStream = mock(OutputStream.class); - httpExchange = mock(HttpExchange.class); - client = mock(KubernetesClient.class); - 
when(httpExchange.getResponseBody()).thenReturn(outputStream); - } + @BeforeEach + public void beforeEach() { + OutputStream outputStream = mock(OutputStream.class); + httpExchange = mock(HttpExchange.class); + client = mock(KubernetesClient.class); + when(httpExchange.getResponseBody()).thenReturn(outputStream); + } - @Test - void testHandleSucceed() throws IOException { - Operator operator = mock(Operator.class); - Operator sparkConfMonitor = mock(Operator.class); - RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); - RuntimeInfo sparkConfMonitorRuntimeInfo = mock(RuntimeInfo.class); - when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); - when(runtimeInfo.isStarted()).thenReturn(true); - when(sparkConfMonitor.getRuntimeInfo()).thenReturn(sparkConfMonitorRuntimeInfo); - when(sparkConfMonitorRuntimeInfo.isStarted()).thenReturn(true); - when(sparkConfMonitor.getKubernetesClient()).thenReturn(client); - ReadinessProbe readinessProbe = new ReadinessProbe(Arrays.asList(operator)); - try (MockedStatic mockedStatic = Mockito.mockStatic(ProbeUtil.class)) { - readinessProbe.handle(httpExchange); - mockedStatic.verify(() -> ProbeUtil.sendMessage(httpExchange, 200, "started")); - } + @Test + void testHandleSucceed() throws IOException { + Operator operator = mock(Operator.class); + Operator sparkConfMonitor = mock(Operator.class); + RuntimeInfo runtimeInfo = mock(RuntimeInfo.class); + RuntimeInfo sparkConfMonitorRuntimeInfo = mock(RuntimeInfo.class); + when(operator.getRuntimeInfo()).thenReturn(runtimeInfo); + when(runtimeInfo.isStarted()).thenReturn(true); + when(sparkConfMonitor.getRuntimeInfo()).thenReturn(sparkConfMonitorRuntimeInfo); + when(sparkConfMonitorRuntimeInfo.isStarted()).thenReturn(true); + when(sparkConfMonitor.getKubernetesClient()).thenReturn(client); + ReadinessProbe readinessProbe = new ReadinessProbe(Arrays.asList(operator)); + try (MockedStatic mockedStatic = Mockito.mockStatic(ProbeUtil.class)) { + readinessProbe.handle(httpExchange); + mockedStatic.verify(() -> ProbeUtil.sendMessage(httpExchange, 200, "started")); } + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java index 2426df1d..d6ac27d1 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java @@ -18,21 +18,22 @@ package org.apache.spark.kubernetes.operator.reconciler; +import java.time.Instant; +import java.util.Map; + import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.client.KubernetesClient; -import org.apache.spark.kubernetes.operator.ApplicationClientWorker; -import org.apache.spark.kubernetes.operator.ApplicationResourceSpec; -import org.apache.spark.kubernetes.operator.SparkApplication; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.MockedStatic; import org.mockito.Mockito; -import java.time.Instant; -import java.util.Map; +import org.apache.spark.kubernetes.operator.ApplicationClientWorker; +import org.apache.spark.kubernetes.operator.ApplicationResourceSpec; +import 
org.apache.spark.kubernetes.operator.SparkApplication; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -42,52 +43,53 @@ class SparkApplicationReconcileUtilsTest { - @Test - void testForceDeleteEnabled() { - SparkApplication app = new SparkApplication(); - app.getStatus().getCurrentState().setLastTransitionTime( - Instant.now().minusSeconds(5).toString()); - app.getSpec().getApplicationTolerations().getApplicationTimeoutConfig() - .setForceTerminationGracePeriodMillis(3000L); - Assertions.assertTrue(SparkApplicationReconcileUtils.enableForceDelete(app)); - } - @Test - void testBuildResourceSpecCoversBasicOverride() { - SparkApplication app = new SparkApplication(); - app.setMetadata(new ObjectMetaBuilder() - .withNamespace("foo") - .withName("bar-app") - .withUid("uid") - .build()); - KubernetesClient mockClient = mock(KubernetesClient.class); - Pod mockDriver = mock(Pod.class); - when(mockDriver.getMetadata()).thenReturn(new ObjectMeta()); - try (MockedStatic worker = - Mockito.mockStatic(ApplicationClientWorker.class)) { - ApplicationResourceSpec mockSpec = mock(ApplicationResourceSpec.class); - when(mockSpec.getConfiguredPod()).thenReturn(mockDriver); - ArgumentCaptor> captor = ArgumentCaptor.forClass(Map.class); - worker.when(() -> ApplicationClientWorker.getResourceSpec( - any(), any(), captor.capture())).thenReturn(mockSpec); - ApplicationResourceSpec spec = SparkApplicationReconcileUtils.buildResourceSpec(app, - mockClient); - worker.verify(() -> ApplicationClientWorker.getResourceSpec(eq(app), - eq(mockClient), any())); - Map props = captor.getValue(); - Assertions.assertTrue(props.containsKey("spark.kubernetes.namespace")); - Assertions.assertEquals("foo", props.get("spark.kubernetes.namespace")); - ArgumentCaptor metaArgumentCaptor = - ArgumentCaptor.forClass(ObjectMeta.class); - verify(mockDriver).setMetadata(metaArgumentCaptor.capture()); - Assertions.assertEquals(mockSpec, spec); - ObjectMeta metaOverride = metaArgumentCaptor.getValue(); - Assertions.assertEquals(1, metaOverride.getOwnerReferences().size()); - Assertions.assertEquals("bar-app", - metaOverride.getOwnerReferences().get(0).getName()); - Assertions.assertEquals("uid", - metaOverride.getOwnerReferences().get(0).getUid()); - Assertions.assertEquals(app.getKind(), - metaOverride.getOwnerReferences().get(0).getKind()); - } + @Test + void testForceDeleteEnabled() { + SparkApplication app = new SparkApplication(); + app.getStatus().getCurrentState().setLastTransitionTime( + Instant.now().minusSeconds(5).toString()); + app.getSpec().getApplicationTolerations().getApplicationTimeoutConfig() + .setForceTerminationGracePeriodMillis(3000L); + Assertions.assertTrue(SparkApplicationReconcileUtils.enableForceDelete(app)); + } + + @Test + void testBuildResourceSpecCoversBasicOverride() { + SparkApplication app = new SparkApplication(); + app.setMetadata(new ObjectMetaBuilder() + .withNamespace("foo") + .withName("bar-app") + .withUid("uid") + .build()); + KubernetesClient mockClient = mock(KubernetesClient.class); + Pod mockDriver = mock(Pod.class); + when(mockDriver.getMetadata()).thenReturn(new ObjectMeta()); + try (MockedStatic worker = + Mockito.mockStatic(ApplicationClientWorker.class)) { + ApplicationResourceSpec mockSpec = mock(ApplicationResourceSpec.class); + when(mockSpec.getConfiguredPod()).thenReturn(mockDriver); + ArgumentCaptor> captor = ArgumentCaptor.forClass(Map.class); + worker.when(() -> ApplicationClientWorker.getResourceSpec( + any(), any(), 
captor.capture())).thenReturn(mockSpec); + ApplicationResourceSpec spec = SparkApplicationReconcileUtils.buildResourceSpec(app, + mockClient); + worker.verify(() -> ApplicationClientWorker.getResourceSpec(eq(app), + eq(mockClient), any())); + Map props = captor.getValue(); + Assertions.assertTrue(props.containsKey("spark.kubernetes.namespace")); + Assertions.assertEquals("foo", props.get("spark.kubernetes.namespace")); + ArgumentCaptor metaArgumentCaptor = + ArgumentCaptor.forClass(ObjectMeta.class); + verify(mockDriver).setMetadata(metaArgumentCaptor.capture()); + Assertions.assertEquals(mockSpec, spec); + ObjectMeta metaOverride = metaArgumentCaptor.getValue(); + Assertions.assertEquals(1, metaOverride.getOwnerReferences().size()); + Assertions.assertEquals("bar-app", + metaOverride.getOwnerReferences().get(0).getName()); + Assertions.assertEquals("uid", + metaOverride.getOwnerReferences().get(0).getUid()); + Assertions.assertEquals(app.getKind(), + metaOverride.getOwnerReferences().get(0).getKind()); } + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java index a22a06d8..7f4212cb 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java @@ -18,17 +18,13 @@ package org.apache.spark.kubernetes.operator.reconciler; +import java.util.Collections; +import java.util.Optional; + import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.client.KubernetesClient; import io.javaoperatorsdk.operator.api.reconciler.Context; import io.javaoperatorsdk.operator.api.reconciler.DeleteControl; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; -import org.apache.spark.kubernetes.operator.health.SentinelManager; -import org.apache.spark.kubernetes.operator.status.ApplicationState; -import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; -import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -36,8 +32,13 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; -import java.util.Collections; -import java.util.Optional; +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.health.SentinelManager; +import org.apache.spark.kubernetes.operator.status.ApplicationState; +import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.utils.StatusRecorder; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -47,51 +48,51 @@ import static org.mockito.Mockito.when; class SparkApplicationReconcilerTest { - private StatusRecorder mockRecorder = mock(StatusRecorder.class); - private SentinelManager mockSentinelManager = mock(SentinelManager.class); - private KubernetesClient mockClient = 
mock(KubernetesClient.class); - private Context mockContext = mock(Context.class); - private Pod mockDriver = mock(Pod.class); - SparkApplication app = new SparkApplication(); - SparkApplicationReconciler reconciler = new SparkApplicationReconciler(mockRecorder, - mockSentinelManager); + private StatusRecorder mockRecorder = mock(StatusRecorder.class); + private SentinelManager mockSentinelManager = mock(SentinelManager.class); + private KubernetesClient mockClient = mock(KubernetesClient.class); + private Context mockContext = mock(Context.class); + private Pod mockDriver = mock(Pod.class); + SparkApplication app = new SparkApplication(); + SparkApplicationReconciler reconciler = new SparkApplicationReconciler(mockRecorder, + mockSentinelManager); - @BeforeEach - public void beforeEach() { - when(mockContext.getClient()).thenReturn(mockClient); - doNothing().when(mockRecorder).removeCachedStatus(any(SparkApplication.class)); - doAnswer(invocation -> { - app.setStatus(invocation.getArgument(1)); - return null; - }).when(mockRecorder).persistStatus(any(SparkApplicationContext.class), - any(ApplicationStatus.class)); - } + @BeforeEach + public void beforeEach() { + when(mockContext.getClient()).thenReturn(mockClient); + doNothing().when(mockRecorder).removeCachedStatus(any(SparkApplication.class)); + doAnswer(invocation -> { + app.setStatus(invocation.getArgument(1)); + return null; + }).when(mockRecorder).persistStatus(any(SparkApplicationContext.class), + any(ApplicationStatus.class)); + } - @Test - void testCleanupRunningApp() { - try (MockedConstruction mockAppContext = mockConstruction( - SparkApplicationContext.class, (mock, context) -> { - when(mock.getSparkApplication()).thenReturn(app); - when(mock.getClient()).thenReturn(mockClient); - when(mock.getDriverPod()).thenReturn(Optional.of(mockDriver)); - when(mock.getDriverPodSpec()).thenReturn(mockDriver); - when(mock.getDriverPreResourcesSpec()).thenReturn(Collections.emptyList()); - when(mock.getDriverResourcesSpec()).thenReturn(Collections.emptyList()); - }); MockedStatic utils = - Mockito.mockStatic(SparkReconcilerUtils.class)) { - // delete running app - app.setStatus(app.getStatus().appendNewState(new ApplicationState( - ApplicationStateSummary.RUNNING_HEALTHY, ""))); - DeleteControl deleteControl = reconciler.cleanup(app, mockContext); - Assertions.assertFalse(deleteControl.isRemoveFinalizer()); - utils.verify(() -> SparkReconcilerUtils.deleteResourceIfExists(mockClient, - mockDriver, false)); - Assertions.assertEquals(ApplicationStateSummary.RESOURCE_RELEASED, - app.getStatus().getCurrentState().getCurrentStateSummary()); + @Test + void testCleanupRunningApp() { + try (MockedConstruction mockAppContext = mockConstruction( + SparkApplicationContext.class, (mock, context) -> { + when(mock.getSparkApplication()).thenReturn(app); + when(mock.getClient()).thenReturn(mockClient); + when(mock.getDriverPod()).thenReturn(Optional.of(mockDriver)); + when(mock.getDriverPodSpec()).thenReturn(mockDriver); + when(mock.getDriverPreResourcesSpec()).thenReturn(Collections.emptyList()); + when(mock.getDriverResourcesSpec()).thenReturn(Collections.emptyList()); + }); MockedStatic utils = + Mockito.mockStatic(SparkReconcilerUtils.class)) { + // delete running app + app.setStatus(app.getStatus().appendNewState(new ApplicationState( + ApplicationStateSummary.RUNNING_HEALTHY, ""))); + DeleteControl deleteControl = reconciler.cleanup(app, mockContext); + Assertions.assertFalse(deleteControl.isRemoveFinalizer()); + utils.verify(() -> 
SparkReconcilerUtils.deleteResourceIfExists(mockClient, + mockDriver, false)); + Assertions.assertEquals(ApplicationStateSummary.RESOURCE_RELEASED, + app.getStatus().getCurrentState().getCurrentStateSummary()); - // proceed delete for terminated app - deleteControl = reconciler.cleanup(app, mockContext); - Assertions.assertTrue(deleteControl.isRemoveFinalizer()); - } + // proceed delete for terminated app + deleteControl = reconciler.cleanup(app, mockContext); + Assertions.assertTrue(deleteControl.isRemoveFinalizer()); } + } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java index 401f350d..7976e571 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/utils/TestUtils.java @@ -17,42 +17,43 @@ package org.apache.spark.kubernetes.operator.utils; -import io.fabric8.kubernetes.api.model.ObjectMeta; -import org.apache.spark.kubernetes.operator.SparkApplication; - import java.io.File; import java.util.Map; +import io.fabric8.kubernetes.api.model.ObjectMeta; + +import org.apache.spark.kubernetes.operator.SparkApplication; + import static org.apache.spark.kubernetes.operator.Constants.SENTINEL_LABEL; public class TestUtils { - public static SparkApplication createMockDeployment(String namespace) { - var cr = new SparkApplication(); - cr.setKind("org.apache.spark/v1alpha1"); - cr.setApiVersion("SparkApplication"); - cr.setSpec(cr.initSpec()); - var meta = new ObjectMeta(); - meta.setGeneration(0L); - meta.setLabels(Map.of(SENTINEL_LABEL, "true")); - meta.setName("sentinel"); - meta.setNamespace(namespace); - cr.setMetadata(meta); - return cr; + public static SparkApplication createMockDeployment(String namespace) { + var cr = new SparkApplication(); + cr.setKind("SparkApplication"); + cr.setApiVersion("org.apache.spark/v1alpha1"); + cr.setSpec(cr.initSpec()); + var meta = new ObjectMeta(); + meta.setGeneration(0L); + meta.setLabels(Map.of(SENTINEL_LABEL, "true")); + meta.setName("sentinel"); + meta.setNamespace(namespace); + cr.setMetadata(meta); + return cr; + } + + public static void cleanPropertiesFile(String filePath) { + File myObj = new File(filePath); + if (!myObj.delete()) { + throw new RuntimeException("Failed to clean properties file: " + filePath); } + } - public static void cleanPropertiesFile(String filePath) { - File myObj = new File(filePath); - if (!myObj.delete()) { - throw new RuntimeException("Failed to clean properties file: " + filePath); - } - } + public static boolean notTimedOut(long startTime, long maxWaitTimeInMills) { + long elapsedTimeInMills = calculateElapsedTimeInMills(startTime); + return elapsedTimeInMills < maxWaitTimeInMills; + } - public static boolean notTimedOut(long startTime, long maxWaitTimeInMills) { - long elapsedTimeInMills = calculateElapsedTimeInMills(startTime); - return elapsedTimeInMills < maxWaitTimeInMills; - } - - public static long calculateElapsedTimeInMills(long startTime) { - return System.currentTimeMillis() - startTime; - } + public static long calculateElapsedTimeInMills(long startTime) { + return System.currentTimeMillis() - startTime; + } } diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java index 4d89f439..631e57e4 
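The two-pass cleanup asserted above follows the java-operator-sdk DeleteControl contract: while resources are still being released, the reconciler keeps the finalizer so the custom resource survives until the driver pod is actually gone; a second pass on the terminated app removes the finalizer and lets the delete complete. A hedged sketch of that pattern, where resourcesReleased is a hypothetical stand-in for the operator's real terminal-state check:

import io.javaoperatorsdk.operator.api.reconciler.DeleteControl;

public class CleanupControlSketch {
  static DeleteControl cleanup(boolean resourcesReleased) {
    if (!resourcesReleased) {
      // Keep the finalizer and come back later: the CR must outlive its resources.
      return DeleteControl.noFinalizerRemoval().rescheduleAfter(2000L);
    }
    // Terminal state reached: allow Kubernetes to finish the deletion.
    return DeleteControl.defaultDelete();
  }
}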
100644 --- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java @@ -18,9 +18,13 @@ package org.apache.spark.kubernetes.operator; +import java.util.Map; + import io.fabric8.kubernetes.client.KubernetesClient; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; +import scala.Option; + import org.apache.spark.SparkConf; import org.apache.spark.deploy.k8s.KubernetesDriverSpec; import org.apache.spark.deploy.k8s.submit.JavaMainAppResource; @@ -29,9 +33,6 @@ import org.apache.spark.deploy.k8s.submit.PythonMainAppResource; import org.apache.spark.deploy.k8s.submit.RMainAppResource; import org.apache.spark.kubernetes.operator.spec.ApplicationSpec; -import scala.Option; - -import java.util.Map; /** * Similar to org.apache.spark.deploy.k8s.submit.KubernetesClientApplication @@ -39,69 +40,69 @@ */ public class ApplicationClientWorker { - public static ApplicationResourceSpec getResourceSpec( - org.apache.spark.kubernetes.operator.SparkApplication app, - KubernetesClient client, - Map confOverrides) { - ApplicationDriverConf applicationDriverConf = buildDriverConf(app, confOverrides); - return buildResourceSpec(applicationDriverConf, client); - } + public static ApplicationResourceSpec getResourceSpec( + org.apache.spark.kubernetes.operator.SparkApplication app, + KubernetesClient client, + Map confOverrides) { + ApplicationDriverConf applicationDriverConf = buildDriverConf(app, confOverrides); + return buildResourceSpec(applicationDriverConf, client); + } - protected static ApplicationDriverConf buildDriverConf( - org.apache.spark.kubernetes.operator.SparkApplication app, - Map confOverrides) { - ApplicationSpec applicationSpec = app.getSpec(); - SparkConf effectiveSparkConf = new SparkConf(); - if (MapUtils.isNotEmpty(applicationSpec.getSparkConf())) { - for (String confKey : applicationSpec.getSparkConf().keySet()) { - effectiveSparkConf.set(confKey, applicationSpec.getSparkConf().get(confKey)); - } - } - if (MapUtils.isNotEmpty(confOverrides)) { - for (Map.Entry entry : confOverrides.entrySet()) { - effectiveSparkConf.set(entry.getKey(), entry.getValue()); - } - } - effectiveSparkConf.set("spark.kubernetes.namespace", app.getMetadata().getNamespace()); - MainAppResource primaryResource = new JavaMainAppResource(Option.empty()); - if (StringUtils.isNotEmpty(applicationSpec.getJars())) { - primaryResource = new JavaMainAppResource(Option.apply(applicationSpec.getJars())); - effectiveSparkConf.setIfMissing("spark.jars", applicationSpec.getJars()); - } else if (StringUtils.isNotEmpty(applicationSpec.getPyFiles())) { - primaryResource = new PythonMainAppResource(applicationSpec.getPyFiles()); - effectiveSparkConf.setIfMissing("spark.submit.pyFiles", applicationSpec.getPyFiles()); - } else if (StringUtils.isNotEmpty(applicationSpec.getSparkRFiles())) { - primaryResource = new RMainAppResource(applicationSpec.getSparkRFiles()); - } - effectiveSparkConf.setMaster( - "k8s://https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"); - return ApplicationDriverConf.create(effectiveSparkConf, - createSparkAppId(app), - primaryResource, - applicationSpec.getMainClass(), - applicationSpec.getDriverArgs().toArray(new String[0]), - Option.apply(applicationSpec.getProxyUser())); + protected static ApplicationDriverConf buildDriverConf( + org.apache.spark.kubernetes.operator.SparkApplication app, + Map 
confOverrides) { + ApplicationSpec applicationSpec = app.getSpec(); + SparkConf effectiveSparkConf = new SparkConf(); + if (MapUtils.isNotEmpty(applicationSpec.getSparkConf())) { + for (String confKey : applicationSpec.getSparkConf().keySet()) { + effectiveSparkConf.set(confKey, applicationSpec.getSparkConf().get(confKey)); + } } - - protected static ApplicationResourceSpec buildResourceSpec( - ApplicationDriverConf kubernetesDriverConf, - KubernetesClient client) { - KubernetesDriverBuilder builder = new KubernetesDriverBuilder(); - KubernetesDriverSpec kubernetesDriverSpec = - builder.buildFromFeatures(kubernetesDriverConf, client); - return new ApplicationResourceSpec(kubernetesDriverConf, kubernetesDriverSpec); + if (MapUtils.isNotEmpty(confOverrides)) { + for (Map.Entry entry : confOverrides.entrySet()) { + effectiveSparkConf.set(entry.getKey(), entry.getValue()); + } } + effectiveSparkConf.set("spark.kubernetes.namespace", app.getMetadata().getNamespace()); + MainAppResource primaryResource = new JavaMainAppResource(Option.empty()); + if (StringUtils.isNotEmpty(applicationSpec.getJars())) { + primaryResource = new JavaMainAppResource(Option.apply(applicationSpec.getJars())); + effectiveSparkConf.setIfMissing("spark.jars", applicationSpec.getJars()); + } else if (StringUtils.isNotEmpty(applicationSpec.getPyFiles())) { + primaryResource = new PythonMainAppResource(applicationSpec.getPyFiles()); + effectiveSparkConf.setIfMissing("spark.submit.pyFiles", applicationSpec.getPyFiles()); + } else if (StringUtils.isNotEmpty(applicationSpec.getSparkRFiles())) { + primaryResource = new RMainAppResource(applicationSpec.getSparkRFiles()); + } + effectiveSparkConf.setMaster( + "k8s://https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"); + return ApplicationDriverConf.create(effectiveSparkConf, + createSparkAppId(app), + primaryResource, + applicationSpec.getMainClass(), + applicationSpec.getDriverArgs().toArray(new String[0]), + Option.apply(applicationSpec.getProxyUser())); + } + + protected static ApplicationResourceSpec buildResourceSpec( + ApplicationDriverConf kubernetesDriverConf, + KubernetesClient client) { + KubernetesDriverBuilder builder = new KubernetesDriverBuilder(); + KubernetesDriverSpec kubernetesDriverSpec = + builder.buildFromFeatures(kubernetesDriverConf, client); + return new ApplicationResourceSpec(kubernetesDriverConf, kubernetesDriverSpec); + } - /** - * Spark application id need to be deterministic per attempt per Spark App. - * This is to ensure operator reconciliation idempotency - */ - protected static String createSparkAppId( - final org.apache.spark.kubernetes.operator.SparkApplication app) { - long attemptId = 0L; - if (app.getStatus() != null && app.getStatus().getCurrentAttemptSummary() != null) { - attemptId = app.getStatus().getCurrentAttemptSummary().getAttemptInfo().getId(); - } - return String.format("%s-%d", app.getMetadata().getName(), attemptId); + /** + * Spark application id needs to be deterministic per attempt per Spark App. 
+ * This is to ensure operator reconciliation idempotency + */ + protected static String createSparkAppId( + final org.apache.spark.kubernetes.operator.SparkApplication app) { + long attemptId = 0L; + if (app.getStatus() != null && app.getStatus().getCurrentAttemptSummary() != null) { + attemptId = app.getStatus().getCurrentAttemptSummary().getAttemptInfo().getId(); } + return String.format("%s-%d", app.getMetadata().getName(), attemptId); + } } diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java index 92f5ea3f..9b96a88f 100644 --- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java @@ -18,48 +18,49 @@ package org.apache.spark.kubernetes.operator; +import scala.Option; + import org.apache.spark.SparkConf; import org.apache.spark.deploy.k8s.Config; import org.apache.spark.deploy.k8s.KubernetesDriverConf; import org.apache.spark.deploy.k8s.KubernetesVolumeUtils; import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils; import org.apache.spark.deploy.k8s.submit.MainAppResource; -import scala.Option; public class ApplicationDriverConf extends KubernetesDriverConf { - private ApplicationDriverConf(SparkConf sparkConf, - String appId, - MainAppResource mainAppResource, - String mainClass, - String[] appArgs, - Option proxyUser) { - super(sparkConf, appId, mainAppResource, mainClass, appArgs, proxyUser); - } + private ApplicationDriverConf(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { + super(sparkConf, appId, mainAppResource, mainClass, appArgs, proxyUser); + } - public static ApplicationDriverConf create(SparkConf sparkConf, - String appId, - MainAppResource mainAppResource, - String mainClass, - String[] appArgs, - Option proxyUser) { - // pre-create check only - KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, - Config.KUBERNETES_EXECUTOR_VOLUMES_PREFIX()); - return new ApplicationDriverConf(sparkConf, appId, mainAppResource, mainClass, appArgs, - proxyUser); - } + public static ApplicationDriverConf create(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { + // pre-create check only + KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, + Config.KUBERNETES_EXECUTOR_VOLUMES_PREFIX()); + return new ApplicationDriverConf(sparkConf, appId, mainAppResource, mainClass, appArgs, + proxyUser); + } - /** - * Application managed by operator has a deterministic prefix - */ - @Override - public String resourceNamePrefix() { - return sparkConf().getOption(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()).isEmpty() - ? appId() : sparkConf().get(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()); - } + /** + * Application managed by operator has a deterministic prefix + */ + @Override + public String resourceNamePrefix() { + return sparkConf().getOption(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()).isEmpty() + ? 
appId() : sparkConf().get(Config.KUBERNETES_DRIVER_POD_NAME_PREFIX().key()); + } - public String configMapNameDriver() { - return KubernetesClientUtils.configMapName( - String.format("spark-drv-%s", resourceNamePrefix())); - } + public String configMapNameDriver() { + return KubernetesClientUtils.configMapName( + String.format("spark-drv-%s", resourceNamePrefix())); + } } diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java index 7c2a6a93..56e24ea8 100644 --- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java @@ -18,24 +18,25 @@ package org.apache.spark.kubernetes.operator; +import java.util.ArrayList; +import java.util.List; + import io.fabric8.kubernetes.api.model.Container; import io.fabric8.kubernetes.api.model.ContainerBuilder; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.api.model.PodBuilder; import lombok.Getter; -import org.apache.spark.deploy.k8s.Config; -import org.apache.spark.deploy.k8s.Constants; -import org.apache.spark.deploy.k8s.KubernetesDriverSpec; -import org.apache.spark.deploy.k8s.SparkPod; -import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils; import scala.Tuple2; import scala.collection.JavaConverters; import scala.collection.immutable.HashMap; import scala.collection.immutable.Map; -import java.util.ArrayList; -import java.util.List; +import org.apache.spark.deploy.k8s.Config; +import org.apache.spark.deploy.k8s.Constants; +import org.apache.spark.deploy.k8s.KubernetesDriverSpec; +import org.apache.spark.deploy.k8s.SparkPod; +import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils; /** * Resembles resources that would be directly launched by operator. 
@@ -47,63 +48,63 @@ * This is not thread safe */ public class ApplicationResourceSpec { - @Getter - private final Pod configuredPod; - @Getter - private final List driverPreResources; - @Getter - private final List driverResources; - private final ApplicationDriverConf kubernetesDriverConf; + @Getter + private final Pod configuredPod; + @Getter + private final List driverPreResources; + @Getter + private final List driverResources; + private final ApplicationDriverConf kubernetesDriverConf; - public ApplicationResourceSpec(ApplicationDriverConf kubernetesDriverConf, - KubernetesDriverSpec kubernetesDriverSpec) { - this.kubernetesDriverConf = kubernetesDriverConf; - String namespace = - kubernetesDriverConf.sparkConf().get(Config.KUBERNETES_NAMESPACE().key()); - Map confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap( - kubernetesDriverConf.configMapNameDriver(), - kubernetesDriverConf.sparkConf(), kubernetesDriverSpec.systemProperties()) - .$plus(new Tuple2<>(Config.KUBERNETES_NAMESPACE().key(), namespace)); - SparkPod sparkPod = addConfigMap(kubernetesDriverSpec.pod(), confFilesMap); - this.configuredPod = new PodBuilder(sparkPod.pod()) - .editSpec() - .addToContainers(sparkPod.container()) - .endSpec() - .build(); - this.driverPreResources = new ArrayList<>( - JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverPreKubernetesResources())); - this.driverResources = new ArrayList<>( - JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverKubernetesResources())); - this.driverResources.add( - KubernetesClientUtils.buildConfigMap(kubernetesDriverConf.configMapNameDriver(), - confFilesMap, new HashMap<>())); - this.driverPreResources.forEach(r -> r.getMetadata().setNamespace(namespace)); - this.driverResources.forEach(r -> r.getMetadata().setNamespace(namespace)); - } + public ApplicationResourceSpec(ApplicationDriverConf kubernetesDriverConf, + KubernetesDriverSpec kubernetesDriverSpec) { + this.kubernetesDriverConf = kubernetesDriverConf; + String namespace = + kubernetesDriverConf.sparkConf().get(Config.KUBERNETES_NAMESPACE().key()); + Map confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap( + kubernetesDriverConf.configMapNameDriver(), + kubernetesDriverConf.sparkConf(), kubernetesDriverSpec.systemProperties()) + .$plus(new Tuple2<>(Config.KUBERNETES_NAMESPACE().key(), namespace)); + SparkPod sparkPod = addConfigMap(kubernetesDriverSpec.pod(), confFilesMap); + this.configuredPod = new PodBuilder(sparkPod.pod()) + .editSpec() + .addToContainers(sparkPod.container()) + .endSpec() + .build(); + this.driverPreResources = new ArrayList<>( + JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverPreKubernetesResources())); + this.driverResources = new ArrayList<>( + JavaConverters.seqAsJavaList(kubernetesDriverSpec.driverKubernetesResources())); + this.driverResources.add( + KubernetesClientUtils.buildConfigMap(kubernetesDriverConf.configMapNameDriver(), + confFilesMap, new HashMap<>())); + this.driverPreResources.forEach(r -> r.getMetadata().setNamespace(namespace)); + this.driverResources.forEach(r -> r.getMetadata().setNamespace(namespace)); + } - private SparkPod addConfigMap(SparkPod pod, Map confFilesMap) { - Container containerWithVolume = new ContainerBuilder(pod.container()) - .addNewEnv() - .withName(org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR()) - .withValue(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) - .endEnv() - .addNewVolumeMount() - .withName(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_VOLUME_DRIVER()) - 
.withMountPath(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) - .endVolumeMount() - .build(); - Pod podWithVolume = new PodBuilder(pod.pod()) - .editSpec() - .addNewVolume() - .withName(Constants.SPARK_CONF_VOLUME_DRIVER()) - .withNewConfigMap() - .withItems(JavaConverters.seqAsJavaList( - KubernetesClientUtils.buildKeyToPathObjects(confFilesMap))) - .withName(kubernetesDriverConf.configMapNameDriver()) - .endConfigMap() - .endVolume() - .endSpec() - .build(); - return new SparkPod(podWithVolume, containerWithVolume); - } + private SparkPod addConfigMap(SparkPod pod, Map confFilesMap) { + Container containerWithVolume = new ContainerBuilder(pod.container()) + .addNewEnv() + .withName(org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR()) + .withValue(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) + .endEnv() + .addNewVolumeMount() + .withName(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_VOLUME_DRIVER()) + .withMountPath(org.apache.spark.deploy.k8s.Constants.SPARK_CONF_DIR_INTERNAL()) + .endVolumeMount() + .build(); + Pod podWithVolume = new PodBuilder(pod.pod()) + .editSpec() + .addNewVolume() + .withName(Constants.SPARK_CONF_VOLUME_DRIVER()) + .withNewConfigMap() + .withItems(JavaConverters.seqAsJavaList( + KubernetesClientUtils.buildKeyToPathObjects(confFilesMap))) + .withName(kubernetesDriverConf.configMapNameDriver()) + .endConfigMap() + .endVolume() + .endSpec() + .build(); + return new SparkPod(podWithVolume, containerWithVolume); + } } diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java index 95137fc5..6fb68721 100644 --- a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java +++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java @@ -18,8 +18,18 @@ package org.apache.spark.kubernetes.operator; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; + import org.apache.spark.SparkConf; import org.apache.spark.deploy.k8s.submit.JavaMainAppResource; import org.apache.spark.deploy.k8s.submit.PythonMainAppResource; @@ -28,174 +38,165 @@ import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; import org.apache.spark.kubernetes.operator.status.AttemptInfo; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.mockito.MockedConstruction; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.when; class ApplicationClientWorkerTest { - @Test - void buildDriverConfShouldApplySpecAndPropertiesOverride() { - Map> constructorArgs = new HashMap<>(); - try (MockedConstruction mocked = mockConstruction( - ApplicationDriverConf.class, - (mock, context) -> constructorArgs.put(mock, - new ArrayList<>(context.arguments())))) { - 
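The addConfigMap logic shown above pairs a ConfigMap-backed volume with a mount inside the driver container so that Spark reads the generated properties from SPARK_CONF_DIR. As a standalone illustration, here is a hedged fabric8 sketch of that volume/mount wiring; the volume name, mount path, and ConfigMap name are illustrative placeholders, not the constants the operator actually uses.

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;

public class ConfVolumeSketch {
  public static void main(String[] args) {
    Pod driverPod = new PodBuilder()
        .withNewMetadata().withName("driver").endMetadata()
        .withNewSpec()
        .addNewContainer()
        .withName("spark-kubernetes-driver")
        .addNewVolumeMount()
        .withName("spark-conf-volume-driver")  // must match the volume below
        .withMountPath("/opt/spark/conf")      // stand-in for SPARK_CONF_DIR_INTERNAL
        .endVolumeMount()
        .endContainer()
        .addNewVolume()
        .withName("spark-conf-volume-driver")
        .withNewConfigMap()
        .withName("spark-drv-conf-map")        // stand-in for configMapNameDriver()
        .endConfigMap()
        .endVolume()
        .endSpec()
        .build();
    System.out.println(driverPod.getSpec().getVolumes().get(0).getName());
  }
}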
SparkApplication mockApp = mock(SparkApplication.class); - ApplicationSpec mockSpec = mock(ApplicationSpec.class); - ObjectMeta appMeta = new ObjectMetaBuilder() - .withName("app1") - .withNamespace("ns1") - .build(); - Map appProps = new HashMap<>(); - appProps.put("foo", "bar"); - appProps.put("spark.executor.instances", "1"); - appProps.put("spark.kubernetes.namespace", "ns2"); - Map overrides = new HashMap<>(); - overrides.put("spark.executor.instances", "5"); - overrides.put("spark.kubernetes.namespace", "ns3"); - when(mockSpec.getSparkConf()).thenReturn(appProps); - when(mockApp.getSpec()).thenReturn(mockSpec); - when(mockApp.getMetadata()).thenReturn(appMeta); - when(mockSpec.getProxyUser()).thenReturn("foo-user"); - when(mockSpec.getMainClass()).thenReturn("foo-class"); - when(mockSpec.getDriverArgs()).thenReturn(List.of("a", "b")); - - ApplicationDriverConf conf = - ApplicationClientWorker.buildDriverConf(mockApp, overrides); - Assertions.assertEquals(6, constructorArgs.get(conf).size()); - - // validate SparkConf with override - Assertions.assertTrue(constructorArgs.get(conf).get(0) instanceof SparkConf); - SparkConf createdConf = (SparkConf) constructorArgs.get(conf).get(0); - Assertions.assertEquals("bar", createdConf.get("foo")); - Assertions.assertEquals("5", createdConf.get("spark.executor.instances")); - - // namespace from CR takes highest precedence - Assertions.assertEquals("ns1", createdConf.get("spark.kubernetes.namespace")); - - // validate main resources - Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof JavaMainAppResource); - JavaMainAppResource mainResource = - (JavaMainAppResource) constructorArgs.get(conf).get(2); - Assertions.assertTrue(mainResource.primaryResource().isEmpty()); - - Assertions.assertEquals("foo-class", constructorArgs.get(conf).get(3)); - - Assertions.assertTrue(constructorArgs.get(conf).get(4) instanceof String[]); - String[] capturedArgs = (String[]) constructorArgs.get(conf).get(4); - Assertions.assertEquals(2, capturedArgs.length); - Assertions.assertEquals("a", capturedArgs[0]); - Assertions.assertEquals("b", capturedArgs[1]); - } + @Test + void buildDriverConfShouldApplySpecAndPropertiesOverride() { + Map> constructorArgs = new HashMap<>(); + try (MockedConstruction mocked = mockConstruction( + ApplicationDriverConf.class, + (mock, context) -> constructorArgs.put(mock, + new ArrayList<>(context.arguments())))) { + SparkApplication mockApp = mock(SparkApplication.class); + ApplicationSpec mockSpec = mock(ApplicationSpec.class); + ObjectMeta appMeta = new ObjectMetaBuilder() + .withName("app1") + .withNamespace("ns1") + .build(); + Map appProps = new HashMap<>(); + appProps.put("foo", "bar"); + appProps.put("spark.executor.instances", "1"); + appProps.put("spark.kubernetes.namespace", "ns2"); + Map overrides = new HashMap<>(); + overrides.put("spark.executor.instances", "5"); + overrides.put("spark.kubernetes.namespace", "ns3"); + when(mockSpec.getSparkConf()).thenReturn(appProps); + when(mockApp.getSpec()).thenReturn(mockSpec); + when(mockApp.getMetadata()).thenReturn(appMeta); + when(mockSpec.getProxyUser()).thenReturn("foo-user"); + when(mockSpec.getMainClass()).thenReturn("foo-class"); + when(mockSpec.getDriverArgs()).thenReturn(List.of("a", "b")); + + ApplicationDriverConf conf = + ApplicationClientWorker.buildDriverConf(mockApp, overrides); + Assertions.assertEquals(6, constructorArgs.get(conf).size()); + + // validate SparkConf with override + Assertions.assertTrue(constructorArgs.get(conf).get(0) instanceof 
SparkConf); + SparkConf createdConf = (SparkConf) constructorArgs.get(conf).get(0); + Assertions.assertEquals("bar", createdConf.get("foo")); + Assertions.assertEquals("5", createdConf.get("spark.executor.instances")); + + // namespace from CR takes highest precedence + Assertions.assertEquals("ns1", createdConf.get("spark.kubernetes.namespace")); + + // validate main resources + Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof JavaMainAppResource); + JavaMainAppResource mainResource = + (JavaMainAppResource) constructorArgs.get(conf).get(2); + Assertions.assertTrue(mainResource.primaryResource().isEmpty()); + + Assertions.assertEquals("foo-class", constructorArgs.get(conf).get(3)); + + Assertions.assertTrue(constructorArgs.get(conf).get(4) instanceof String[]); + String[] capturedArgs = (String[]) constructorArgs.get(conf).get(4); + Assertions.assertEquals(2, capturedArgs.length); + Assertions.assertEquals("a", capturedArgs[0]); + Assertions.assertEquals("b", capturedArgs[1]); } - - @Test - void buildDriverConfForPythonApp() { - Map> constructorArgs = new HashMap<>(); - try (MockedConstruction mocked = mockConstruction( - ApplicationDriverConf.class, - (mock, context) -> constructorArgs.put(mock, - new ArrayList<>(context.arguments())))) { - SparkApplication mockApp = mock(SparkApplication.class); - ApplicationSpec mockSpec = mock(ApplicationSpec.class); - ObjectMeta appMeta = new ObjectMetaBuilder() - .withName("app1") - .withNamespace("ns1") - .build(); - when(mockApp.getSpec()).thenReturn(mockSpec); - when(mockApp.getMetadata()).thenReturn(appMeta); - when(mockSpec.getPyFiles()).thenReturn("foo"); - - ApplicationDriverConf conf = - ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap()); - Assertions.assertEquals(6, constructorArgs.get(conf).size()); - - // validate main resources - Assertions.assertTrue( - constructorArgs.get(conf).get(2) instanceof PythonMainAppResource); - PythonMainAppResource mainResource = - (PythonMainAppResource) constructorArgs.get(conf).get(2); - Assertions.assertEquals("foo", mainResource.primaryResource()); - } + } + + @Test + void buildDriverConfForPythonApp() { + Map> constructorArgs = new HashMap<>(); + try (MockedConstruction mocked = mockConstruction( + ApplicationDriverConf.class, + (mock, context) -> constructorArgs.put(mock, + new ArrayList<>(context.arguments())))) { + SparkApplication mockApp = mock(SparkApplication.class); + ApplicationSpec mockSpec = mock(ApplicationSpec.class); + ObjectMeta appMeta = new ObjectMetaBuilder() + .withName("app1") + .withNamespace("ns1") + .build(); + when(mockApp.getSpec()).thenReturn(mockSpec); + when(mockApp.getMetadata()).thenReturn(appMeta); + when(mockSpec.getPyFiles()).thenReturn("foo"); + + ApplicationDriverConf conf = + ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap()); + Assertions.assertEquals(6, constructorArgs.get(conf).size()); + + // validate main resources + Assertions.assertTrue( + constructorArgs.get(conf).get(2) instanceof PythonMainAppResource); + PythonMainAppResource mainResource = + (PythonMainAppResource) constructorArgs.get(conf).get(2); + Assertions.assertEquals("foo", mainResource.primaryResource()); } - - @Test - void buildDriverConfForRApp() { - Map> constructorArgs = new HashMap<>(); - try (MockedConstruction mocked = mockConstruction( - ApplicationDriverConf.class, - (mock, context) -> constructorArgs.put(mock, - new ArrayList<>(context.arguments())))) { - SparkApplication mockApp = mock(SparkApplication.class); - 
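The precedence asserted above (the CR's metadata namespace wins over operator overrides, which in turn win over spec-level sparkConf entries) falls out of the order in which buildDriverConf layers its SparkConf: later set calls overwrite earlier ones. A small sketch of that layering under the same sample values as the test:

import java.util.Map;
import org.apache.spark.SparkConf;

public class ConfLayeringSketch {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf(false);
    // Layer 1: spec-level sparkConf from the CR.
    Map.of("spark.executor.instances", "1",
        "spark.kubernetes.namespace", "ns2").forEach(conf::set);
    // Layer 2: operator-provided overrides.
    Map.of("spark.executor.instances", "5",
        "spark.kubernetes.namespace", "ns3").forEach(conf::set);
    // Layer 3: namespace forced from CR metadata, applied last so it wins.
    conf.set("spark.kubernetes.namespace", "ns1");
    System.out.println(conf.get("spark.executor.instances"));   // 5
    System.out.println(conf.get("spark.kubernetes.namespace")); // ns1
  }
}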
ApplicationSpec mockSpec = mock(ApplicationSpec.class); - ObjectMeta appMeta = new ObjectMetaBuilder() - .withName("app1") - .withNamespace("ns1") - .build(); - when(mockApp.getSpec()).thenReturn(mockSpec); - when(mockApp.getMetadata()).thenReturn(appMeta); - when(mockSpec.getSparkRFiles()).thenReturn("foo"); - - ApplicationDriverConf conf = - ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap()); - Assertions.assertEquals(6, constructorArgs.get(conf).size()); - - // validate main resources - Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof RMainAppResource); - RMainAppResource mainResource = (RMainAppResource) constructorArgs.get(conf).get(2); - Assertions.assertEquals("foo", mainResource.primaryResource()); - } - } - - @Test - void sparkAppIdShouldBeDeterministicPerAppPerAttempt() { - SparkApplication mockApp1 = mock(SparkApplication.class); - SparkApplication mockApp2 = mock(SparkApplication.class); - ApplicationStatus mockStatus1 = mock(ApplicationStatus.class); - ApplicationStatus mockStatus2 = mock(ApplicationStatus.class); - String appName1 = "app1"; - String appName2 = "app2"; - ObjectMeta appMeta1 = new ObjectMetaBuilder() - .withName(appName1) - .withNamespace("ns") - .build(); - ObjectMeta appMeta2 = new ObjectMetaBuilder() - .withName(appName2) - .withNamespace("ns") - .build(); - when(mockApp1.getMetadata()).thenReturn(appMeta1); - when(mockApp2.getMetadata()).thenReturn(appMeta2); - when(mockApp1.getStatus()).thenReturn(mockStatus1); - when(mockApp2.getStatus()).thenReturn(mockStatus2); - - String appId1 = ApplicationClientWorker.createSparkAppId(mockApp1); - String appId2 = ApplicationClientWorker.createSparkAppId(mockApp2); - - Assertions.assertNotEquals(appId1, appId2); - Assertions.assertTrue(appId1.contains(appName1)); - // multiple invoke shall give same result - Assertions.assertEquals(appId1, ApplicationClientWorker.createSparkAppId(mockApp1)); - - ApplicationAttemptSummary mockAttempt = mock(ApplicationAttemptSummary.class); - AttemptInfo mockAttemptInfo = mock(AttemptInfo.class); - when(mockAttempt.getAttemptInfo()).thenReturn(mockAttemptInfo); - when(mockAttemptInfo.getId()).thenReturn(2L); - when(mockStatus1.getCurrentAttemptSummary()).thenReturn(mockAttempt); - - String appId1Attempt2 = ApplicationClientWorker.createSparkAppId(mockApp1); - Assertions.assertTrue(appId1Attempt2.contains(appName1)); - Assertions.assertNotEquals(appId1, appId1Attempt2); - - Assertions.assertEquals(appId1Attempt2, ApplicationClientWorker.createSparkAppId(mockApp1)); + } + + @Test + void buildDriverConfForRApp() { + Map> constructorArgs = new HashMap<>(); + try (MockedConstruction mocked = mockConstruction( + ApplicationDriverConf.class, + (mock, context) -> constructorArgs.put(mock, + new ArrayList<>(context.arguments())))) { + SparkApplication mockApp = mock(SparkApplication.class); + ApplicationSpec mockSpec = mock(ApplicationSpec.class); + ObjectMeta appMeta = new ObjectMetaBuilder() + .withName("app1") + .withNamespace("ns1") + .build(); + when(mockApp.getSpec()).thenReturn(mockSpec); + when(mockApp.getMetadata()).thenReturn(appMeta); + when(mockSpec.getSparkRFiles()).thenReturn("foo"); + + ApplicationDriverConf conf = + ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap()); + Assertions.assertEquals(6, constructorArgs.get(conf).size()); + + // validate main resources + Assertions.assertTrue(constructorArgs.get(conf).get(2) instanceof RMainAppResource); + RMainAppResource mainResource = (RMainAppResource) 
constructorArgs.get(conf).get(2); + Assertions.assertEquals("foo", mainResource.primaryResource()); } + } + + @Test + void sparkAppIdShouldBeDeterministicPerAppPerAttempt() { + SparkApplication mockApp1 = mock(SparkApplication.class); + SparkApplication mockApp2 = mock(SparkApplication.class); + ApplicationStatus mockStatus1 = mock(ApplicationStatus.class); + ApplicationStatus mockStatus2 = mock(ApplicationStatus.class); + String appName1 = "app1"; + String appName2 = "app2"; + ObjectMeta appMeta1 = new ObjectMetaBuilder() + .withName(appName1) + .withNamespace("ns") + .build(); + ObjectMeta appMeta2 = new ObjectMetaBuilder() + .withName(appName2) + .withNamespace("ns") + .build(); + when(mockApp1.getMetadata()).thenReturn(appMeta1); + when(mockApp2.getMetadata()).thenReturn(appMeta2); + when(mockApp1.getStatus()).thenReturn(mockStatus1); + when(mockApp2.getStatus()).thenReturn(mockStatus2); + + String appId1 = ApplicationClientWorker.createSparkAppId(mockApp1); + String appId2 = ApplicationClientWorker.createSparkAppId(mockApp2); + + Assertions.assertNotEquals(appId1, appId2); + Assertions.assertTrue(appId1.contains(appName1)); + // multiple invocations shall give the same result + Assertions.assertEquals(appId1, ApplicationClientWorker.createSparkAppId(mockApp1)); + + ApplicationAttemptSummary mockAttempt = mock(ApplicationAttemptSummary.class); + AttemptInfo mockAttemptInfo = mock(AttemptInfo.class); + when(mockAttempt.getAttemptInfo()).thenReturn(mockAttemptInfo); + when(mockAttemptInfo.getId()).thenReturn(2L); + when(mockStatus1.getCurrentAttemptSummary()).thenReturn(mockAttempt); + + String appId1Attempt2 = ApplicationClientWorker.createSparkAppId(mockApp1); + Assertions.assertTrue(appId1Attempt2.contains(appName1)); + Assertions.assertNotEquals(appId1, appId1Attempt2); + + Assertions.assertEquals(appId1Attempt2, ApplicationClientWorker.createSparkAppId(mockApp1)); + } } diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java index 9f744d93..58e793bb 100644 --- a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java +++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java @@ -18,6 +18,8 @@ package org.apache.spark.kubernetes.operator; +import java.util.Collections; + import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.Container; import io.fabric8.kubernetes.api.model.ContainerBuilder; @@ -25,84 +27,83 @@ import io.fabric8.kubernetes.api.model.PodBuilder; import io.fabric8.kubernetes.api.model.Volume; import io.fabric8.kubernetes.api.model.VolumeMount; -import org.apache.spark.SparkConf; -import org.apache.spark.deploy.k8s.KubernetesDriverSpec; -import org.apache.spark.deploy.k8s.SparkPod; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import scala.collection.immutable.HashMap; -import java.util.Collections; +import org.apache.spark.SparkConf; +import org.apache.spark.deploy.k8s.KubernetesDriverSpec; +import org.apache.spark.deploy.k8s.SparkPod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; class ApplicationResourceSpecTest { - @Test - void testDriverResourceIncludesConfigMap() { - ApplicationDriverConf mockConf = mock(ApplicationDriverConf.class); - when(mockConf.configMapNameDriver()).thenReturn("foo-configmap"); 
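The determinism contract tested above reduces createSparkAppId to a pure function of the CR name and the attempt id: every reconcile pass for the same attempt computes the same application id, so retried submissions stay idempotent instead of spawning duplicates. A minimal sketch, where sparkAppId is a hypothetical stand-in for createSparkAppId:

public class AppIdSketch {
  static String sparkAppId(String crName, long attemptId) {
    // The same (name, attempt) pair always yields the same id.
    return String.format("%s-%d", crName, attemptId);
  }

  public static void main(String[] args) {
    System.out.println(sparkAppId("spark-pi", 0)); // spark-pi-0
    System.out.println(sparkAppId("spark-pi", 2)); // spark-pi-2, new attempt gives new id
  }
}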
- when(mockConf.sparkConf()).thenReturn( - new SparkConf().set("spark.kubernetes.namespace", "foo-namespace")); + @Test + void testDriverResourceIncludesConfigMap() { + ApplicationDriverConf mockConf = mock(ApplicationDriverConf.class); + when(mockConf.configMapNameDriver()).thenReturn("foo-configmap"); + when(mockConf.sparkConf()).thenReturn( + new SparkConf().set("spark.kubernetes.namespace", "foo-namespace")); - KubernetesDriverSpec mockSpec = mock(KubernetesDriverSpec.class); - Container container = new ContainerBuilder() - .withName("foo-container") - .addNewVolumeMount() - .withName("placeholder") - .endVolumeMount() - .build(); - Pod pod = new PodBuilder() - .withNewMetadata() - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("placeholder") - .endContainer() - .addNewVolume() - .withName("placeholder") - .endVolume() - .endSpec() - .build(); - SparkPod sparkPod = new SparkPod(pod, container); - when(mockSpec.driverKubernetesResources()).thenReturn( - scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); - when(mockSpec.driverPreKubernetesResources()).thenReturn( - scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); - when(mockSpec.pod()).thenReturn(sparkPod); - when(mockSpec.systemProperties()).thenReturn(new HashMap<>()); + KubernetesDriverSpec mockSpec = mock(KubernetesDriverSpec.class); + Container container = new ContainerBuilder() + .withName("foo-container") + .addNewVolumeMount() + .withName("placeholder") + .endVolumeMount() + .build(); + Pod pod = new PodBuilder() + .withNewMetadata() + .endMetadata() + .withNewSpec() + .addNewContainer() + .withName("placeholder") + .endContainer() + .addNewVolume() + .withName("placeholder") + .endVolume() + .endSpec() + .build(); + SparkPod sparkPod = new SparkPod(pod, container); + when(mockSpec.driverKubernetesResources()).thenReturn( + scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); + when(mockSpec.driverPreKubernetesResources()).thenReturn( + scala.collection.JavaConverters.asScalaBuffer(Collections.emptyList())); + when(mockSpec.pod()).thenReturn(sparkPod); + when(mockSpec.systemProperties()).thenReturn(new HashMap<>()); - ApplicationResourceSpec applicationResourceSpec = - new ApplicationResourceSpec(mockConf, mockSpec); + ApplicationResourceSpec applicationResourceSpec = + new ApplicationResourceSpec(mockConf, mockSpec); - Assertions.assertEquals(1, applicationResourceSpec.getDriverResources().size()); - Assertions.assertEquals(ConfigMap.class, - applicationResourceSpec.getDriverResources().get(0).getClass()); + Assertions.assertEquals(1, applicationResourceSpec.getDriverResources().size()); + Assertions.assertEquals(ConfigMap.class, + applicationResourceSpec.getDriverResources().get(0).getClass()); - ConfigMap proposedConfigMap = - (ConfigMap) applicationResourceSpec.getDriverResources().get(0); - Assertions.assertEquals("foo-configmap", proposedConfigMap.getMetadata().getName()); - Assertions.assertEquals("foo-namespace", - proposedConfigMap.getData().get("spark.kubernetes.namespace")); - Assertions.assertEquals("foo-namespace", proposedConfigMap.getMetadata().getNamespace()); + ConfigMap proposedConfigMap = + (ConfigMap) applicationResourceSpec.getDriverResources().get(0); + Assertions.assertEquals("foo-configmap", proposedConfigMap.getMetadata().getName()); + Assertions.assertEquals("foo-namespace", + proposedConfigMap.getData().get("spark.kubernetes.namespace")); + Assertions.assertEquals("foo-namespace", 
proposedConfigMap.getMetadata().getNamespace()); - Assertions.assertEquals(2, - applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().size()); - Volume proposedConfigVolume = - applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().get(1); - Assertions.assertEquals("foo-configmap", proposedConfigVolume.getConfigMap().getName()); + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().size()); + Volume proposedConfigVolume = + applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().get(1); + Assertions.assertEquals("foo-configmap", proposedConfigVolume.getConfigMap().getName()); - Assertions.assertEquals(2, - applicationResourceSpec.getConfiguredPod().getSpec().getContainers().size()); - Assertions.assertEquals(2, - applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) - .getVolumeMounts().size()); - VolumeMount proposedConfigVolumeMount = - applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) - .getVolumeMounts().get(1); - Assertions.assertEquals(proposedConfigVolume.getName(), - proposedConfigVolumeMount.getName()); - } + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().size()); + Assertions.assertEquals(2, + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) + .getVolumeMounts().size()); + VolumeMount proposedConfigVolumeMount = + applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1) + .getVolumeMounts().get(1); + Assertions.assertEquals(proposedConfigVolume.getName(), + proposedConfigVolumeMount.getName()); + } } From 85c623e16359c144ecc20673201963231470ed7c Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Thu, 4 Apr 2024 17:25:14 -0700 Subject: [PATCH 04/14] Update runtime JDK version to 17, upgrade Spark and log4j versions. Also update chatty health probe logger to debug. --- Dockerfile | 4 ++-- gradle.properties | 6 +++--- .../apache/spark/kubernetes/operator/probe/HealthProbe.java | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 67934e61..a0233a5d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,13 +16,13 @@ # # -FROM gradle:8.6-jdk11-alpine AS builder +FROM gradle:8.6-jdk17-alpine AS builder ARG BASE_VERSION WORKDIR /app COPY . . 
RUN ./gradlew clean build -x test -FROM eclipse-temurin:11-jre-jammy +FROM eclipse-temurin:17-jre-jammy ARG BASE_VERSION ENV SPARK_OPERATOR_HOME=/opt/spark-operator diff --git a/gradle.properties b/gradle.properties index 7a1c41ff..0c46dac0 100644 --- a/gradle.properties +++ b/gradle.properties @@ -21,17 +21,17 @@ commonsLang3Version=3.12.0 commonsIOVersion=2.11.0 commonsConfigurationVersion=2.9.0 dropwizardMetricsVersion=4.2.25 -# FIXME: known binary incompatible lib brought in by Spark +# Caution: fabric8 version should be aligned with Spark dependency fabric8Version=6.7.2 lombokVersion=1.18.30 operatorSDKVersion=4.7.0 okHttpVersion=4.11.0 # Spark -sparkVersion=3.5.0 +sparkVersion=3.5.1 sparkScalaVersion=2.12 # Logging slf4jVersion=1.7.36 -log4jVersion=2.17.1 +log4jVersion=2.23.1 log4jLayoutVersion=2.17.1 # Test junitVersion=5.9.2 diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java index a175c490..1128b511 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/probe/HealthProbe.java @@ -85,7 +85,7 @@ public void handle(HttpExchange exchange) throws IOException { } private boolean checkInformersHealth(RuntimeInfo operatorRuntimeInfo) { - log.info("Checking informer health"); + log.debug("Checking informer health"); List informersHealthList = new ArrayList<>(); for (var controllerEntry : operatorRuntimeInfo.unhealthyInformerWrappingEventSourceHealthIndicator() From d0db806b2898730dc2da069dbe7f7599656bfc9a Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Thu, 4 Apr 2024 17:41:20 -0700 Subject: [PATCH 05/14] Remove deprecated Spark versions, and update JDK version for build --- .github/workflows/build_and_test.yml | 2 +- .../spark_3_4_1/pyspark-example.yaml | 31 ------------- .../spark_3_4_1/spark-pi_scala_2.12.yaml | 33 ------------- .../spark_3_4_1/sparkr-example.yaml | 46 ------------------- .../spark_3_5_0/pyspark-example.yaml | 32 ------------- .../spark_3_5_0/spark-pi_scala_2.12.yaml | 34 -------------- .../spark_3_5_0/sparkr-example.yaml | 46 ------------------- .../kubernetes/operator/spec/JDKVersion.java | 2 +- .../operator/spec/SparkVersion.java | 4 +- 9 files changed, 3 insertions(+), 227 deletions(-) delete mode 100644 e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml delete mode 100644 e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml delete mode 100644 e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml delete mode 100644 e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml delete mode 100644 e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml delete mode 100644 e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 7de73818..732f482c 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -31,7 +31,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - java-version: [ 11, 17 ] + java-version: [ 11, 17, 21 ] steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml b/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml deleted file mode 100644 index 98e8ad1c..00000000 --- a/e2e-tests/spark-apps/spark_3_4_1/pyspark-example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# -# Licensed to the Apache Software Foundation 
(ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: py-spark-pi-341 -spec: - pyFiles: "local:///opt/spark/examples/src/main/python/pi.py" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - runtimeVersions: - sparkVersion: v3_4_1 diff --git a/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml b/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml deleted file mode 100644 index 7889902e..00000000 --- a/e2e-tests/spark-apps/spark_3_4_1/spark-pi_scala_2.12.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: spark-pi-341-212 -spec: - mainClass: "org.apache.spark.examples.SparkPi" - jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - runtimeVersions: - scalaVersion: v2_12 - sparkVersion: v3_4_1 diff --git a/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml b/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml deleted file mode 100644 index f3e41189..00000000 --- a/e2e-tests/spark-apps/spark_3_4_1/sparkr-example.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: sparkr-example-341 -spec: - driverSpec: - podTemplateSpec: - metadata: - spec: - containers: - - name: driver - workingDir: /opt/spark - executorSpec: - podTemplateSpec: - metadata: - spec: - containers: - - name: executor - workingDir: /opt/spark - sparkRFiles: "local:///opt/spark/examples/src/main/r/dataframe.R" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - spark.home: "/opt/spark" - runtimeVersions: - sparkVersion: v3_4_1 diff --git a/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml b/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml deleted file mode 100644 index 4125af91..00000000 --- a/e2e-tests/spark-apps/spark_3_5_0/pyspark-example.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: py-spark-pi-350 -spec: - pyFiles: "local:///opt/spark/examples/src/main/python/pi.py" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - # This is the same as spark:3.5.0-java17-python3 - spark.kubernetes.container.image: "spark:3.5.0-java17-python3" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - runtimeVersions: - sparkVersion: v3_5_0 diff --git a/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml b/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml deleted file mode 100644 index 59c04cae..00000000 --- a/e2e-tests/spark-apps/spark_3_5_0/spark-pi_scala_2.12.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: spark-pi-350-212 -spec: - mainClass: "org.apache.spark.examples.SparkPi" - jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - # This is the same as spark:3.5.0-java17-python3 - spark.kubernetes.container.image: "spark:3.5.0-java17-python3" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - runtimeVersions: - scalaVersion: v2_12 - sparkVersion: v3_5_0 diff --git a/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml b/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml deleted file mode 100644 index 6f1557dc..00000000 --- a/e2e-tests/spark-apps/spark_3_5_0/sparkr-example.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# - -apiVersion: org.apache.spark/v1alpha1 -kind: SparkApplication -metadata: - name: sparkr-example-350 -spec: - driverSpec: - podTemplateSpec: - metadata: - spec: - containers: - - name: driver - workingDir: /opt/spark - executorSpec: - podTemplateSpec: - metadata: - spec: - containers: - - name: executor - workingDir: /opt/spark - sparkRFiles: "local:///opt/spark/examples/src/main/r/dataframe.R" - sparkConf: - spark.executor.instances: "1" - # see also https://hub.docker.com/_/spark - spark.kubernetes.container.image: "spark:3.5.0-java17-r" - spark.kubernetes.authenticate.driver.serviceAccountName: "spark" - spark.home: "/opt/spark" - runtimeVersions: - sparkVersion: v3_5_0 diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java index 8834a28c..5163ceef 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/JDKVersion.java @@ -21,5 +21,5 @@ public enum JDKVersion { Java11, Java17, - Java23 + Java21 } diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java index 6e787562..d4e13de5 100644 --- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java +++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/SparkVersion.java @@ -24,7 +24,5 @@ */ public enum SparkVersion { v3_5_1, - v3_5_0, - v3_4_2, - v3_4_1, + v3_4_2 } From 33325e7a82f3895c64e81c6348283882fa33ad41 Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Fri, 5 Apr 2024 13:58:55 -0700 Subject: [PATCH 06/14] Remove deprecated versions from example & update generated CRD --- .../crds/sparkapplications.org.apache.spark-v1.yml | 4 +--- spark-operator-docs/getting_started.md | 6 +++--- spark-operator/src/main/resources/spark-pi.yaml | 6 +++--- spark-operator/src/main/resources/streaming.yaml | 6 +++--- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml index 4bf760cc..87cd3e09 100644 --- a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml +++ b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml @@ -26,9 +26,7 @@ spec: sparkVersion: enum: - v3_5_1 - - v3_5_0 - v3_4_2 - - v3_4_1 type: string scalaVersion: enum: @@ -39,7 +37,7 @@ spec: enum: - Java11 - Java17 - - Java23 + - Java21 type: string required: - sparkVersion diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md index 28cdce7f..31757a56 100644 --- a/spark-operator-docs/getting_started.md +++ b/spark-operator-docs/getting_started.md @@ -127,15 +127,15 @@ spec: restartPolicy: NEVER deploymentMode: CLUSTER_MODE driverArgs: [] - jars: local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar + jars: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar mainClass: org.apache.spark.examples.SparkPi runtimeVersions: scalaVersion: v2_12 - sparkVersion: v3_4_1 + sparkVersion: v3_5_1 sparkConf: spark.executor.instances: "5" spark.kubernetes.authenticate.driver.serviceAccountName: spark - 
spark.kubernetes.container.image: spark:3.4.1-scala2.12-java11-python3-r-ubuntu + spark.kubernetes.container.image: spark:3.5.1-scala2.12-java11-python3-r-ubuntu spark.kubernetes.namespace: spark-test status: currentAttemptSummary: diff --git a/spark-operator/src/main/resources/spark-pi.yaml b/spark-operator/src/main/resources/spark-pi.yaml index cad6bb83..52b10681 100644 --- a/spark-operator/src/main/resources/spark-pi.yaml +++ b/spark-operator/src/main/resources/spark-pi.yaml @@ -22,14 +22,14 @@ metadata: name: spark-pi spec: mainClass: "org.apache.spark.examples.SparkPi" - jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar" sparkConf: spark.executor.instances: "5" - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java11-python3-r-ubuntu" spark.kubernetes.namespace: "spark-test" spark.kubernetes.authenticate.driver.serviceAccountName: "spark" applicationTolerations: deleteOnTermination: false runtimeVersions: scalaVersion: v2_12 - sparkVersion: v3_4_1 + sparkVersion: v3_5_1 diff --git a/spark-operator/src/main/resources/streaming.yaml b/spark-operator/src/main/resources/streaming.yaml index 61b119fc..4c9ebe99 100644 --- a/spark-operator/src/main/resources/streaming.yaml +++ b/spark-operator/src/main/resources/streaming.yaml @@ -76,16 +76,16 @@ spec: seccompProfile: type: RuntimeDefault mainClass: "org.apache.spark.examples.streaming.NetworkWordCount" - jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.4.1.jar" + jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar" driverArgs: - "localhost" - "9999" sparkConf: spark.executor.instances: "5" - spark.kubernetes.container.image: "spark:3.4.1-scala2.12-java11-python3-r-ubuntu" + spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java11-python3-r-ubuntu" spark.kubernetes.namespace: "spark-test" spark.kubernetes.authenticate.driver.serviceAccountName: "spark" spark.kubernetes.driver.podTemplateContainerName: "main-container" runtimeVersions: scalaVersion: v2_12 - sparkVersion: v3_4_1 + sparkVersion: v3_5_1 From bae3e140f20b6e227b3eab6691bbb7675314bc88 Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Mon, 8 Apr 2024 15:56:46 -0700 Subject: [PATCH 07/14] Fully qualify the property names. 
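With the PREFIX constant inlined, each entry in SparkOperatorConf now reads exactly as the
property name users configure. As a sketch of the intended usage (the ConfigMap name below is
illustrative, and only options that are not built with enableDynamicOverride(false) can be
hot-reloaded), a dynamic config map would carry the fully qualified keys verbatim:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # illustrative name; the operator locates the map via the dynamic config selector
  name: spark-kubernetes-operator-dynamic-config
  namespace: spark-system
data:
  # keys are the fully qualified property names from SparkOperatorConf
  spark.operator.app.reconcile.interval.seconds: "120"
  spark.operator.trim.attempt.state.transition.history: "true"
```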
--- .../operator/config/SparkOperatorConf.java | 73 +++++++++---------- 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java index a6675e05..32dbd71a 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java @@ -38,27 +38,26 @@ */ @Slf4j public class SparkOperatorConf { - public static final String PREFIX = "spark.operator."; public static final String METRIC_PREFIX = "spark.metrics.conf.operator."; public static final String SINK = "sink."; public static final String CLASS = "class"; public static final ConfigOption OperatorAppName = ConfigOption.builder() - .key(PREFIX + "name") + .key("spark.operator.name") .typeParameterClass(String.class) .description("Name of the operator.") .defaultValue("spark-kubernetes-operator") .enableDynamicOverride(false) .build(); public static final ConfigOption OperatorNamespace = ConfigOption.builder() - .key(PREFIX + "namespace") + .key("spark.operator.namespace") .typeParameterClass(String.class) .description("Namespace that operator is deployed within.") .defaultValue("spark-system") .enableDynamicOverride(false) .build(); public static final ConfigOption DynamicConfigEnabled = ConfigOption.builder() - .key(PREFIX + "dynamic.config.enabled") + .key("spark.operator.dynamic.config.enabled") .typeParameterClass(Boolean.class) .description( "When enabled, operator would use config map as source of truth for config " + @@ -69,7 +68,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption DynamicConfigSelectorStr = ConfigOption.builder() - .key(PREFIX + "dynamic.config.selector.str") + .key("spark.operator.dynamic.config.selector.str") .typeParameterClass(String.class) .description("The selector str applied to dynamic config map.") .defaultValue(labelsAsStr(defaultOperatorConfigLabels())) @@ -77,7 +76,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption TerminateOnInformerFailure = ConfigOption.builder() - .key(PREFIX + "terminate.on.informer.failure") + .key("spark.operator.terminate.on.informer.failure") .typeParameterClass(Boolean.class) .description( "Enable to indicate informer errors should stop operator startup. If " + @@ -89,7 +88,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption TerminationTimeoutSeconds = ConfigOption.builder() - .key(PREFIX + "termination.timeout.seconds") + .key("spark.operator.termination.timeout.seconds") .description( "Grace period for operator shutdown before reconciliation threads " + "are killed.") @@ -99,7 +98,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption ReconcilerParallelism = ConfigOption.builder() - .key(PREFIX + "reconciler.parallelism") + .key("spark.operator.reconciler.parallelism") .description( "Thread pool size for Spark Operator reconcilers. 
Use -1 for " + "unbounded pool.") @@ -109,7 +108,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption RateLimiterRefreshPeriodSeconds = ConfigOption.builder() - .key(PREFIX + "rate.limiter.refresh.period.seconds") + .key("spark.operator.rate.limiter.refresh.period.seconds") .description( "Operator rate limiter refresh period(in seconds) for each resource.") .enableDynamicOverride(false) @@ -117,7 +116,7 @@ public class SparkOperatorConf { .defaultValue(15) .build(); public static final ConfigOption RateLimiterLimit = ConfigOption.builder() - .key(PREFIX + "rate.limiter.limit") + .key("spark.operator.rate.limiter.limit") .description( "Max number of reconcile loops triggered within the rate limiter refresh " + "period for each resource. Setting the limit <= 0 disables the " + @@ -128,7 +127,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption RetryInitialInternalSeconds = ConfigOption.builder() - .key(PREFIX + "retry.initial.internal.seconds") + .key("spark.operator.retry.initial.internal.seconds") .description( "Initial interval(in seconds) of retries on unhandled controller " + "errors.") @@ -138,7 +137,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption RetryInternalMultiplier = ConfigOption.builder() - .key(PREFIX + "retry.internal.multiplier") + .key("spark.operator.retry.internal.multiplier") .description("Interval multiplier of retries on unhandled controller errors.") .enableDynamicOverride(false) .typeParameterClass(Double.class) @@ -146,7 +145,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption RetryMaxIntervalSeconds = ConfigOption.builder() - .key(PREFIX + "retry.max.interval.seconds") + .key("spark.operator.retry.max.interval.seconds") .description( "Max interval(in seconds) of retries on unhandled controller errors. " + "Set to -1 for unlimited.") @@ -155,14 +154,14 @@ public class SparkOperatorConf { .defaultValue(-1) .build(); public static final ConfigOption RetryMaxAttempts = ConfigOption.builder() - .key(PREFIX + "retry.max.attempts") + .key("spark.operator.retry.max.attempts") .description("Max attempts of retries on unhandled controller errors.") .enableDynamicOverride(false) .typeParameterClass(Integer.class) .defaultValue(15) .build(); public static final ConfigOption DriverCreateMaxAttempts = ConfigOption.builder() - .key(PREFIX + "driver.create.max.attempts") + .key("spark.operator.driver.create.max.attempts") .description( "Maximal number of retry attempts of requesting driver for Spark application.") .defaultValue(3L) @@ -170,7 +169,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption MaxRetryAttemptOnKubeServerFailure = ConfigOption.builder() - .key(PREFIX + "max.retry.attempts.on.k8s.failure") + .key("spark.operator.max.retry.attempts.on.k8s.failure") .description( "Maximal number of retry attempts of requests to k8s server upon " + "response 429 and 5xx.") @@ -178,7 +177,7 @@ public class SparkOperatorConf { .typeParameterClass(Long.class) .build(); public static final ConfigOption RetryAttemptAfterSeconds = ConfigOption.builder() - .key(PREFIX + "retry.attempt.after.seconds") + .key("spark.operator.retry.attempt.after.seconds") .description( "Default time (in seconds) to wait till next request. 
This would be used if " +
                            "server does not set Retry-After in response.")
            .defaultValue(1L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption MaxRetryAttemptAfterSeconds = ConfigOption.builder()
-            .key(PREFIX + "max.retry.attempt.after.seconds")
+            .key("spark.operator.max.retry.attempt.after.seconds")
            .description("Maximal time (in seconds) to wait till next request.")
            .defaultValue(15L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption StatusPatchMaxRetry = ConfigOption.builder()
-            .key(PREFIX + "status.patch.max.retry")
+            .key("spark.operator.status.patch.max.retry")
            .description(
                    "Maximal number of retry attempts of requests to k8s server for resource " +
                            "status update.")
            .defaultValue(3L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption StatusPatchFailureBackoffSeconds =
            ConfigOption.builder()
-            .key(PREFIX + "status.patch.failure.backoff.seconds")
+            .key("spark.operator.status.patch.failure.backoff.seconds")
            .description(
                    "Default time (in seconds) to wait till next request to patch " +
                            "resource status update.")
            .defaultValue(3L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption AppReconcileIntervalSeconds = ConfigOption.builder()
-            .key(PREFIX + "app.reconcile.interval.seconds")
+            .key("spark.operator.app.reconcile.interval.seconds")
            .description(
                    "Interval (in seconds) to reconcile when application is starting " +
                            "up. Note that reconcile is always expected to be triggered " +
                            "per update - this interval controls the reconcile behavior " +
                            "when operator still needs to reconcile even when there's no " +
                            "update detected, e.g. for timeout checks.")
            .defaultValue(120L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption ForegroundRequestTimeoutSeconds =
            ConfigOption.builder()
-            .key(PREFIX + "foreground.request.timeout.seconds")
+            .key("spark.operator.foreground.request.timeout.seconds")
            .description(
                    "Timeout (in seconds) for requests made to API server. This " +
                            "applies only to foreground requests.")
            .defaultValue(30L)
            .typeParameterClass(Long.class)
            .build();
    public static final ConfigOption OperatorWatchedNamespaces =
            ConfigOption.builder()
-            .key(PREFIX + "watched.namespaces")
+            .key("spark.operator.watched.namespaces")
            .description(
                    "Comma-separated list of namespaces that the operator would be " +
                            "watching for Spark resources.
If unset, operator would " + @@ -242,7 +241,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption TrimAttemptStateTransitionHistory = ConfigOption.builder() - .key(PREFIX + "trim.attempt.state.transition.history") + .key("spark.operator.trim.attempt.state.transition.history") .description( "When enabled, operator would trim state transition history when a " + "new attempt starts, keeping previous attempt summary only.") @@ -251,7 +250,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption JOSDKMetricsEnabled = ConfigOption.builder() - .key(PREFIX + "josdk.metrics.enabled") + .key("spark.operator.josdk.metrics.enabled") .description( "When enabled, the josdk metrics will be added in metrics source and " + "configured for operator.") @@ -260,7 +259,7 @@ public class SparkOperatorConf { public static final ConfigOption KubernetesClientMetricsEnabled = ConfigOption.builder() - .key(PREFIX + "kubernetes.client.metrics.enabled") + .key("spark.operator.kubernetes.client.metrics.enabled") .defaultValue(true) .description( "Enable KubernetesClient metrics for measuring the HTTP traffic to " + @@ -271,7 +270,7 @@ public class SparkOperatorConf { public static final ConfigOption KubernetesClientMetricsGroupByResponseCodeGroupEnabled = ConfigOption.builder() - .key(PREFIX + "kubernetes.client.metrics.group.by.response.code.group.enable") + .key("spark.operator.kubernetes.client.metrics.group.by.response.code.group.enable") .description( "When enabled, additional metrics group by http response code group(1xx, " + "2xx, 3xx, 4xx, 5xx) received from API server will be added. Users " + @@ -280,7 +279,7 @@ public class SparkOperatorConf { .defaultValue(true) .build(); public static final ConfigOption OperatorProbePort = ConfigOption.builder() - .key(PREFIX + "probe.port") + .key("spark.operator.probe.port") .defaultValue(18080) .description("The port used for health/readiness check probe status.") .typeParameterClass(Integer.class) @@ -288,7 +287,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption OperatorMetricsPort = ConfigOption.builder() - .key(PREFIX + "metrics.port") + .key("spark.operator.metrics.port") .defaultValue(19090) .description("The port used for checking metrics") .typeParameterClass(Integer.class) @@ -297,7 +296,7 @@ public class SparkOperatorConf { public static final ConfigOption SentinelExecutorServicePoolSize = ConfigOption.builder() - .key(PREFIX + "sentinel.executor.pool.size") + .key("spark.operator.sentinel.executor.pool.size") .description( "Size of executor service in Sentinel Managers to check the health " + "of sentinel resources.") @@ -308,7 +307,7 @@ public class SparkOperatorConf { public static final ConfigOption SENTINEL_RESOURCE_RECONCILIATION_DELAY = ConfigOption.builder() - .key(PREFIX + "health.sentinel.resource.reconciliation.delay.seconds") + .key("spark.operator.health.sentinel.resource.reconciliation.delay.seconds") .defaultValue(60L) .description( "Allowed max time(seconds) between spec update and reconciliation " + @@ -318,7 +317,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption APPLICATION_STATUS_LISTENER_CLASS_NAMES = ConfigOption.builder() - .key(PREFIX + "application.status.listener.class.names") + .key("spark.operator.application.status.listener.class.names") .defaultValue("") .description( "Comma-separated names of ApplicationStatusListener class " + @@ -328,7 +327,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption 
LEADER_ELECTION_ENABLED = ConfigOption.builder() - .key(PREFIX + "leader.election.enabled") + .key("spark.operator.leader.election.enabled") .defaultValue(false) .description( "Enable leader election for the operator to allow running standby " + @@ -338,7 +337,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption LEADER_ELECTION_LEASE_NAME = ConfigOption.builder() - .key(PREFIX + "leader.election.lease.name") + .key("spark.operator.leader.election.lease.name") .defaultValue("spark-operator-lease") .description( "Leader election lease name, must be unique for leases in the same " + @@ -348,7 +347,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption LEADER_ELECTION_LEASE_DURATION_SECONDS = ConfigOption.builder() - .key(PREFIX + "leader.election.lease.duration.seconds") + .key("spark.operator.leader.election.lease.duration.seconds") .defaultValue(1200L) .description("Leader election lease duration.") .enableDynamicOverride(false) @@ -356,7 +355,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption LEADER_ELECTION_RENEW_DEADLINE_SECONDS = ConfigOption.builder() - .key(PREFIX + "leader.election.renew.deadline.seconds") + .key("spark.operator.leader.election.renew.deadline.seconds") .defaultValue(600L) .description("Leader election renew deadline.") .enableDynamicOverride(false) @@ -364,7 +363,7 @@ public class SparkOperatorConf { .build(); public static final ConfigOption LEADER_ELECTION_RETRY_PERIOD_SECONDS = ConfigOption.builder() - .key(PREFIX + "leader.election.retry.period.seconds") + .key("spark.operator.leader.election.retry.period.seconds") .defaultValue(180L) .description("Leader election retry period.") .enableDynamicOverride(false) From 91bc916728d8a820774233c4eef7260092d427c3 Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Mon, 8 Apr 2024 17:28:35 -0700 Subject: [PATCH 08/14] Support resource retention policy for apps. 
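For illustration, a sketch of the new field on an application spec (the application name is a
placeholder; the enum values follow the CRD change below):

```yaml
apiVersion: org.apache.spark/v1alpha1
kind: SparkApplication
metadata:
  name: retention-demo
spec:
  applicationTolerations:
    # retain driver resources only for failed attempts
    resourceRetentionPolicy: RetainOnFailure
    restartConfig:
      # a policy other than AlwaysDelete should be paired with 'Never'
      # to avoid resource conflicts across restart attempts
      restartPolicy: Never
```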
---
 .../sparkapplications.org.apache.spark-v1.yml |  8 ++++--
 .../operator/spec/ApplicationTolerations.java | 11 ++++----
 .../spec/ResourceRetentionPolicy.java         | 26 +++++++++++++++++++
 spark-operator-docs/getting_started.md        |  2 +-
 spark-operator-docs/spark_application.md      | 23 ++++++++++++----
 .../reconcilesteps/AppCleanUpStep.java        | 17 +++++++++++-
 .../src/main/resources/spark-pi.yaml          |  2 +-
 7 files changed, 74 insertions(+), 15 deletions(-)
 create mode 100644 spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ResourceRetentionPolicy.java

diff --git a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
index 87cd3e09..49617f36 100644
--- a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
+++ b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml
@@ -99,8 +99,12 @@ spec:
                 maxExecutors:
                   type: integer
               type: object
-            deleteOnTermination:
-              type: boolean
+            resourceRetentionPolicy:
+              enum:
+              - AlwaysDelete
+              - RetainOnFailure
+              - NeverDelete
+              type: string
           type: object
         driverSpec:
           properties:
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java
index ceeaa12c..1f20e021 100644
--- a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ApplicationTolerations.java
@@ -42,11 +42,12 @@ public class ApplicationTolerations {
     @Builder.Default
     protected InstanceConfig instanceConfig = new InstanceConfig();
     /**
-     * If disabled, operator would not attempt to delete resources after app terminates.
-     * While this can be helpful in dev phase, it shall not be enabled for prod use cases.
-     * Caution: in order to avoid resource conflicts among multiple attempts, this can be disabled
-     * iff restart policy is set to Never.
+     * Configure operator to delete / retain resources for an app after it terminates.
+     * While retaining resources can be helpful in the dev phase, use it with caution for prod:
+     * retained resources could cause an unexpected increase in resource quota usage.
+     * Caution: in order to avoid resource conflicts among multiple attempts, this should be set to
+     * 'AlwaysDelete' unless restart policy is set to 'Never'.
      */
     @Builder.Default
-    protected Boolean deleteOnTermination = true;
+    protected ResourceRetentionPolicy resourceRetentionPolicy = ResourceRetentionPolicy.AlwaysDelete;
 }
diff --git a/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ResourceRetentionPolicy.java b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ResourceRetentionPolicy.java
new file mode 100644
index 00000000..39014d8b
--- /dev/null
+++ b/spark-operator-api/src/main/java/org/apache/spark/kubernetes/operator/spec/ResourceRetentionPolicy.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.spec;
+
+public enum ResourceRetentionPolicy {
+    AlwaysDelete,
+    RetainOnFailure,
+    NeverDelete
+}
diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md
index 31757a56..40e43764 100644
--- a/spark-operator-docs/getting_started.md
+++ b/spark-operator-docs/getting_started.md
@@ -116,7 +116,7 @@ spec:
       forceTerminationGracePeriodMillis: 300000
       sparkSessionStartTimeoutMillis: 300000
       terminationRequeuePeriodMillis: 2000
-    deleteOnTermination: false
+    resourceRetentionPolicy: RetainOnFailure
     instanceConfig:
       initExecutors: 0
       maxExecutors: 0
diff --git a/spark-operator-docs/spark_application.md b/spark-operator-docs/spark_application.md
index bcb50377..b74dc739 100644
--- a/spark-operator-docs/spark_application.md
+++ b/spark-operator-docs/spark_application.md
@@ -149,7 +149,17 @@ applicationTimeoutConfig:
   # time to wait for force delete resources at the end of attempt
   forceTerminationGracePeriodMillis: 300000
 ```
-
+
+
+| Field | Type | Default Value | Description |
+|-------|------|---------------|-------------|
+| .spec.applicationTolerations.applicationTimeoutConfig.driverStartTimeoutMillis | integer | 300000 | Time to wait for the driver to reach running state after it is requested. |
+| .spec.applicationTolerations.applicationTimeoutConfig.executorStartTimeoutMillis | integer | 300000 | Time to wait for the driver to acquire the minimal number of running executors. |
+| .spec.applicationTolerations.applicationTimeoutConfig.forceTerminationGracePeriodMillis | integer | 300000 | Time to wait when force-deleting resources at the end of an attempt. |
+| .spec.applicationTolerations.applicationTimeoutConfig.sparkSessionStartTimeoutMillis | integer | 300000 | Time to wait for the driver to reach ready state. |
+| .spec.applicationTolerations.applicationTimeoutConfig.terminationRequeuePeriodMillis | integer | 2000 | Back-off time before a resource release is re-attempted for the application. |
+
+
 ### Instance Config
 
 Instance Config helps operator to decide whether an application is running healthy. When
@@ -191,12 +201,15 @@ On the other hand, when developing an application, it's possible to configure
 
 ```yaml
 applicationTolerations:
-  deleteOnTermination: false
+  # Acceptable values are 'AlwaysDelete', 'RetainOnFailure', 'NeverDelete'
+  resourceRetentionPolicy: RetainOnFailure
 ```
 
-So operator would not attempt to delete resources after app terminates. Note that this
-applies only to operator-created resources (driver .etc). You may also want to tune
-`spark.kubernetes.executor.deleteOnTermination` to control the behavior of driver-created
+So operator would not attempt to delete the driver pod and driver resources if the app fails. Similarly,
+if resourceRetentionPolicy is set to `NeverDelete`, operator would not delete driver resources
+when app ends. Note that this applies only to operator-created resources (driver pod, SparkConf
+configmap, etc.). You may also want to tune `spark.kubernetes.driver.service.deleteOnTermination`
+and `spark.kubernetes.executor.deleteOnTermination` to control the behavior of driver-created
 resources.
 
 ## Supported Spark Versions
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java
index 75fce0ba..d9dd3d6c 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java
@@ -37,6 +37,7 @@
 import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils;
 import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
 import org.apache.spark.kubernetes.operator.spec.ApplicationTolerations;
+import org.apache.spark.kubernetes.operator.spec.ResourceRetentionPolicy;
 import org.apache.spark.kubernetes.operator.spec.RestartPolicy;
 import org.apache.spark.kubernetes.operator.status.ApplicationState;
 import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
@@ -59,8 +60,10 @@ public ReconcileProgress reconcile(SparkApplicationContext context,
         ApplicationStatus currentStatus = context.getSparkApplication().getStatus();
         ApplicationTolerations tolerations =
                 context.getSparkApplication().getSpec().getApplicationTolerations();
+        ResourceRetentionPolicy resourceRetentionPolicy = tolerations.getResourceRetentionPolicy();
         String stateMessage = null;
-        if (!tolerations.getDeleteOnTermination()) {
+
+        if (retainReleaseResource(resourceRetentionPolicy, currentStatus.getCurrentState())) {
             if (tolerations.getRestartConfig() != null
                     && !RestartPolicy.Never.equals(
                     tolerations.getRestartConfig().getRestartPolicy())) {
@@ -139,6 +142,18 @@ public ReconcileProgress reconcile(SparkApplicationContext context,
     }
 
+    protected boolean retainReleaseResource(ResourceRetentionPolicy resourceRetentionPolicy,
+                                            ApplicationState currentState) {
+        switch (resourceRetentionPolicy) {
+            case AlwaysDelete:
+                return false;
+            case RetainOnFailure:
+                return currentState.getCurrentStateSummary().isFailure();
+            default:
+                return true;
+        }
+    }
+
     private ReconcileProgress updateStateAndProceed(SparkApplicationContext context,
                                                     StatusRecorder statusRecorder,
                                                     ApplicationStatus updatedStatus,
diff --git a/spark-operator/src/main/resources/spark-pi.yaml b/spark-operator/src/main/resources/spark-pi.yaml
index 52b10681..a93871c3 100644
--- a/spark-operator/src/main/resources/spark-pi.yaml
+++ b/spark-operator/src/main/resources/spark-pi.yaml
@@ -29,7 +29,7 @@ spec:
     spark.kubernetes.namespace: "spark-test"
     spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
   applicationTolerations:
-    deleteOnTermination: false
+    resourceRetentionPolicy: RetainOnFailure
   runtimeVersions:
     scalaVersion: v2_12
     sparkVersion: v3_5_1

From f0a2e742dc98cec12bcde166223a778128b038cd Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Mon, 8 Apr 2024 17:36:24 -0700
Subject: [PATCH 09/14] Fix deprecated versions in doc & remove unused.
--- README.md | 19 ------------------- spark-operator-docs/spark_application.md | 4 ++-- 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 89422a94..ae604944 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,3 @@ - - # Spark-Kubernetes-Operator Welcome to the **Spark-Kubernetes-Operator**, a Kubernetes operator designed to simplify and diff --git a/spark-operator-docs/spark_application.md b/spark-operator-docs/spark_application.md index b74dc739..381dd0d0 100644 --- a/spark-operator-docs/spark_application.md +++ b/spark-operator-docs/spark_application.md @@ -220,6 +220,6 @@ single submission-worker mode to support all listed versions. ```yaml runtimeVersions: # Supported values are: - # v3_5_1, v3_5_0, v3_4_1, v3_4_0, v3_3_3, v3_3_1, v3_3_0, v3_2_0 - sparkVersion: v3_4_0 + # v3_5_1, v3_4_2 + sparkVersion: v3_4_2 ``` From 39a74e19604f08698f19853def37f08142a023be Mon Sep 17 00:00:00 2001 From: zhou-jiang Date: Mon, 8 Apr 2024 17:45:38 -0700 Subject: [PATCH 10/14] remove auto generated CRD and add docs to generate it. --- .../sparkapplications.org.apache.spark-v1.yml | 6944 ----------------- 1 file changed, 6944 deletions(-) delete mode 100644 build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml diff --git a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml b/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml deleted file mode 100644 index 49617f36..00000000 --- a/build-tools/helm/spark-kubernetes-operator/crds/sparkapplications.org.apache.spark-v1.yml +++ /dev/null @@ -1,6944 +0,0 @@ -# Generated by Fabric8 CRDGenerator, manual edits might get overwritten! -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: sparkapplications.org.apache.spark -spec: - group: org.apache.spark - names: - kind: SparkApplication - plural: sparkapplications - shortNames: - - sparkapp - singular: sparkapplication - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - spec: - properties: - mainClass: - type: string - runtimeVersions: - properties: - sparkVersion: - enum: - - v3_5_1 - - v3_4_2 - type: string - scalaVersion: - enum: - - v2_12 - - v2_13 - type: string - jdkVersion: - enum: - - Java11 - - Java17 - - Java21 - type: string - required: - - sparkVersion - type: object - jars: - type: string - pyFiles: - type: string - sparkRFiles: - type: string - files: - type: string - deploymentMode: - enum: - - ClusterMode - - ClientMode - type: string - proxyUser: - type: string - driverArgs: - items: - type: string - type: array - applicationTolerations: - properties: - restartConfig: - properties: - restartPolicy: - enum: - - Always - - Never - - OnFailure - - OnInfrastructureFailure - type: string - maxRestartAttempts: - type: integer - restartBackoffMillis: - type: integer - type: object - applicationTimeoutConfig: - properties: - driverStartTimeoutMillis: - type: integer - sparkSessionStartTimeoutMillis: - type: integer - executorStartTimeoutMillis: - type: integer - forceTerminationGracePeriodMillis: - type: integer - terminationRequeuePeriodMillis: - type: integer - type: object - instanceConfig: - properties: - initExecutors: - type: integer - minExecutors: - type: integer - maxExecutors: - type: integer - type: object - resourceRetentionPolicy: - enum: - - AlwaysDelete - - RetainOnFailure - - NeverDelete - type: string - type: object - driverSpec: - properties: - podTemplateSpec: 
- properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - creationTimestamp: - type: string - deletionGracePeriodSeconds: - type: integer - deletionTimestamp: - type: string - finalizers: - items: - type: string - type: array - generateName: - type: string - generation: - type: integer - labels: - additionalProperties: - type: string - type: object - managedFields: - items: - properties: - apiVersion: - type: string - fieldsType: - type: string - fieldsV1: - type: object - manager: - type: string - operation: - type: string - subresource: - type: string - time: - type: string - type: object - type: array - name: - type: string - namespace: - type: string - ownerReferences: - items: - properties: - apiVersion: - type: string - blockOwnerDeletion: - type: boolean - controller: - type: boolean - kind: - type: string - name: - type: string - uid: - type: string - type: object - type: array - resourceVersion: - type: string - selfLink: - type: string - uid: - type: string - type: object - spec: - properties: - activeDeadlineSeconds: - type: integer - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - type: object - weight: - type: integer - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - type: object - type: array - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - type: object - weight: - type: integer - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - 
type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - type: object - weight: - type: integer - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - type: object - type: array - type: object - type: object - automountServiceAccountToken: - type: boolean - containers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - resource: - type: string - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - type: object - type: object - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - type: object - 
# [Several thousand lines of auto-generated OpenAPI v3 schema are elided
#  here for readability; the full definition lives in
#  sparkapplications.org.apache.spark-v1.yml. spec.driverSpec.podTemplateSpec
#  and spec.executorSpec.podTemplateSpec each embed the complete Kubernetes
#  PodTemplateSpec schema:
#  - container schema (shared by containers, initContainers, and
#    ephemeralContainers): args, command, env/envFrom, image,
#    imagePullPolicy, lifecycle (postStart/preStop via exec, httpGet,
#    tcpSocket), livenessProbe/readinessProbe/startupProbe (exec, grpc,
#    httpGet, tcpSocket, plus failureThreshold, initialDelaySeconds,
#    periodSeconds, successThreshold, terminationGracePeriodSeconds,
#    timeoutSeconds), ports, resources (claims/limits/requests),
#    securityContext (capabilities, seLinuxOptions, seccompProfile,
#    windowsOptions, and the runAs*/privileged/procMount flags),
#    stdin/stdinOnce/tty, terminationMessagePath/Policy, volumeDevices,
#    volumeMounts, workingDir; ephemeralContainers additionally carry
#    targetContainerName
#  - pod-level fields: activeDeadlineSeconds, affinity (nodeAffinity,
#    podAffinity, podAntiAffinity), automountServiceAccountToken,
#    dnsConfig/dnsPolicy, enableServiceLinks, hostAliases,
#    hostIPC/hostNetwork/hostPID/hostUsers, hostname, imagePullSecrets,
#    nodeName/nodeSelector, os, overhead, preemptionPolicy,
#    priority/priorityClassName, readinessGates, resourceClaims,
#    restartPolicy, runtimeClassName, schedulerName, schedulingGates,
#    securityContext, serviceAccount/serviceAccountName, setHostnameAsFQDN,
#    shareProcessNamespace, subdomain, terminationGracePeriodSeconds,
#    tolerations, topologySpreadConstraints
#  - the full volume-source catalog: awsElasticBlockStore, azureDisk,
#    azureFile, cephfs, cinder, configMap, csi, downwardAPI, emptyDir,
#    ephemeral (volumeClaimTemplate), fc, flexVolume, flocker,
#    gcePersistentDisk, gitRepo, glusterfs, hostPath, iscsi, nfs,
#    persistentVolumeClaim, photonPersistentDisk, portworxVolume, projected,
#    quobyte, rbd, scaleIO, secret, storageos, vsphereVolume
#  Port- and quantity-valued fields are declared anyOf integer/string with
#  x-kubernetes-int-or-string: true.]
storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - type: object - secret: - properties: - defaultMode: - type: integer - items: - items: - properties: - key: - type: string - mode: - type: integer - path: - type: string - type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string - type: object - type: object - type: array - type: object - type: object - type: object - sparkConf: - additionalProperties: - type: string - type: object - required: - - runtimeVersions - type: object - status: - properties: - currentState: - properties: - lastObservedDriverStatus: - properties: - conditions: - items: - properties: - lastProbeTime: - type: string - lastTransitionTime: - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - containerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - ephemeralContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: 
array - hostIP: - type: string - initContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - message: - type: string - nominatedNodeName: - type: string - phase: - type: string - podIP: - type: string - podIPs: - items: - properties: - ip: - type: string - type: object - type: array - qosClass: - type: string - reason: - type: string - startTime: - type: string - type: object - currentStateSummary: - enum: - - SUBMITTED - - SCHEDULED_TO_RESTART - - DRIVER_REQUESTED - - DRIVER_STARTED - - DRIVER_READY - - INITIALIZED_BELOW_THRESHOLD_EXECUTORS - - RUNNING_HEALTHY - - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS - - DRIVER_LAUNCH_TIMED_OUT - - EXECUTORS_LAUNCH_TIMED_OUT - - SPARK_SESSION_INITIALIZATION_TIMED_OUT - - SUCCEEDED - - FAILED - - SCHEDULING_FAILURE - - DRIVER_EVICTED - - RESOURCE_RELEASED - - TERMINATED_WITHOUT_RELEASE_RESOURCES - type: string - lastTransitionTime: - type: string - message: - type: string - type: object - stateTransitionHistory: - additionalProperties: - properties: - lastObservedDriverStatus: - properties: - conditions: - items: - properties: - lastProbeTime: - type: string - lastTransitionTime: - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - containerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - ephemeralContainerStatuses: - items: - properties: - containerID: - type: 
string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - hostIP: - type: string - initContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - message: - type: string - nominatedNodeName: - type: string - phase: - type: string - podIP: - type: string - podIPs: - items: - properties: - ip: - type: string - type: object - type: array - qosClass: - type: string - reason: - type: string - startTime: - type: string - type: object - currentStateSummary: - enum: - - SUBMITTED - - SCHEDULED_TO_RESTART - - DRIVER_REQUESTED - - DRIVER_STARTED - - DRIVER_READY - - INITIALIZED_BELOW_THRESHOLD_EXECUTORS - - RUNNING_HEALTHY - - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS - - DRIVER_LAUNCH_TIMED_OUT - - EXECUTORS_LAUNCH_TIMED_OUT - - SPARK_SESSION_INITIALIZATION_TIMED_OUT - - SUCCEEDED - - FAILED - - SCHEDULING_FAILURE - - DRIVER_EVICTED - - RESOURCE_RELEASED - - TERMINATED_WITHOUT_RELEASE_RESOURCES - type: string - lastTransitionTime: - type: string - message: - type: string - type: object - type: object - previousAttemptSummary: - properties: - stateTransitionHistory: - additionalProperties: - properties: - lastObservedDriverStatus: - properties: - conditions: - items: - properties: - lastProbeTime: - type: string - lastTransitionTime: - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - containerStatuses: - items: - properties: - containerID: - type: string - image: - type: 
string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - ephemeralContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - hostIP: - type: string - initContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - message: - type: string - nominatedNodeName: - type: string - phase: - type: string - podIP: - type: string - podIPs: - items: - properties: - ip: - type: string - type: object - type: array - qosClass: - type: string - 
reason: - type: string - startTime: - type: string - type: object - currentStateSummary: - enum: - - SUBMITTED - - SCHEDULED_TO_RESTART - - DRIVER_REQUESTED - - DRIVER_STARTED - - DRIVER_READY - - INITIALIZED_BELOW_THRESHOLD_EXECUTORS - - RUNNING_HEALTHY - - RUNNING_WITH_BELOW_THRESHOLD_EXECUTORS - - DRIVER_LAUNCH_TIMED_OUT - - EXECUTORS_LAUNCH_TIMED_OUT - - SPARK_SESSION_INITIALIZATION_TIMED_OUT - - SUCCEEDED - - FAILED - - SCHEDULING_FAILURE - - DRIVER_EVICTED - - RESOURCE_RELEASED - - TERMINATED_WITHOUT_RELEASE_RESOURCES - type: string - lastTransitionTime: - type: string - message: - type: string - type: object - type: object - attemptInfo: - properties: - id: - type: integer - type: object - type: object - currentAttemptSummary: - properties: - stateTransitionHistory: - additionalProperties: - properties: - lastObservedDriverStatus: - properties: - conditions: - items: - properties: - lastProbeTime: - type: string - lastTransitionTime: - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - containerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - ephemeralContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - name: - type: string - ready: - type: boolean - restartCount: - type: integer - started: - type: boolean - state: - properties: - running: - properties: - startedAt: - type: string - type: object - terminated: - properties: - containerID: - type: string - exitCode: - type: integer - finishedAt: - type: string - message: - type: string - reason: - type: string - signal: - type: integer - startedAt: - type: string - type: object - waiting: - properties: - message: - type: string - reason: - type: string - type: object - type: object - type: object - type: array - hostIP: - type: string - initContainerStatuses: - items: - properties: - containerID: - type: string - image: - type: string - imageID: - type: string - lastState: - properties: - running: 
-      served: true
-      storage: true
-      subresources:
-        status: {}
-      additionalPrinterColumns:
-        - jsonPath: .status.currentState.currentStateSummary
-          name: Current State
-          type: string
-        - jsonPath: .metadata.creationTimestamp
-          name: Age
-          type: date

From d4480921b72331e4d835d43c35597d9af7e11006 Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Mon, 8 Apr 2024 17:46:28 -0700
Subject: [PATCH 11/14] update docs for generating crd

---
 .gitignore                                             | 1 +
 build-tools/helm/spark-kubernetes-operator/.helmignore | 5 -----
 spark-operator-docs/getting_started.md                 | 7 +++++--
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/.gitignore b/.gitignore
index 96bb0346..0e6c867c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,3 +64,4 @@ build/
 **/generated
 lib
 target
+build-tools/helm/spark-kubernetes-operator/crds/

diff --git a/build-tools/helm/spark-kubernetes-operator/.helmignore b/build-tools/helm/spark-kubernetes-operator/.helmignore
index 0e8a0eb3..e37341e8 100644
--- a/build-tools/helm/spark-kubernetes-operator/.helmignore
+++ b/build-tools/helm/spark-kubernetes-operator/.helmignore
@@ -5,11 +5,6 @@
 # Common VCS dirs
 .git/
 .gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
 # Common backup files
 *.swp
 *.bak

diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md
index 40e43764..e5b90a30 100644
--- a/spark-operator-docs/getting_started.md
+++ b/spark-operator-docs/getting_started.md
@@ -46,13 +46,16 @@ minikube start
 eval $(minikube docker-env)
 ```
 
-### Build Spark Operator Image Locally
+### Build Spark Operator Locally
 
 ```bash
   # Build a local container image which can be used for minikube.etc.
   # For testing in remote k8s cluster, please also do `docker push` to make it available
   # to the cluster / nodes
-  docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+  docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+
+  # Generate CRD yaml and make it available for chart deployment
+  ./gradlew spark-operator-api:copyGeneratedCRD
 ```

From 4bf6d5961fea1a5d5fe305aaf80a81807af1015b Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Wed, 17 Apr 2024 16:54:43 -0700
Subject: [PATCH 12/14] Remove redundant mdc keys and rename to match josdk mdc
 pattern

---
 .../kubernetes/operator/utils/LoggingUtils.java | 27 +++++--------------
 1 file changed, 7 insertions(+), 20 deletions(-)

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java
index 868359fe..55ca051b 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/LoggingUtils.java
@@ -22,39 +22,26 @@ import java.util.Set;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.lang3.StringUtils;
 import org.slf4j.MDC;
 
 import org.apache.spark.kubernetes.operator.SparkApplication;
+import org.apache.spark.kubernetes.operator.status.ApplicationAttemptSummary;
 
 public class LoggingUtils {
     public static final class TrackedMDC {
-        public static final String NamespaceKey = "app_namespace";
-        public static final String NameKey = "app_name";
-        public static final String UuidKey = "app_uuid";
-        public static final String GenerationKey = "app_generation";
+        public static final String AppAttemptIdKey = "resource.app.attemptId";
         private final ReentrantLock lock = new ReentrantLock();
         private Set<String> keys = new HashSet<>();
 
         public void set(final SparkApplication application) {
-            if (application != null && application.getMetadata() != null) {
+            if (application != null && application.getStatus() != null) {
                 try {
                     lock.lock();
-                    if (StringUtils.isNotEmpty(application.getMetadata().getNamespace())) {
-                        MDC.put(NamespaceKey, application.getMetadata().getNamespace());
-                        keys.add(NamespaceKey);
+                    ApplicationAttemptSummary summary = application.getStatus().getCurrentAttemptSummary();
+                    if (summary != null && summary.getAttemptInfo() != null) {
+                        MDC.put(AppAttemptIdKey, summary.getAttemptInfo().getId().toString());
+                        keys.add(AppAttemptIdKey);
                     }
-                    if (StringUtils.isNotEmpty(application.getMetadata().getName())) {
-                        MDC.put(NameKey, application.getMetadata().getName());
-                        keys.add(NameKey);
-                    }
-                    if (StringUtils.isNotEmpty(application.getMetadata().getUid())) {
-                        MDC.put(UuidKey, application.getMetadata().getUid());
-                        keys.add(UuidKey);
-                    }
-                    MDC.put(GenerationKey,
-                            String.valueOf(application.getMetadata().getGeneration()));
-                    keys.add(GenerationKey);
                 } finally {
                     lock.unlock();
                 }
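A note on the effect of the MDC change above: once `TrackedMDC#set` has populated the
context, any slf4j backend whose layout uses the `%X` converter can render the attempt
id (for log4j2, a pattern such as `%d %p %c appAttempt=%X{resource.app.attemptId} %m%n`).
The following is a minimal, hypothetical sketch of that flow; the demo class is
illustrative only and is not part of this patch series:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

// Hypothetical demo, not from the patch: mirrors what TrackedMDC#set and
// TrackedMDC#reset do around a reconcile pass.
public class MdcKeyDemo {
    private static final Logger LOG = LoggerFactory.getLogger(MdcKeyDemo.class);

    public static void main(String[] args) {
        // TrackedMDC#set puts the current attempt id under this key.
        MDC.put("resource.app.attemptId", "1");
        // Rendered with the attempt id when the layout contains %X{resource.app.attemptId}.
        LOG.info("Start reconciliation.");
        // TrackedMDC#reset is responsible for clearing the tracked keys afterwards.
        MDC.remove("resource.app.attemptId");
    }
}
```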
From 8ec7d7317a239d4aa4456d71713caa12adae3c09 Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Thu, 18 Apr 2024 14:56:06 -0700
Subject: [PATCH 13/14] Update operator version and gradle version used in
 builder

---
 Dockerfile                                            | 2 +-
 build-tools/helm/spark-kubernetes-operator/Chart.yaml | 4 ++--
 gradle.properties                                     | 2 +-
 spark-operator-docs/developer_guide.md                | 2 +-
 spark-operator-docs/getting_started.md                | 4 ++--
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index a0233a5d..4596f276 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,7 +16,7 @@
 #
 #
-FROM gradle:8.6-jdk17-alpine AS builder
+FROM gradle:8.7-jdk17-alpine AS builder
 ARG BASE_VERSION
 WORKDIR /app
 COPY . .

diff --git a/build-tools/helm/spark-kubernetes-operator/Chart.yaml b/build-tools/helm/spark-kubernetes-operator/Chart.yaml
index e62a167a..81b501bd 100644
--- a/build-tools/helm/spark-kubernetes-operator/Chart.yaml
+++ b/build-tools/helm/spark-kubernetes-operator/Chart.yaml
@@ -21,5 +21,5 @@ apiVersion: v2
 name: spark-kubernetes-operator
 description: A Helm chart for the Apache Spark Kubernetes Operator
 type: application
-version: 1.0.0-alpha
-appVersion: 1.0.0-alpha
+version: 0.1.0
+appVersion: 0.1.0

diff --git a/gradle.properties b/gradle.properties
index 0c46dac0..a7fe26e1 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -16,7 +16,7 @@
 #
 #
 group=org.apache.spark.kubernetes.operator
-version=1.0.0-alpha
+version=0.1.0
 commonsLang3Version=3.12.0
 commonsIOVersion=2.11.0
 commonsConfigurationVersion=2.9.0

diff --git a/spark-operator-docs/developer_guide.md b/spark-operator-docs/developer_guide.md
index 35bc0fcd..bcef70e2 100644
--- a/spark-operator-docs/developer_guide.md
+++ b/spark-operator-docs/developer_guide.md
@@ -41,7 +41,7 @@ If you are working on API (CRD) changes, remember to update CRD yaml in chart as
   # Build a local container image which can be used for minikube.etc.
   # For testing in remote k8s cluster, please also do `docker push` to make it available
   # to the cluster / nodes
-  docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+  docker build --build-arg BASE_VERSION=0.1.0 -t spark-kubernetes-operator:0.1.0 .
 ```
 
 ## Deploy Operator

diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md
index e5b90a30..8d92cd42 100644
--- a/spark-operator-docs/getting_started.md
+++ b/spark-operator-docs/getting_started.md
@@ -33,7 +33,7 @@ cluster.
 
 ### Compatibility
 
-- JDK11, 17, or 23
+- JDK17, or 23
 - Operator used fabric8 which assumes to be compatible with available k8s versions.
 - Spark versions 3.4 and above
 
@@ -52,7 +52,7 @@ eval $(minikube docker-env)
   # Build a local container image which can be used for minikube.etc.
   # For testing in remote k8s cluster, please also do `docker push` to make it available
   # to the cluster / nodes
-  docker build --build-arg BASE_VERSION=1.0.0-alpha -t spark-kubernetes-operator:1.0.0-alpha .
+  docker build --build-arg BASE_VERSION=0.1.0 -t spark-kubernetes-operator:0.1.0 .
 
   # Generate CRD yaml and make it available for chart deployment
   ./gradlew spark-operator-api:copyGeneratedCRD
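The final patch below introduces a `BaseContext` abstraction behind the renamed
`SparkAppContext`. As a hedged sketch of what that abstraction buys, a helper can be
written against the base type alone; the `ContextDescriber` class below is hypothetical,
not part of the patch series, and assumes the `BaseContext<SparkApplication>` signature
shown in the diff that follows:

```java
import io.fabric8.kubernetes.client.KubernetesClient;

import org.apache.spark.kubernetes.operator.SparkApplication;
import org.apache.spark.kubernetes.operator.controller.BaseContext;

// Hypothetical helper: reconcile-side code can depend on the abstract context
// (custom resource plus client) without caring which concrete context backs it.
final class ContextDescriber {
    static String describe(BaseContext<SparkApplication> context) {
        SparkApplication app = context.getResource();   // the CR under reconciliation
        KubernetesClient client = context.getClient();  // shared client for lookups
        return app.getMetadata().getName() + " @ " + client.getMasterUrl();
    }
}
```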
From 110e1a8ff517ab74925a552297cdaa04bbda0c09 Mon Sep 17 00:00:00 2001
From: zhou-jiang
Date: Thu, 18 Apr 2024 17:31:30 -0700
Subject: [PATCH 14/14] Rename inconsistent class prefix to SparkApp, and misc
 clean up.

---
 spark-operator-docs/developer_guide.md        |   2 +-
 spark-operator-docs/getting_started.md        |   6 +-
 spark-operator-docs/spark_application.md      |   2 -
 .../kubernetes/operator/CancelAppTest.java    |  23 ----
 ....java => SparkAppSubmitToSucceedTest.java} |   6 +-
 .../kubernetes/operator/SparkOperator.java    |  17 +--
 .../operator/config/SparkOperatorConf.java    |  20 +--
 .../operator/controller/BaseContext.java      |  29 +++++
 ...ationContext.java => SparkAppContext.java} |  24 ++--
 .../decorators/DriverResourceDecorator.java   |   1 +
 ...tener.java => SparkAppStatusListener.java} |   2 +-
 .../reconciler/ReconcileProgress.java         |   6 +-
 ...Utils.java => SparkAppReconcileUtils.java} |  19 ++-
 ...econciler.java => SparkAppReconciler.java} |  28 +++--
 .../observers/AppDriverTimeoutObserver.java   |   8 +-
 .../reconcilesteps/AppCleanUpStep.java        |  21 ++--
 .../reconcilesteps/AppInitStep.java           |  22 ++--
 .../reconcilesteps/AppReconcileStep.java      |  16 +--
 .../AppResourceObserveStep.java               |   8 +-
 .../reconcilesteps/AppRunningStep.java        |  19 ++-
 .../reconcilesteps/AppTerminatedStep.java     |  12 +-
 .../reconcilesteps/AppValidateStep.java       |  16 +--
 .../reconcilesteps/UnknownStateStep.java      |  11 +-
 .../utils/SparkAppStatusRecorder.java         |  33 +++++
 ...tusUtils.java => SparkAppStatusUtils.java} |   2 +-
 .../operator/utils/StatusRecorder.java        | 114 +++++------------
 .../src/main/resources/spark-pi.yaml          |   1 -
 .../src/main/resources/streaming.yaml         |   1 -
 .../source/OperatorJosdkMetricsTest.java      |  20 +--
 ...t.java => SparkAppReconcileUtilsTest.java} |  56 ++++-----
 ...rTest.java => SparkAppReconcilerTest.java} |  20 +--
 ...riverConf.java => SparkAppDriverConf.java} |  29 +++--
 ...rceSpec.java => SparkAppResourceSpec.java} |   8 +-
 ...ker.java => SparkAppSubmissionWorker.java} |  20 +--
 ...est.java => SparkAppResourceSpecTest.java} |  24 ++--
 ...java => SparkAppSubmissionWorkerTest.java} |  45 +++----
 36 files changed, 340 insertions(+), 351 deletions(-)
 delete mode 100644 spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java
 rename spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/{AppSubmitToSucceedTest.java => SparkAppSubmitToSucceedTest.java} (97%)
 create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/BaseContext.java
 rename spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/{SparkApplicationContext.java => SparkAppContext.java} (80%)
 rename spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/{ApplicationStatusListener.java => SparkAppStatusListener.java} (92%)
 rename spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/{SparkApplicationReconcileUtils.java => SparkAppReconcileUtils.java} (91%)
 rename spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/{SparkApplicationReconciler.java => SparkAppReconciler.java} (89%)
 create mode 100644 spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusRecorder.java
 rename spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/{ApplicationStatusUtils.java => SparkAppStatusUtils.java} (98%)
 rename spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/{SparkApplicationReconcileUtilsTest.java => SparkAppReconcileUtilsTest.java} (54%)
 rename spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/{SparkApplicationReconcilerTest.java => SparkAppReconcilerTest.java} (83%)
 rename spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/{ApplicationDriverConf.java => SparkAppDriverConf.java} (67%)
 rename spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/{ApplicationResourceSpec.java => SparkAppResourceSpec.java} (94%)
 rename spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/{ApplicationClientWorker.java => SparkAppSubmissionWorker.java} (87%)
 rename spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/{ApplicationResourceSpecTest.java => SparkAppResourceSpecTest.java} (80%)
 rename spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/{ApplicationClientWorkerTest.java => SparkAppSubmissionWorkerTest.java} (82%)

diff --git a/spark-operator-docs/developer_guide.md b/spark-operator-docs/developer_guide.md
index bcef70e2..4a298611 100644
--- a/spark-operator-docs/developer_guide.md
+++ b/spark-operator-docs/developer_guide.md
@@ -80,5 +80,5 @@ For now, in order to manually run e2e tests:
 ```shell
 java -cp /path/to/spark-operator-test.jar \
   -Dspark.operator.test.app.yaml.files.dir=/path/to/e2e-tests/ \
-  org.apache.spark.kubernetes.operator.AppSubmitToSucceedTest
+  org.apache.spark.kubernetes.operator.SparkAppSubmitToSucceedTest
 ```

diff --git a/spark-operator-docs/getting_started.md b/spark-operator-docs/getting_started.md
index 8d92cd42..6a87ccf2 100644
--- a/spark-operator-docs/getting_started.md
+++ b/spark-operator-docs/getting_started.md
@@ -139,7 +139,6 @@ spec:
   sparkConf:
     spark.executor.instances: "5"
     spark.kubernetes.authenticate.driver.serviceAccountName: spark
     spark.kubernetes.container.image: spark:3.5.1-scala2.12-java11-python3-r-ubuntu
-    spark.kubernetes.namespace: spark-test
 status:
   currentAttemptSummary:
     attemptInfo:
@@ -180,10 +179,13 @@ Delete application Spark-pi and its secondary resources with
 
 #### Uninstallation
 
-To remove the installed resources from your cluster, use:
+To remove the installed resources from your cluster, reset environment to the defaults and
+shutdown the cluster:
 
 ```bash
 helm uninstall spark-kubernetes-operator
+eval $(minikube docker-env --unset)
+minikube stop
 ```
 
 ### More examples

diff --git a/spark-operator-docs/spark_application.md b/spark-operator-docs/spark_application.md
index 381dd0d0..30d96609 100644
--- a/spark-operator-docs/spark_application.md
+++ b/spark-operator-docs/spark_application.md
@@ -20,14 +20,12 @@ apiVersion: org.apache.spark/v1alpha1
 kind: SparkApplication
 metadata:
   name: spark-pi
-  namespace: spark-test
 spec:
   mainClass: "org.apache.spark.examples.SparkPi"
   jars: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.1.jar"
   sparkConf:
     spark.executor.instances: "5"
     spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java17-python3-ubuntu"
-    spark.kubernetes.namespace: "spark-test"
     spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
   runtimeVersions:
     scalaVersion: v2_12

diff --git a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java
deleted file mode 100644
index 6495eba2..00000000
--- a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/CancelAppTest.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.spark.kubernetes.operator;
-
-class CancelAppTest {
-
-}

diff --git a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmitToSucceedTest.java
similarity index 97%
rename from spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java
rename to spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmitToSucceedTest.java
index 9d404316..1dfa196d 100644
--- a/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/AppSubmitToSucceedTest.java
+++ b/spark-operator-tests/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmitToSucceedTest.java
@@ -40,8 +40,8 @@
 
 import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary;
 
-class AppSubmitToSucceedTest {
-    private static final Logger logger = LoggerFactory.getLogger(AppSubmitToSucceedTest.class);
+class SparkAppSubmitToSucceedTest {
+    private static final Logger logger = LoggerFactory.getLogger(SparkAppSubmitToSucceedTest.class);
 
     /**
      * Create Spark app(s) & wait them for complete.
@@ -71,7 +71,7 @@ public static void main(String[] args) throws InterruptedException {
         String testAppYamlFilesDir =
                 System.getProperty("spark.operator.test.app.yaml.files.dir",
                         "e2e-tests/spark-apps/");
         String testAppNamespace = System.getProperty("spark.operator.test.app.namespace",
-                "spark-test");
+                "default");
 
         Set<SparkApplication> testApps =
                 loadSparkAppsFromFile(client, new File(testAppYamlFilesDir));

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java
index 113e2d0f..f2484c84 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/SparkOperator.java
@@ -47,9 +47,9 @@
 import org.apache.spark.kubernetes.operator.metrics.MetricsSystemFactory;
 import org.apache.spark.kubernetes.operator.metrics.source.OperatorJosdkMetrics;
 import org.apache.spark.kubernetes.operator.probe.ProbeService;
-import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler;
+import org.apache.spark.kubernetes.operator.reconciler.SparkAppReconciler;
 import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils;
-import org.apache.spark.kubernetes.operator.utils.StatusRecorder;
+import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder;
 
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigEnabled;
 import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.DynamicConfigSelectorStr;
@@ -67,11 +67,12 @@ public class SparkOperator {
     private Operator sparkOperator;
     private Operator sparkOperatorConfMonitor;
     private KubernetesClient client;
-    private StatusRecorder statusRecorder;
-    private MetricsSystem metricsSystem;
+    private SparkAppSubmissionWorker appSubmissionWorker;
+    private SparkAppStatusRecorder sparkAppStatusRecorder;
     protected Set<RegisteredController<?>> registeredSparkControllers;
     protected Set<String> watchedNamespaces;
 
+    private MetricsSystem metricsSystem;
     private SentinelManager sentinelManager;
     private ProbeService probeService;
     private MetricsService metricsService;
@@ -80,7 +81,9 @@ public class SparkOperator {
     public SparkOperator() {
         this.metricsSystem = MetricsSystemFactory.createMetricsSystem();
         this.client = KubernetesClientFactory.buildKubernetesClient(metricsSystem);
-        this.statusRecorder = new StatusRecorder(SparkOperatorConf.getApplicationStatusListener());
+        this.appSubmissionWorker = new SparkAppSubmissionWorker();
+        this.sparkAppStatusRecorder = new SparkAppStatusRecorder(
+                SparkOperatorConf.getAppStatusListener());
         this.registeredSparkControllers = new HashSet<>();
         this.watchedNamespaces = SparkReconcilerUtils.getWatchedNamespaces();
         this.sentinelManager = new SentinelManager();
@@ -96,8 +99,8 @@ public SparkOperator() {
     protected Operator createOperator() {
         Operator op = new Operator(this::overrideOperatorConfigs);
         registeredSparkControllers.add(
-                op.register(new SparkApplicationReconciler(statusRecorder, sentinelManager),
-                        this::overrideControllerConfigs));
+                op.register(new SparkAppReconciler(appSubmissionWorker, sparkAppStatusRecorder,
+                        sentinelManager), this::overrideControllerConfigs));
         return op;
     }

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java
index 32dbd71a..ac73bddd 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/config/SparkOperatorConf.java
@@ -28,7 +28,7 @@ import lombok.extern.slf4j.Slf4j;
 
 import org.apache.commons.lang3.StringUtils;
 
-import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener;
+import org.apache.spark.kubernetes.operator.listeners.SparkAppStatusListener;
 
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.defaultOperatorConfigLabels;
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.labelsAsStr;
@@ -208,9 +208,9 @@ public class SparkOperatorConf {
             .defaultValue(3L)
             .typeParameterClass(Long.class)
             .build();
-    public static final ConfigOption<Long> AppReconcileIntervalSeconds =
+    public static final ConfigOption<Long> SparkAppReconcileIntervalSeconds =
             ConfigOption.<Long>builder()
-                    .key("spark.operator.app.reconcile.interval.seconds")
+                    .key("spark.operator.application.reconcile.interval.seconds")
                     .description(
                             "Interval (in seconds) to reconcile when application is is starting " +
                                     "up. Note that reconcile is always expected to be triggered " +
@@ -315,12 +315,12 @@ public class SparkOperatorConf {
             .enableDynamicOverride(true)
             .typeParameterClass(Long.class)
             .build();
-    public static final ConfigOption<String> APPLICATION_STATUS_LISTENER_CLASS_NAMES =
+    public static final ConfigOption<String> SPARK_APP_STATUS_LISTENER_CLASS_NAMES =
             ConfigOption.<String>builder()
                     .key("spark.operator.application.status.listener.class.names")
                     .defaultValue("")
                     .description(
-                            "Comma-separated names of ApplicationStatusListener class " +
+                            "Comma-separated names of SparkAppStatusListener class " +
                                     "implementations")
                     .enableDynamicOverride(false)
                     .typeParameterClass(String.class)
                     .build();
@@ -370,10 +370,10 @@ public class SparkOperatorConf {
             .typeParameterClass(Long.class)
             .build();
 
-    public static List<ApplicationStatusListener> getApplicationStatusListener() {
-        List<ApplicationStatusListener> listeners = new ArrayList<>();
+    public static List<SparkAppStatusListener> getAppStatusListener() {
+        List<SparkAppStatusListener> listeners = new ArrayList<>();
         String listenerNamesStr =
-                SparkOperatorConf.APPLICATION_STATUS_LISTENER_CLASS_NAMES.getValue();
+                SparkOperatorConf.SPARK_APP_STATUS_LISTENER_CLASS_NAMES.getValue();
         if (StringUtils.isNotBlank(listenerNamesStr)) {
             try {
                 List<String> listenerNames =
@@ -381,8 +381,8 @@ public static List getApplicationStatusListener() {
                         .collect(Collectors.toList());
                 for (String name : listenerNames) {
                     Class<?> listenerClass = Class.forName(name);
-                    if (ApplicationStatusListener.class.isAssignableFrom(listenerClass)) {
-                        listeners.add((ApplicationStatusListener)
+                    if (SparkAppStatusListener.class.isAssignableFrom(listenerClass)) {
+                        listeners.add((SparkAppStatusListener)
                                 listenerClass.getConstructor().newInstance());
                     }
                 }

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/BaseContext.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/BaseContext.java
new file mode 100644
index 00000000..6eafc440
--- /dev/null
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/BaseContext.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.kubernetes.operator.controller;
+
+import io.fabric8.kubernetes.client.KubernetesClient;
+
+import org.apache.spark.kubernetes.operator.BaseResource;
+
+public abstract class BaseContext<CR extends BaseResource<?, ?, ?, ?, ?>> {
+    public abstract CR getResource();
+    public abstract KubernetesClient getClient();
+}

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkAppContext.java
similarity index 80%
rename from spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
rename to spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkAppContext.java
index f13b389f..ab4884b9 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkApplicationContext.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/controller/SparkAppContext.java
@@ -27,13 +27,13 @@
 import io.fabric8.kubernetes.api.model.Pod;
 import io.fabric8.kubernetes.client.KubernetesClient;
 import io.javaoperatorsdk.operator.api.reconciler.Context;
-import lombok.Getter;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 
-import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkAppResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkAppSubmissionWorker;
 import org.apache.spark.kubernetes.operator.SparkApplication;
-import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils;
+import org.apache.spark.kubernetes.operator.reconciler.SparkAppReconcileUtils;
 
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.driverLabels;
 import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.executorLabels;
@@ -44,11 +44,11 @@
  */
 @RequiredArgsConstructor
 @Slf4j
-public class SparkApplicationContext {
-    @Getter
+public class SparkAppContext extends BaseContext<SparkApplication> {
     private final SparkApplication sparkApplication;
     private final Context<SparkApplication> josdkContext;
-    private ApplicationResourceSpec secondaryResourceSpec;
+    private final SparkAppSubmissionWorker submissionWorker;
+    private SparkAppResourceSpec secondaryResourceSpec;
 
     public Optional<Pod> getDriverPod() {
         return josdkContext.getSecondaryResourcesAsStream(Pod.class)
@@ -64,16 +64,22 @@ public Set getExecutorsForApplication() {
                 .collect(Collectors.toSet());
     }
 
-    private ApplicationResourceSpec getSecondaryResourceSpec() {
+    private SparkAppResourceSpec getSecondaryResourceSpec() {
         synchronized (this) {
             if (secondaryResourceSpec == null) {
-                secondaryResourceSpec = SparkApplicationReconcileUtils.buildResourceSpec(
-                        sparkApplication, josdkContext.getClient());
+                secondaryResourceSpec = SparkAppReconcileUtils.buildResourceSpec(sparkApplication,
+                        josdkContext.getClient(),
+                        submissionWorker);
             }
             return secondaryResourceSpec;
         }
     }
 
+    @Override
+    public SparkApplication getResource() {
+        return sparkApplication;
+    }
+
+    @Override
     public KubernetesClient getClient() {
         return josdkContext.getClient();
     }

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
index 2d840048..00ef9af6 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/decorators/DriverResourceDecorator.java
@@ -57,6 +57,7 @@ public T decorate(T resource) {
         if (!ownerReferenceExists) {
             ObjectMeta metaData = new ObjectMetaBuilder(resource.getMetadata())
                     .addToOwnerReferences(buildOwnerReferenceTo(driverPod))
+                    .addToLabels(driverPod.getMetadata().getLabels())
                     .build();
             resource.setMetadata(metaData);
         }

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/SparkAppStatusListener.java
similarity index 92%
rename from spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
rename to spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/SparkAppStatusListener.java
index 2a3afe83..e9043361 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/ApplicationStatusListener.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/listeners/SparkAppStatusListener.java
@@ -24,6 +24,6 @@
 
 /**
  * Custom listeners, if added, would be listening to Spark App status change
  */
-public abstract class ApplicationStatusListener extends BaseStatusListener {
+public abstract class SparkAppStatusListener extends BaseStatusListener {
 }

diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
index b6716d16..b115733c 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/ReconcileProgress.java
@@ -22,7 +22,7 @@
 
 import lombok.Data;
 
-import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.AppReconcileIntervalSeconds;
+import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.SparkAppReconcileIntervalSeconds;
 
 /**
  * Represents the progress of a reconcile request
@@ -44,12 +44,12 @@ private ReconcileProgress(boolean completed, boolean requeue, Duration requeueAf
 
     public static ReconcileProgress proceed() {
         return new ReconcileProgress(false, true,
-                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+                Duration.ofSeconds(SparkAppReconcileIntervalSeconds.getValue()));
     }
 
     public static ReconcileProgress completeAndDefaultRequeue() {
         return new ReconcileProgress(true, true,
-                Duration.ofSeconds(AppReconcileIntervalSeconds.getValue()));
+                Duration.ofSeconds(SparkAppReconcileIntervalSeconds.getValue()));
     }
 
     public static ReconcileProgress completeAndRequeueAfter(Duration requeueAfterDuration) {
diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtils.java
similarity index 91%
rename from spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
rename to spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtils.java
index 6ea9eece..5f582145 100644
--- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtils.java
+++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtils.java
@@ -32,8 +32,8 @@
 import io.fabric8.kubernetes.client.KubernetesClient;
 import lombok.extern.slf4j.Slf4j;
 
-import org.apache.spark.kubernetes.operator.ApplicationClientWorker;
-import org.apache.spark.kubernetes.operator.ApplicationResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkAppResourceSpec;
+import org.apache.spark.kubernetes.operator.SparkAppSubmissionWorker;
 import org.apache.spark.kubernetes.operator.SparkApplication;
 import org.apache.spark.kubernetes.operator.decorators.DriverDecorator;
 import org.apache.spark.kubernetes.operator.utils.ModelUtils;
@@ -44,7 +44,7 @@
 import static org.apache.spark.kubernetes.operator.utils.ModelUtils.overrideExecutorTemplate;
 
 @Slf4j
-public class SparkApplicationReconcileUtils {
+public class SparkAppReconcileUtils {
     public static boolean enableForceDelete(SparkApplication app) {
         long timeoutThreshold = app.getSpec().getApplicationTolerations()
                 .getApplicationTimeoutConfig().getForceTerminationGracePeriodMillis();
@@ -53,19 +53,18 @@ public static boolean enableForceDelete(SparkApplication app) {
         return lastTransitionTime.plusMillis(timeoutThreshold).isBefore(Instant.now());
     }
 
-    public static ApplicationResourceSpec buildResourceSpec(final SparkApplication app,
-                                                            final KubernetesClient client) {
-        Map<String, String> confOverrides = overrideMetadataForSecondaryResources(app);
-        ApplicationResourceSpec resourceSpec =
-                ApplicationClientWorker.getResourceSpec(app, client, confOverrides);
+    public static SparkAppResourceSpec buildResourceSpec(final SparkApplication app,
+                                                         final KubernetesClient client,
+                                                         final SparkAppSubmissionWorker worker) {
+        Map<String, String> confOverrides = overrideDependencyConf(app);
+        SparkAppResourceSpec resourceSpec = worker.getResourceSpec(app, client, confOverrides);
         cleanUpTempResourcesForApp(app, confOverrides);
         DriverDecorator decorator = new DriverDecorator(app);
         decorator.decorate(resourceSpec.getConfiguredPod());
         return resourceSpec;
     }
 
-    private static Map<String, String> overrideMetadataForSecondaryResources(
-            final SparkApplication app) {
+    private static Map<String, String> overrideDependencyConf(final SparkApplication app) {
         Map<String, String> confOverrides = new HashMap<>();
         SparkReconcilerUtils.sparkAppResourceLabels(app).forEach((k, v) -> {
             confOverrides.put("spark.kubernetes.driver.label."
+ k, v); diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconciler.java similarity index 89% rename from spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java rename to spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconciler.java index 67562790..eb2cf2d5 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconciler.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconciler.java @@ -41,9 +41,10 @@ import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.spark.kubernetes.operator.SparkAppSubmissionWorker; import org.apache.spark.kubernetes.operator.Constants; import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.health.SentinelManager; import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverReadyObserver; import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverRunningObserver; @@ -57,9 +58,9 @@ import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppTerminatedStep; import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.AppValidateStep; import org.apache.spark.kubernetes.operator.reconciler.reconcilesteps.UnknownStateStep; -import org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusUtils; import org.apache.spark.kubernetes.operator.utils.LoggingUtils; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; import static org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils.commonResourceLabelsStr; @@ -72,12 +73,13 @@ @ControllerConfiguration @Slf4j @RequiredArgsConstructor -public class SparkApplicationReconciler +public class SparkAppReconciler implements Reconciler, ErrorStatusHandler, EventSourceInitializer, Cleaner { - private final StatusRecorder statusRecorder; + private final SparkAppSubmissionWorker submissionWorker; + private final SparkAppStatusRecorder sparkAppStatusRecorder; private final SentinelManager sentinelManager; @Override @@ -92,11 +94,12 @@ public UpdateControl reconcile(SparkApplication sparkApplicati return UpdateControl.noUpdate(); } log.debug("Start reconciliation."); - statusRecorder.updateStatusFromCache(sparkApplication); - SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); + sparkAppStatusRecorder.updateStatusFromCache(sparkApplication); + SparkAppContext ctx = new SparkAppContext(sparkApplication, context, + submissionWorker); List reconcileSteps = getReconcileSteps(sparkApplication); for (AppReconcileStep step : reconcileSteps) { - ReconcileProgress progress = step.reconcile(ctx, statusRecorder); + ReconcileProgress progress = step.reconcile(ctx, sparkAppStatusRecorder); if (progress.isCompleted()) { return SparkReconcilerUtils.toUpdateControl(sparkApplication, progress); } @@ -201,13 
+204,14 @@ public DeleteControl cleanup(SparkApplication sparkApplication, try { trackedMDC.set(sparkApplication); log.info("Cleaning up resources for SparkApp."); - SparkApplicationContext ctx = new SparkApplicationContext(sparkApplication, context); + SparkAppContext ctx = new SparkAppContext(sparkApplication, context, + submissionWorker); List cleanupSteps = new ArrayList<>(); cleanupSteps.add(new AppValidateStep()); cleanupSteps.add(new AppTerminatedStep()); - cleanupSteps.add(new AppCleanUpStep(ApplicationStatusUtils::appCancelled)); + cleanupSteps.add(new AppCleanUpStep(SparkAppStatusUtils::appCancelled)); for (AppReconcileStep step : cleanupSteps) { - ReconcileProgress progress = step.reconcile(ctx, statusRecorder); + ReconcileProgress progress = step.reconcile(ctx, sparkAppStatusRecorder); if (progress.isCompleted()) { if (progress.isRequeue()) { return DeleteControl.noFinalizerRemoval().rescheduleAfter( @@ -221,7 +225,7 @@ public DeleteControl cleanup(SparkApplication sparkApplication, log.info("Cleanup completed"); trackedMDC.reset(); } - statusRecorder.removeCachedStatus(sparkApplication); + sparkAppStatusRecorder.removeCachedStatus(sparkApplication); return deleteControl; } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java index 0b2cffe6..a6e61426 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/observers/AppDriverTimeoutObserver.java @@ -28,7 +28,7 @@ import org.apache.spark.kubernetes.operator.spec.ApplicationTimeoutConfig; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusUtils; /** * Observes driver status and time-out as configured in app spec @@ -65,16 +65,16 @@ public Optional observe(Pod driver, switch (currentStatus.getCurrentState().getCurrentStateSummary()) { case DRIVER_REQUESTED: timeoutThreshold = timeoutConfig.getDriverStartTimeoutMillis(); - supplier = ApplicationStatusUtils::driverLaunchTimedOut; + supplier = SparkAppStatusUtils::driverLaunchTimedOut; break; case DRIVER_STARTED: timeoutThreshold = timeoutConfig.getSparkSessionStartTimeoutMillis(); - supplier = ApplicationStatusUtils::driverReadyTimedOut; + supplier = SparkAppStatusUtils::driverReadyTimedOut; break; case DRIVER_READY: case INITIALIZED_BELOW_THRESHOLD_EXECUTORS: timeoutThreshold = timeoutConfig.getExecutorStartTimeoutMillis(); - supplier = ApplicationStatusUtils::executorLaunchTimedOut; + supplier = SparkAppStatusUtils::executorLaunchTimedOut; break; default: // No timeout check needed for other states diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java index d9dd3d6c..8e214e6e 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppCleanUpStep.java @@ -32,9 +32,9 
@@ import org.apache.commons.lang3.StringUtils; import org.apache.spark.kubernetes.operator.config.SparkOperatorConf; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; -import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconcileUtils; +import org.apache.spark.kubernetes.operator.reconciler.SparkAppReconcileUtils; import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; import org.apache.spark.kubernetes.operator.spec.ApplicationTolerations; import org.apache.spark.kubernetes.operator.spec.ResourceRetentionPolicy; @@ -42,7 +42,7 @@ import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; /** * Cleanup all secondary resources when application is deleted, or at the end of each attempt @@ -55,11 +55,11 @@ public class AppCleanUpStep extends AppReconcileStep { private Supplier cleanUpSuccessStateSupplier; @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - ApplicationStatus currentStatus = context.getSparkApplication().getStatus(); + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { + ApplicationStatus currentStatus = context.getResource().getStatus(); ApplicationTolerations tolerations = - context.getSparkApplication().getSpec().getApplicationTolerations(); + context.getResource().getSpec().getApplicationTolerations(); ResourceRetentionPolicy resourceRetentionPolicy = tolerations.getResourceRetentionPolicy(); String stateMessage = null; @@ -110,8 +110,7 @@ public ReconcileProgress reconcile(SparkApplicationContext context, Optional driver = context.getDriverPod(); driver.ifPresent(resourcesToRemove::add); } - boolean forceDelete = - SparkApplicationReconcileUtils.enableForceDelete(context.getSparkApplication()); + boolean forceDelete = SparkAppReconcileUtils.enableForceDelete(context.getResource()); for (HasMetadata resource : resourcesToRemove) { SparkReconcilerUtils.deleteResourceIfExists(context.getClient(), resource, forceDelete); } @@ -154,8 +153,8 @@ protected boolean retainReleaseResource(ResourceRetentionPolicy resourceRetentio } } - private ReconcileProgress updateStateAndProceed(SparkApplicationContext context, - StatusRecorder statusRecorder, + private ReconcileProgress updateStateAndProceed(SparkAppContext context, + SparkAppStatusRecorder statusRecorder, ApplicationStatus updatedStatus, long requeueAfterMillis) { statusRecorder.persistStatus(context, updatedStatus); diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java index 38bb7d62..28647ea1 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppInitStep.java @@ -30,14 +30,14 @@ import org.apache.spark.kubernetes.operator.Constants; import 
org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.decorators.DriverResourceDecorator; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.reconciler.SparkReconcilerUtils; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.Constants.ScheduleFailureMessage; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; @@ -51,13 +51,13 @@ @Slf4j public class AppInitStep extends AppReconcileStep { @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - ApplicationState currentState = context.getSparkApplication().getStatus().getCurrentState(); + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { + ApplicationState currentState = context.getResource().getStatus().getCurrentState(); if (!currentState.getCurrentStateSummary().isInitializing()) { return proceed(); } - SparkApplication app = context.getSparkApplication(); + SparkApplication app = context.getResource(); if (app.getStatus().getPreviousAttemptSummary() != null) { Instant lastTransitionTime = Instant.parse(currentState.getLastTransitionTime()); Instant restartTime = lastTransitionTime.plusMillis( @@ -102,7 +102,7 @@ public ReconcileProgress reconcile(SparkApplicationContext context, } } } - ApplicationStatus updatedStatus = context.getSparkApplication().getStatus() + ApplicationStatus updatedStatus = context.getResource().getStatus() .appendNewState(new ApplicationState(ApplicationStateSummary.DRIVER_REQUESTED, Constants.DriverRequestedMessage)); statusRecorder.persistStatus(context, updatedStatus); @@ -114,21 +114,21 @@ public ReconcileProgress reconcile(SparkApplicationContext context, String errorMessage = ScheduleFailureMessage + " StackTrace: " + buildGeneralErrorMessage(e); - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + statusRecorder.persistStatus(context, context.getResource().getStatus() .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, errorMessage))); return completeAndImmediateRequeue(); } } - private void updateStatusForCreationFailure(SparkApplicationContext context, + private void updateStatusForCreationFailure(SparkAppContext context, HasMetadata resourceSpec, - StatusRecorder statusRecorder) { + SparkAppStatusRecorder statusRecorder) { if (log.isErrorEnabled()) { log.error("Failed all attempts to request driver resource {}.", resourceSpec.getMetadata()); } - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + statusRecorder.persistStatus(context, context.getResource().getStatus() .appendNewState(new ApplicationState(ApplicationStateSummary.SCHEDULING_FAILURE, "Failed to request resource for driver with kind: " + resourceSpec.getKind() diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java index 8e792901..6508105e 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppReconcileStep.java @@ -25,29 +25,29 @@ import io.fabric8.kubernetes.api.model.Pod; import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.reconciler.observers.BaseAppDriverObserver; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; -import static org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils.driverUnexpectedRemoved; +import static org.apache.spark.kubernetes.operator.utils.SparkAppStatusUtils.driverUnexpectedRemoved; /** * Basic reconcile step for application */ public abstract class AppReconcileStep { - public abstract ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder); + public abstract ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder); - protected ReconcileProgress observeDriver(final SparkApplicationContext context, - final StatusRecorder statusRecorder, + protected ReconcileProgress observeDriver(final SparkAppContext context, + final SparkAppStatusRecorder statusRecorder, final List observers) { Optional driverPodOptional = context.getDriverPod(); - SparkApplication app = context.getSparkApplication(); + SparkApplication app = context.getResource(); ApplicationStatus currentStatus = app.getStatus(); if (driverPodOptional.isPresent()) { List stateUpdates = observers.stream() diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java index b61662bb..57937ee0 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppResourceObserveStep.java @@ -22,10 +22,10 @@ import lombok.RequiredArgsConstructor; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.reconciler.observers.BaseAppDriverObserver; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; 
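// Illustrative sketch, not part of this patch: how SparkAppReconciler drives the
// AppReconcileStep contract above. The loop mirrors the reconcile() hunk earlier in
// this diff; the step list here is a shortened example, the real list comes from
// getReconcileSteps(sparkApplication).
//
//     SparkAppContext ctx = new SparkAppContext(sparkApplication, context, submissionWorker);
//     for (AppReconcileStep step : List.of(new AppValidateStep(), new AppInitStep(),
//             new AppRunningStep())) {
//         ReconcileProgress progress = step.reconcile(ctx, sparkAppStatusRecorder);
//         if (progress.isCompleted()) {
//             return SparkReconcilerUtils.toUpdateControl(sparkApplication, progress);
//         }
//         // a step that returns proceed() falls through to the next step in the chain
//     }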
/** * Observes secondary resource and update app status if needed @@ -36,8 +36,8 @@ public class AppResourceObserveStep extends AppReconcileStep { private final List observers; @Override - public ReconcileProgress reconcile(final SparkApplicationContext context, - final StatusRecorder statusRecorder) { + public ReconcileProgress reconcile(final SparkAppContext context, + final SparkAppStatusRecorder statusRecorder) { return observeDriver(context, statusRecorder, observers); } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java index 651cb1ba..477e4b73 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppRunningStep.java @@ -24,14 +24,14 @@ import io.fabric8.kubernetes.api.model.Pod; import org.apache.spark.kubernetes.operator.Constants; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.reconciler.observers.AppDriverRunningObserver; import org.apache.spark.kubernetes.operator.spec.InstanceConfig; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.utils.PodUtils; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndDefaultRequeue; @@ -40,17 +40,14 @@ */ public class AppRunningStep extends AppReconcileStep { @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - InstanceConfig instanceConfig = - context.getSparkApplication().getSpec().getApplicationTolerations() + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { + InstanceConfig instanceConfig = context.getResource().getSpec().getApplicationTolerations() .getInstanceConfig(); - ApplicationStateSummary prevStateSummary = - context.getSparkApplication().getStatus().getCurrentState() + ApplicationStateSummary prevStateSummary = context.getResource().getStatus().getCurrentState() .getCurrentStateSummary(); ApplicationStateSummary proposedStateSummary; - String stateMessage = - context.getSparkApplication().getStatus().getCurrentState().getMessage(); + String stateMessage = context.getResource().getStatus().getCurrentState().getMessage(); if (instanceConfig == null || instanceConfig.getInitExecutors() == 0L || (!prevStateSummary.isStarting() && instanceConfig.getMinExecutors() == 0L)) { @@ -85,7 +82,7 @@ public ReconcileProgress reconcile(SparkApplicationContext context, } } if (!proposedStateSummary.equals(prevStateSummary)) { - statusRecorder.persistStatus(context, context.getSparkApplication().getStatus() + statusRecorder.persistStatus(context, context.getResource().getStatus() .appendNewState(new ApplicationState(proposedStateSummary, stateMessage))); return completeAndDefaultRequeue(); } else { diff --git 
a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java index d5d91868..b16a6be9 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppTerminatedStep.java @@ -18,9 +18,9 @@ package org.apache.spark.kubernetes.operator.reconciler.reconcilesteps; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; @@ -29,11 +29,11 @@ */ public class AppTerminatedStep extends AppReconcileStep { @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - if (context.getSparkApplication().getStatus().getCurrentState().getCurrentStateSummary() + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { + if (context.getResource().getStatus().getCurrentState().getCurrentStateSummary() .isTerminated()) { - statusRecorder.removeCachedStatus(context.getSparkApplication()); + statusRecorder.removeCachedStatus(context.getResource()); return ReconcileProgress.completeAndNoRequeue(); } return proceed(); diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java index afd0c755..76fbbf77 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/AppValidateStep.java @@ -20,17 +20,17 @@ import lombok.extern.slf4j.Slf4j; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.spec.DeploymentMode; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.completeAndImmediateRequeue; import static org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress.proceed; -import static org.apache.spark.kubernetes.operator.utils.ApplicationStatusUtils.isValidApplicationStatus; +import static org.apache.spark.kubernetes.operator.utils.SparkAppStatusUtils.isValidApplicationStatus; /** * Validates the submitted app. This can be re-factored into webhook in future. 
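// Illustrative sketch of the ReconcileProgress factories renamed earlier in this diff,
// written as (completed, requeue, requeueAfter). completeAndNoRequeue() appears in
// AppTerminatedStep above; its exact field values are an assumption here.
//
//     ReconcileProgress.proceed();                   // (false, true, default interval) - keep stepping
//     ReconcileProgress.completeAndDefaultRequeue(); // (true,  true, default interval)
//     ReconcileProgress.completeAndRequeueAfter(d);  // (true,  true, d)
//     ReconcileProgress.completeAndNoRequeue();      // (true,  false, n/a) - assumed; terminated apps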
@@ -38,17 +38,17 @@ @Slf4j public class AppValidateStep extends AppReconcileStep { @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { - if (!isValidApplicationStatus(context.getSparkApplication())) { + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { + if (!isValidApplicationStatus(context.getResource())) { log.warn("Spark application found with empty status. Resetting to initial state."); statusRecorder.persistStatus(context, new ApplicationStatus()); } - if (DeploymentMode.ClientMode.equals(context.getSparkApplication().getSpec())) { + if (DeploymentMode.ClientMode.equals(context.getResource().getSpec())) { ApplicationState failure = new ApplicationState(ApplicationStateSummary.FAILED, "Client mode is not supported yet."); statusRecorder.persistStatus(context, - context.getSparkApplication().getStatus().appendNewState(failure)); + context.getResource().getStatus().appendNewState(failure)); return completeAndImmediateRequeue(); } return proceed(); diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java index 37200e02..199abde7 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/reconciler/reconcilesteps/UnknownStateStep.java @@ -22,11 +22,11 @@ import io.fabric8.kubernetes.api.model.Pod; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.reconciler.ReconcileProgress; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.apache.spark.kubernetes.operator.Constants.UnknownStateMessage; @@ -35,14 +35,13 @@ */ public class UnknownStateStep extends AppReconcileStep { @Override - public ReconcileProgress reconcile(SparkApplicationContext context, - StatusRecorder statusRecorder) { + public ReconcileProgress reconcile(SparkAppContext context, + SparkAppStatusRecorder statusRecorder) { ApplicationState state = new ApplicationState(ApplicationStateSummary.FAILED, UnknownStateMessage); Optional driver = context.getDriverPod(); driver.ifPresent(pod -> state.setLastObservedDriverStatus(pod.getStatus())); - statusRecorder.persistStatus(context, - context.getSparkApplication().getStatus().appendNewState(state)); + statusRecorder.persistStatus(context, context.getResource().getStatus().appendNewState(state)); return ReconcileProgress.completeAndImmediateRequeue(); } } diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusRecorder.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusRecorder.java new file mode 100644 index 00000000..5edef8ab --- /dev/null +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusRecorder.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.spark.kubernetes.operator.utils; + +import java.util.List; + +import org.apache.spark.kubernetes.operator.SparkApplication; +import org.apache.spark.kubernetes.operator.listeners.SparkAppStatusListener; +import org.apache.spark.kubernetes.operator.status.ApplicationStatus; + +public class SparkAppStatusRecorder extends StatusRecorder { + public SparkAppStatusRecorder(List statusListeners) { + super(statusListeners, ApplicationStatus.class, SparkApplication.class); + } +} diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusUtils.java similarity index 98% rename from spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java rename to spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusUtils.java index 7817ad2f..1307d0ef 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/ApplicationStatusUtils.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/SparkAppStatusUtils.java @@ -26,7 +26,7 @@ /** * Handy utils for create & manage Application Status */ -public class ApplicationStatusUtils { +public class SparkAppStatusUtils { public static boolean isValidApplicationStatus(SparkApplication app) { // null check diff --git a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java index 56b055b7..23499c2b 100644 --- a/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java +++ b/spark-operator/src/main/java/org/apache/spark/kubernetes/operator/utils/StatusRecorder.java @@ -22,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import io.fabric8.kubernetes.client.KubernetesClient; @@ -31,10 +30,10 @@ import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; -import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; -import org.apache.spark.kubernetes.operator.listeners.ApplicationStatusListener; -import org.apache.spark.kubernetes.operator.status.ApplicationStatus; +import org.apache.spark.kubernetes.operator.BaseResource; +import org.apache.spark.kubernetes.operator.controller.BaseContext; +import org.apache.spark.kubernetes.operator.listeners.BaseStatusListener; +import org.apache.spark.kubernetes.operator.status.BaseStatus; import static 
org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchFailureBackoffSeconds; import static org.apache.spark.kubernetes.operator.config.SparkOperatorConf.StatusPatchMaxRetry; @@ -42,20 +41,28 @@ /** *

  * Note - this is inspired by
- * Flink Operator Status Recorder
+ * Flink Operator Status Recorder
  * 
* Enables additional (extendable) observers for Spark App status. * Cache & version locking might be removed in future version as batch app does not expect * spec change after submitted. */ @Slf4j -public class StatusRecorder { - protected final List appStatusListeners; +public class StatusRecorder, + CR extends BaseResource, + LISTENER extends BaseStatusListener> { + protected final List statusListeners; protected final ObjectMapper objectMapper = new ObjectMapper(); + protected final Class statusClass; + protected final Class resourceClass; protected final ConcurrentHashMap statusCache; - public StatusRecorder(List appStatusListeners) { - this.appStatusListeners = appStatusListeners; + protected StatusRecorder(List statusListeners, + Class statusClass, + Class resourceClass) { + this.statusListeners = statusListeners; + this.statusClass = statusClass; + this.resourceClass = resourceClass; this.statusCache = new ConcurrentHashMap<>(); } @@ -68,7 +75,7 @@ public StatusRecorder(List appStatusListeners) { * @param resource Resource for which status update should be performed */ @SneakyThrows - private void patchAndCacheStatus(SparkApplication resource, KubernetesClient client) { + private void patchAndStatusWithVersionLocked(CR resource, KubernetesClient client) { ObjectNode newStatusNode = objectMapper.convertValue(resource.getStatus(), ObjectNode.class); ResourceID resourceId = ResourceID.fromResource(resource); @@ -79,17 +86,18 @@ private void patchAndCacheStatus(SparkApplication resource, KubernetesClient cli return; } - ApplicationStatus prevStatus = - objectMapper.convertValue(previousStatusNode, ApplicationStatus.class); + STATUS prevStatus = objectMapper.convertValue(previousStatusNode, statusClass); Exception err = null; - for (long i = 0; i < StatusPatchMaxRetry.getValue(); i++) { + long maxRetry = StatusPatchMaxRetry.getValue(); + for (long i = 0; i < maxRetry; i++) { // We retry the status update 3 times to avoid some intermittent connectivity errors try { - replaceStatus(resource, prevStatus, client); + var updated = client.resource(resource).lockResourceVersion().updateStatus(); + resource.getMetadata().setResourceVersion(updated.getMetadata().getResourceVersion()); err = null; } catch (KubernetesClientException e) { - log.error("Error while patching status, retrying {}/3...", (i + 1), e); + log.error("Error while patching status, retrying {}/{}...", (i + 1), maxRetry, e); Thread.sleep( TimeUnit.SECONDS.toMillis(StatusPatchFailureBackoffSeconds.getValue())); err = e; @@ -101,72 +109,15 @@ private void patchAndCacheStatus(SparkApplication resource, KubernetesClient cli } statusCache.put(resourceId, newStatusNode); - appStatusListeners.forEach(listener -> { + statusListeners.forEach(listener -> { listener.listenStatus(resource, prevStatus, resource.getStatus()); }); } - public void persistStatus(SparkApplicationContext context, - ApplicationStatus newStatus) { - context.getSparkApplication().setStatus(newStatus); - patchAndCacheStatus(context.getSparkApplication(), context.getClient()); - } - - private void replaceStatus(SparkApplication resource, ApplicationStatus prevStatus, - KubernetesClient client) - throws JsonProcessingException { - int retries = 0; - while (true) { - try { - var updated = client.resource(resource).lockResourceVersion().updateStatus(); - - // If we successfully replaced the status, update the resource version so we know - // what to lock next in the same reconciliation loop - resource.getMetadata() - 
.setResourceVersion(updated.getMetadata().getResourceVersion()); - return; - } catch (KubernetesClientException kce) { - // 409 is the error code for conflicts resulting from the locking - if (kce.getCode() == 409) { - var currentVersion = resource.getMetadata().getResourceVersion(); - log.debug( - "Could not apply status update for resource version {}", - currentVersion); - - var latest = client.resource(resource).get(); - var latestVersion = latest.getMetadata().getResourceVersion(); - - if (latestVersion.equals(currentVersion)) { - // This should not happen as long as the client works consistently - log.error("Unable to fetch latest resource version"); - throw kce; - } - - if (latest.getStatus().equals(prevStatus)) { - if (retries++ < 3) { - log.debug( - "Retrying status update for latest version {}", latestVersion); - resource.getMetadata().setResourceVersion(latestVersion); - } else { - // If we cannot get the latest version in 3 tries we throw the error to - // retry with delay - throw kce; - } - } else { - throw new RuntimeException( - "Status have been modified externally in version " - + latestVersion - + " Previous: " - + objectMapper.writeValueAsString(prevStatus) - + " Latest: " - + objectMapper.writeValueAsString(latest.getStatus()), kce); - } - } else { - // We simply throw non conflict errors, to trigger retry with delay - throw kce; - } - } - } + public void persistStatus(BaseContext context, + STATUS newStatus) { + context.getResource().setStatus(newStatus); + patchAndStatusWithVersionLocked(context.getResource(), context.getClient()); } /** @@ -179,13 +130,12 @@ private void replaceStatus(SparkApplication resource, ApplicationStatus prevStat * * @param resource Resource for which the status should be updated from the cache */ - public void updateStatusFromCache(SparkApplication resource) { + public void updateStatusFromCache(CR resource) { var key = ResourceID.fromResource(resource); var cachedStatus = statusCache.get(key); if (cachedStatus != null) { resource.setStatus( - objectMapper.convertValue( - cachedStatus, resource.getStatus().getClass())); + objectMapper.convertValue(cachedStatus, statusClass)); } else { // Initialize cache with current status copy statusCache.put(key, objectMapper.convertValue(resource.getStatus(), ObjectNode.class)); @@ -195,7 +145,7 @@ public void updateStatusFromCache(SparkApplication resource) { /** * Remove cached status */ - public void removeCachedStatus(SparkApplication resource) { + public void removeCachedStatus(CR resource) { statusCache.remove(ResourceID.fromResource(resource)); } } diff --git a/spark-operator/src/main/resources/spark-pi.yaml b/spark-operator/src/main/resources/spark-pi.yaml index a93871c3..c7e3ae4b 100644 --- a/spark-operator/src/main/resources/spark-pi.yaml +++ b/spark-operator/src/main/resources/spark-pi.yaml @@ -26,7 +26,6 @@ spec: sparkConf: spark.executor.instances: "5" spark.kubernetes.container.image: "spark:3.5.1-scala2.12-java11-python3-r-ubuntu" - spark.kubernetes.namespace: "spark-test" spark.kubernetes.authenticate.driver.serviceAccountName: "spark" applicationTolerations: resourceRetentionPolicy: RetainOnFailure diff --git a/spark-operator/src/main/resources/streaming.yaml b/spark-operator/src/main/resources/streaming.yaml index 4c9ebe99..89513e7d 100644 --- a/spark-operator/src/main/resources/streaming.yaml +++ b/spark-operator/src/main/resources/streaming.yaml @@ -83,7 +83,6 @@ spec: sparkConf: spark.executor.instances: "5" spark.kubernetes.container.image: 
"spark:3.5.1-scala2.12-java11-python3-r-ubuntu" - spark.kubernetes.namespace: "spark-test" spark.kubernetes.authenticate.driver.serviceAccountName: "spark" spark.kubernetes.driver.podTemplateContainerName: "main-container" runtimeVersions: diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java index 14202360..254829e3 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/metrics/source/OperatorJosdkMetricsTest.java @@ -36,7 +36,7 @@ import org.junit.jupiter.api.Test; import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.reconciler.SparkApplicationReconciler; +import org.apache.spark.kubernetes.operator.reconciler.SparkAppReconciler; class OperatorJosdkMetricsTest { public static final String DEFAULT_NAMESPACE = "default"; @@ -46,7 +46,7 @@ class OperatorJosdkMetricsTest { private static final Map metadata = Map.of(Constants.RESOURCE_GVK_KEY, GroupVersionKind.gvkFor(SparkApplication.class), Constants.CONTROLLER_NAME, "test-controller-name"); - private static final String controllerName = SparkApplicationReconciler.class.getSimpleName(); + private static final String controllerName = SparkAppReconciler.class.getSimpleName(); private OperatorJosdkMetrics operatorMetrics; @@ -63,13 +63,13 @@ void testTimeControllerExecution() throws Exception { Map metrics = operatorMetrics.metricRegistry().getMetrics(); Assertions.assertEquals(4, metrics.size()); Assertions.assertTrue( - metrics.containsKey("sparkapplication.sparkapplicationreconciler.reconcile.both")); + metrics.containsKey("sparkapplication.sparkappreconciler.reconcile.both")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.both")); + "sparkapplication.testns.sparkappreconciler.reconcile.both")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.success.both")); + "sparkapplication.sparkappreconciler.reconcile.success.both")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.success.both")); + "sparkapplication.testns.sparkappreconciler.reconcile.success.both")); var failedExecution = new FooTestingExecutionBase<>(); try { @@ -78,14 +78,14 @@ void testTimeControllerExecution() throws Exception { Assertions.assertEquals(e.getMessage(), "Foo exception"); Assertions.assertEquals(8, metrics.size()); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.failure")); + "sparkapplication.sparkappreconciler.reconcile.failure")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.sparkapplicationreconciler.reconcile.failure.exception" + + "sparkapplication.sparkappreconciler.reconcile.failure.exception" + ".nosuchfieldexception")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure")); + "sparkapplication.testns.sparkappreconciler.reconcile.failure")); Assertions.assertTrue(metrics.containsKey( - "sparkapplication.testns.sparkapplicationreconciler.reconcile.failure." + + "sparkapplication.testns.sparkappreconciler.reconcile.failure." 
+ "exception.nosuchfieldexception")); } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtilsTest.java similarity index 54% rename from spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java rename to spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtilsTest.java index d6ac27d1..faae3fa5 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcileUtilsTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcileUtilsTest.java @@ -28,11 +28,9 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.apache.spark.kubernetes.operator.ApplicationClientWorker; -import org.apache.spark.kubernetes.operator.ApplicationResourceSpec; +import org.apache.spark.kubernetes.operator.SparkAppSubmissionWorker; +import org.apache.spark.kubernetes.operator.SparkAppResourceSpec; import org.apache.spark.kubernetes.operator.SparkApplication; import static org.mockito.ArgumentMatchers.any; @@ -41,7 +39,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -class SparkApplicationReconcileUtilsTest { +class SparkAppReconcileUtilsTest { @Test void testForceDeleteEnabled() { @@ -50,7 +48,7 @@ void testForceDeleteEnabled() { Instant.now().minusSeconds(5).toString()); app.getSpec().getApplicationTolerations().getApplicationTimeoutConfig() .setForceTerminationGracePeriodMillis(3000L); - Assertions.assertTrue(SparkApplicationReconcileUtils.enableForceDelete(app)); + Assertions.assertTrue(SparkAppReconcileUtils.enableForceDelete(app)); } @Test @@ -64,32 +62,24 @@ void testBuildResourceSpecCoversBasicOverride() { KubernetesClient mockClient = mock(KubernetesClient.class); Pod mockDriver = mock(Pod.class); when(mockDriver.getMetadata()).thenReturn(new ObjectMeta()); - try (MockedStatic worker = - Mockito.mockStatic(ApplicationClientWorker.class)) { - ApplicationResourceSpec mockSpec = mock(ApplicationResourceSpec.class); - when(mockSpec.getConfiguredPod()).thenReturn(mockDriver); - ArgumentCaptor> captor = ArgumentCaptor.forClass(Map.class); - worker.when(() -> ApplicationClientWorker.getResourceSpec( - any(), any(), captor.capture())).thenReturn(mockSpec); - ApplicationResourceSpec spec = SparkApplicationReconcileUtils.buildResourceSpec(app, - mockClient); - worker.verify(() -> ApplicationClientWorker.getResourceSpec(eq(app), - eq(mockClient), any())); - Map props = captor.getValue(); - Assertions.assertTrue(props.containsKey("spark.kubernetes.namespace")); - Assertions.assertEquals("foo", props.get("spark.kubernetes.namespace")); - ArgumentCaptor metaArgumentCaptor = - ArgumentCaptor.forClass(ObjectMeta.class); - verify(mockDriver).setMetadata(metaArgumentCaptor.capture()); - Assertions.assertEquals(mockSpec, spec); - ObjectMeta metaOverride = metaArgumentCaptor.getValue(); - Assertions.assertEquals(1, metaOverride.getOwnerReferences().size()); - Assertions.assertEquals("bar-app", - metaOverride.getOwnerReferences().get(0).getName()); - Assertions.assertEquals("uid", - metaOverride.getOwnerReferences().get(0).getUid()); - Assertions.assertEquals(app.getKind(), - 
metaOverride.getOwnerReferences().get(0).getKind()); - } + SparkAppResourceSpec mockSpec = mock(SparkAppResourceSpec.class); + when(mockSpec.getConfiguredPod()).thenReturn(mockDriver); + ArgumentCaptor> captor = ArgumentCaptor.forClass(Map.class); + SparkAppSubmissionWorker mockWorker = mock(SparkAppSubmissionWorker.class); + when(mockWorker.getResourceSpec(any(), any(), captor.capture())).thenReturn(mockSpec); + SparkAppResourceSpec spec = SparkAppReconcileUtils.buildResourceSpec(app, mockClient, + mockWorker); + verify(mockWorker).getResourceSpec(eq(app), eq(mockClient), any()); + Map props = captor.getValue(); + Assertions.assertTrue(props.containsKey("spark.kubernetes.namespace")); + Assertions.assertEquals("foo", props.get("spark.kubernetes.namespace")); + ArgumentCaptor metaArgumentCaptor = ArgumentCaptor.forClass(ObjectMeta.class); + verify(mockDriver).setMetadata(metaArgumentCaptor.capture()); + Assertions.assertEquals(mockSpec, spec); + ObjectMeta metaOverride = metaArgumentCaptor.getValue(); + Assertions.assertEquals(1, metaOverride.getOwnerReferences().size()); + Assertions.assertEquals("bar-app", metaOverride.getOwnerReferences().get(0).getName()); + Assertions.assertEquals("uid", metaOverride.getOwnerReferences().get(0).getUid()); + Assertions.assertEquals(app.getKind(), metaOverride.getOwnerReferences().get(0).getKind()); } } diff --git a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcilerTest.java similarity index 83% rename from spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java rename to spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcilerTest.java index 7f4212cb..bc50e394 100644 --- a/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkApplicationReconcilerTest.java +++ b/spark-operator/src/test/java/org/apache/spark/kubernetes/operator/reconciler/SparkAppReconcilerTest.java @@ -32,13 +32,14 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; +import org.apache.spark.kubernetes.operator.SparkAppSubmissionWorker; import org.apache.spark.kubernetes.operator.SparkApplication; -import org.apache.spark.kubernetes.operator.controller.SparkApplicationContext; +import org.apache.spark.kubernetes.operator.controller.SparkAppContext; import org.apache.spark.kubernetes.operator.health.SentinelManager; import org.apache.spark.kubernetes.operator.status.ApplicationState; import org.apache.spark.kubernetes.operator.status.ApplicationStateSummary; import org.apache.spark.kubernetes.operator.status.ApplicationStatus; -import org.apache.spark.kubernetes.operator.utils.StatusRecorder; +import org.apache.spark.kubernetes.operator.utils.SparkAppStatusRecorder; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -47,14 +48,15 @@ import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.when; -class SparkApplicationReconcilerTest { - private StatusRecorder mockRecorder = mock(StatusRecorder.class); +class SparkAppReconcilerTest { + private SparkAppStatusRecorder mockRecorder = mock(SparkAppStatusRecorder.class); private SentinelManager mockSentinelManager = mock(SentinelManager.class); private KubernetesClient mockClient = mock(KubernetesClient.class); private Context mockContext = mock(Context.class); private Pod 
mockDriver = mock(Pod.class); + private SparkAppSubmissionWorker mockWorker = mock(SparkAppSubmissionWorker.class); SparkApplication app = new SparkApplication(); - SparkApplicationReconciler reconciler = new SparkApplicationReconciler(mockRecorder, + SparkAppReconciler reconciler = new SparkAppReconciler(mockWorker, mockRecorder, mockSentinelManager); @BeforeEach @@ -64,15 +66,15 @@ public void beforeEach() { doAnswer(invocation -> { app.setStatus(invocation.getArgument(1)); return null; - }).when(mockRecorder).persistStatus(any(SparkApplicationContext.class), + }).when(mockRecorder).persistStatus(any(SparkAppContext.class), any(ApplicationStatus.class)); } @Test void testCleanupRunningApp() { - try (MockedConstruction mockAppContext = mockConstruction( - SparkApplicationContext.class, (mock, context) -> { - when(mock.getSparkApplication()).thenReturn(app); + try (MockedConstruction mockAppContext = mockConstruction( + SparkAppContext.class, (mock, context) -> { + when(mock.getResource()).thenReturn(app); when(mock.getClient()).thenReturn(mockClient); when(mock.getDriverPod()).thenReturn(Optional.of(mockDriver)); when(mock.getDriverPodSpec()).thenReturn(mockDriver); diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppDriverConf.java similarity index 67% rename from spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java rename to spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppDriverConf.java index 9b96a88f..42b44432 100644 --- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationDriverConf.java +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppDriverConf.java @@ -27,27 +27,26 @@ import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils; import org.apache.spark.deploy.k8s.submit.MainAppResource; -public class ApplicationDriverConf extends KubernetesDriverConf { - private ApplicationDriverConf(SparkConf sparkConf, - String appId, - MainAppResource mainAppResource, - String mainClass, - String[] appArgs, - Option proxyUser) { +public class SparkAppDriverConf extends KubernetesDriverConf { + private SparkAppDriverConf(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { super(sparkConf, appId, mainAppResource, mainClass, appArgs, proxyUser); } - public static ApplicationDriverConf create(SparkConf sparkConf, - String appId, - MainAppResource mainAppResource, - String mainClass, - String[] appArgs, - Option proxyUser) { + public static SparkAppDriverConf create(SparkConf sparkConf, + String appId, + MainAppResource mainAppResource, + String mainClass, + String[] appArgs, + Option proxyUser) { // pre-create check only KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, Config.KUBERNETES_EXECUTOR_VOLUMES_PREFIX()); - return new ApplicationDriverConf(sparkConf, appId, mainAppResource, mainClass, appArgs, - proxyUser); + return new SparkAppDriverConf(sparkConf, appId, mainAppResource, mainClass, appArgs, proxyUser); } /** diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpec.java similarity index 94% rename from 
spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java rename to spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpec.java index 56e24ea8..0c7256dc 100644 --- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpec.java +++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpec.java @@ -47,17 +47,17 @@ *

  * This is not thread safe
  */
-public class ApplicationResourceSpec {
+public class SparkAppResourceSpec {
   @Getter
   private final Pod configuredPod;
 
   @Getter
   private final List<HasMetadata> driverPreResources;
 
   @Getter
   private final List<HasMetadata> driverResources;
 
-  private final ApplicationDriverConf kubernetesDriverConf;
+  private final SparkAppDriverConf kubernetesDriverConf;
 
-  public ApplicationResourceSpec(ApplicationDriverConf kubernetesDriverConf,
-                                 KubernetesDriverSpec kubernetesDriverSpec) {
+  public SparkAppResourceSpec(SparkAppDriverConf kubernetesDriverConf,
+                              KubernetesDriverSpec kubernetesDriverSpec) {
     this.kubernetesDriverConf = kubernetesDriverConf;
     String namespace = kubernetesDriverConf.sparkConf().get(Config.KUBERNETES_NAMESPACE().key());
diff --git a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorker.java
similarity index 87%
rename from spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java
rename to spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorker.java
index 631e57e4..8bcd8b22 100644
--- a/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/ApplicationClientWorker.java
+++ b/spark-submission-worker/src/main/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorker.java
@@ -38,17 +38,17 @@
  * Similar to org.apache.spark.deploy.k8s.submit.KubernetesClientApplication
  * this reads args from SparkApplication instead of starting separate spark-submit process
  */
-public class ApplicationClientWorker {
+public class SparkAppSubmissionWorker {
 
-  public static ApplicationResourceSpec getResourceSpec(
+  public SparkAppResourceSpec getResourceSpec(
       org.apache.spark.kubernetes.operator.SparkApplication app,
       KubernetesClient client,
       Map<String, String> confOverrides) {
-    ApplicationDriverConf applicationDriverConf = buildDriverConf(app, confOverrides);
-    return buildResourceSpec(applicationDriverConf, client);
+    SparkAppDriverConf appDriverConf = buildDriverConf(app, confOverrides);
+    return buildResourceSpec(appDriverConf, client);
   }
 
-  protected static ApplicationDriverConf buildDriverConf(
+  protected SparkAppDriverConf buildDriverConf(
       org.apache.spark.kubernetes.operator.SparkApplication app,
       Map<String, String> confOverrides) {
     ApplicationSpec applicationSpec = app.getSpec();
@@ -76,7 +76,7 @@ protected static ApplicationDriverConf buildDriverConf(
     }
     effectiveSparkConf.setMaster(
         "k8s://https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT");
-    return ApplicationDriverConf.create(effectiveSparkConf,
+    return SparkAppDriverConf.create(effectiveSparkConf,
         createSparkAppId(app),
         primaryResource,
         applicationSpec.getMainClass(),
@@ -84,20 +84,20 @@ protected static ApplicationDriverConf buildDriverConf(
         Option.apply(applicationSpec.getProxyUser()));
   }
 
-  protected static ApplicationResourceSpec buildResourceSpec(
-      ApplicationDriverConf kubernetesDriverConf,
+  protected SparkAppResourceSpec buildResourceSpec(
+      SparkAppDriverConf kubernetesDriverConf,
       KubernetesClient client) {
     KubernetesDriverBuilder builder = new KubernetesDriverBuilder();
     KubernetesDriverSpec kubernetesDriverSpec =
         builder.buildFromFeatures(kubernetesDriverConf, client);
-    return new ApplicationResourceSpec(kubernetesDriverConf, kubernetesDriverSpec);
+    return new SparkAppResourceSpec(kubernetesDriverConf, kubernetesDriverSpec);
   }
 
   /**
    * Spark application id need to be deterministic per attempt per Spark App.
    * This is to ensure operator reconciliation idempotency
    */
-  protected static String createSparkAppId(
+  protected String createSparkAppId(
       final org.apache.spark.kubernetes.operator.SparkApplication app) {
     long attemptId = 0L;
     if (app.getStatus() != null && app.getStatus().getCurrentAttemptSummary() != null) {
diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpecTest.java
similarity index 80%
rename from spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java
rename to spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpecTest.java
index 58e793bb..b8146cdb 100644
--- a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationResourceSpecTest.java
+++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppResourceSpecTest.java
@@ -38,11 +38,11 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-class ApplicationResourceSpecTest {
+class SparkAppResourceSpecTest {
 
   @Test
   void testDriverResourceIncludesConfigMap() {
-    ApplicationDriverConf mockConf = mock(ApplicationDriverConf.class);
+    SparkAppDriverConf mockConf = mock(SparkAppDriverConf.class);
     when(mockConf.configMapNameDriver()).thenReturn("foo-configmap");
     when(mockConf.sparkConf()).thenReturn(
         new SparkConf().set("spark.kubernetes.namespace", "foo-namespace"));
@@ -74,33 +74,33 @@ void testDriverResourceIncludesConfigMap() {
     when(mockSpec.pod()).thenReturn(sparkPod);
     when(mockSpec.systemProperties()).thenReturn(new HashMap<>());
 
-    ApplicationResourceSpec applicationResourceSpec =
-        new ApplicationResourceSpec(mockConf, mockSpec);
+    SparkAppResourceSpec appResourceSpec =
+        new SparkAppResourceSpec(mockConf, mockSpec);
 
-    Assertions.assertEquals(1, applicationResourceSpec.getDriverResources().size());
+    Assertions.assertEquals(1, appResourceSpec.getDriverResources().size());
     Assertions.assertEquals(ConfigMap.class,
-        applicationResourceSpec.getDriverResources().get(0).getClass());
+        appResourceSpec.getDriverResources().get(0).getClass());
     ConfigMap proposedConfigMap =
-        (ConfigMap) applicationResourceSpec.getDriverResources().get(0);
+        (ConfigMap) appResourceSpec.getDriverResources().get(0);
     Assertions.assertEquals("foo-configmap", proposedConfigMap.getMetadata().getName());
     Assertions.assertEquals("foo-namespace",
         proposedConfigMap.getData().get("spark.kubernetes.namespace"));
     Assertions.assertEquals("foo-namespace", proposedConfigMap.getMetadata().getNamespace());
 
     Assertions.assertEquals(2,
-        applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().size());
+        appResourceSpec.getConfiguredPod().getSpec().getVolumes().size());
     Volume proposedConfigVolume =
-        applicationResourceSpec.getConfiguredPod().getSpec().getVolumes().get(1);
+        appResourceSpec.getConfiguredPod().getSpec().getVolumes().get(1);
     Assertions.assertEquals("foo-configmap", proposedConfigVolume.getConfigMap().getName());
 
     Assertions.assertEquals(2,
-        applicationResourceSpec.getConfiguredPod().getSpec().getContainers().size());
+        appResourceSpec.getConfiguredPod().getSpec().getContainers().size());
     Assertions.assertEquals(2,
-        applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1)
+        appResourceSpec.getConfiguredPod().getSpec().getContainers().get(1)
             .getVolumeMounts().size());
     VolumeMount proposedConfigVolumeMount =
-        applicationResourceSpec.getConfiguredPod().getSpec().getContainers().get(1)
+        appResourceSpec.getConfiguredPod().getSpec().getContainers().get(1)
            .getVolumeMounts().get(1);
     Assertions.assertEquals(proposedConfigVolume.getName(),
        proposedConfigVolumeMount.getName());
diff --git a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorkerTest.java
similarity index 82%
rename from spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java
rename to spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorkerTest.java
index 6fb68721..07b116d1 100644
--- a/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/ApplicationClientWorkerTest.java
+++ b/spark-submission-worker/src/test/java/org/apache/spark/kubernetes/operator/SparkAppSubmissionWorkerTest.java
@@ -43,12 +43,12 @@
 import static org.mockito.Mockito.mockConstruction;
 import static org.mockito.Mockito.when;
 
-class ApplicationClientWorkerTest {
+class SparkAppSubmissionWorkerTest {
 
   @Test
   void buildDriverConfShouldApplySpecAndPropertiesOverride() {
-    Map<ApplicationDriverConf, List<Object>> constructorArgs = new HashMap<>();
-    try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
-        ApplicationDriverConf.class,
+    Map<SparkAppDriverConf, List<Object>> constructorArgs = new HashMap<>();
+    try (MockedConstruction<SparkAppDriverConf> mocked = mockConstruction(
+        SparkAppDriverConf.class,
         (mock, context) -> constructorArgs.put(mock, new ArrayList<>(context.arguments())))) {
       SparkApplication mockApp = mock(SparkApplication.class);
@@ -71,8 +71,8 @@ void buildDriverConfShouldApplySpecAndPropertiesOverride() {
       when(mockSpec.getMainClass()).thenReturn("foo-class");
       when(mockSpec.getDriverArgs()).thenReturn(List.of("a", "b"));
 
-      ApplicationDriverConf conf =
-          ApplicationClientWorker.buildDriverConf(mockApp, overrides);
+      SparkAppSubmissionWorker submissionWorker = new SparkAppSubmissionWorker();
+      SparkAppDriverConf conf = submissionWorker.buildDriverConf(mockApp, overrides);
       Assertions.assertEquals(6, constructorArgs.get(conf).size());
 
       // validate SparkConf with override
@@ -102,9 +102,9 @@ void buildDriverConfShouldApplySpecAndPropertiesOverride() {
 
   @Test
   void buildDriverConfForPythonApp() {
-    Map<ApplicationDriverConf, List<Object>> constructorArgs = new HashMap<>();
-    try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
-        ApplicationDriverConf.class,
+    Map<SparkAppDriverConf, List<Object>> constructorArgs = new HashMap<>();
+    try (MockedConstruction<SparkAppDriverConf> mocked = mockConstruction(
+        SparkAppDriverConf.class,
         (mock, context) -> constructorArgs.put(mock, new ArrayList<>(context.arguments())))) {
       SparkApplication mockApp = mock(SparkApplication.class);
@@ -117,8 +117,9 @@ void buildDriverConfForPythonApp() {
       when(mockApp.getMetadata()).thenReturn(appMeta);
       when(mockSpec.getPyFiles()).thenReturn("foo");
 
-      ApplicationDriverConf conf =
-          ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap());
+      SparkAppSubmissionWorker submissionWorker = new SparkAppSubmissionWorker();
+      SparkAppDriverConf conf =
+          submissionWorker.buildDriverConf(mockApp, Collections.emptyMap());
       Assertions.assertEquals(6, constructorArgs.get(conf).size());
 
       // validate main resources
@@ -132,9 +133,9 @@
 
   @Test
   void buildDriverConfForRApp() {
-    Map<ApplicationDriverConf, List<Object>> constructorArgs = new HashMap<>();
-    try (MockedConstruction<ApplicationDriverConf> mocked = mockConstruction(
-        ApplicationDriverConf.class,
+    Map<SparkAppDriverConf, List<Object>> constructorArgs = new HashMap<>();
+    try (MockedConstruction<SparkAppDriverConf> mocked = mockConstruction(
+        SparkAppDriverConf.class,
         (mock, context) -> constructorArgs.put(mock, new ArrayList<>(context.arguments())))) {
       SparkApplication mockApp = mock(SparkApplication.class);
@@ -147,8 +148,9 @@ void buildDriverConfForRApp() {
       when(mockApp.getMetadata()).thenReturn(appMeta);
       when(mockSpec.getSparkRFiles()).thenReturn("foo");
 
-      ApplicationDriverConf conf =
-          ApplicationClientWorker.buildDriverConf(mockApp, Collections.emptyMap());
+      SparkAppSubmissionWorker submissionWorker = new SparkAppSubmissionWorker();
+      SparkAppDriverConf conf =
+          submissionWorker.buildDriverConf(mockApp, Collections.emptyMap());
       Assertions.assertEquals(6, constructorArgs.get(conf).size());
 
       // validate main resources
@@ -179,13 +181,14 @@ void sparkAppIdShouldBeDeterministicPerAppPerAttempt() {
     when(mockApp1.getStatus()).thenReturn(mockStatus1);
     when(mockApp2.getStatus()).thenReturn(mockStatus2);
 
-    String appId1 = ApplicationClientWorker.createSparkAppId(mockApp1);
-    String appId2 = ApplicationClientWorker.createSparkAppId(mockApp2);
+    SparkAppSubmissionWorker submissionWorker = new SparkAppSubmissionWorker();
+    String appId1 = submissionWorker.createSparkAppId(mockApp1);
+    String appId2 = submissionWorker.createSparkAppId(mockApp2);
     Assertions.assertNotEquals(appId1, appId2);
     Assertions.assertTrue(appId1.contains(appName1));
 
     // multiple invoke shall give same result
-    Assertions.assertEquals(appId1, ApplicationClientWorker.createSparkAppId(mockApp1));
+    Assertions.assertEquals(appId1, submissionWorker.createSparkAppId(mockApp1));
 
     ApplicationAttemptSummary mockAttempt = mock(ApplicationAttemptSummary.class);
     AttemptInfo mockAttemptInfo = mock(AttemptInfo.class);
@@ -193,10 +196,10 @@ void sparkAppIdShouldBeDeterministicPerAppPerAttempt() {
     when(mockAttemptInfo.getId()).thenReturn(2L);
     when(mockStatus1.getCurrentAttemptSummary()).thenReturn(mockAttempt);
 
-    String appId1Attempt2 = ApplicationClientWorker.createSparkAppId(mockApp1);
+    String appId1Attempt2 = submissionWorker.createSparkAppId(mockApp1);
     Assertions.assertTrue(appId1Attempt2.contains(appName1));
     Assertions.assertNotEquals(appId1, appId1Attempt2);
-    Assertions.assertEquals(appId1Attempt2, ApplicationClientWorker.createSparkAppId(mockApp1));
+    Assertions.assertEquals(appId1Attempt2, submissionWorker.createSparkAppId(mockApp1));
   }
 }