[SPARK-18278] [Scheduler] Spark on Kubernetes - Basic Scheduler Backend #19468
Changes from 1 commit: f6fdd6a
SparkKubernetesClientFactory.scala
@@ -40,13 +40,13 @@ private[spark] object SparkKubernetesClientFactory {
       namespace: Option[String],
       kubernetesAuthConfPrefix: String,
       sparkConf: SparkConf,
-      maybeServiceAccountToken: Option[File],
-      maybeServiceAccountCaCert: Option[File]): KubernetesClient = {
+      defaultServiceAccountToken: Option[File],
+      defaultServiceAccountCaCert: Option[File]): KubernetesClient = {
     val oauthTokenFileConf = s"$kubernetesAuthConfPrefix.$OAUTH_TOKEN_FILE_CONF_SUFFIX"
     val oauthTokenConf = s"$kubernetesAuthConfPrefix.$OAUTH_TOKEN_CONF_SUFFIX"
     val oauthTokenFile = sparkConf.getOption(oauthTokenFileConf)
Contributor
Why not create constants for these, like for the other config options?

Contributor
This lacks context. We intend to have two different sets of authentication options for the Kubernetes API. The first is the credentials for creating the driver pod and all the Kubernetes resources that the application requires outside of executor pods. The second is a set of credentials that the driver can use to create executor pods. These options will have shared suffixes in the configuration keys but different prefixes. The reasoning for two sets of credentials is twofold:
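To make the prefix/suffix scheme described in the comment above concrete, here is a minimal sketch (a hypothetical helper, not code from this PR) of how a key such as `spark.kubernetes.authenticate.driver.mounted.oauthTokenFile` is composed from a prefix plus a shared suffix and resolved against a `SparkConf`, falling back to a mounted default:

```scala
import org.apache.spark.SparkConf

// Illustration only; the prefixes and suffix mirror the constants introduced in this
// diff, but resolveOAuthTokenFile itself is not part of the PR.
object KubernetesAuthConfSketch {
  val DriverAuthPrefix = "spark.kubernetes.authenticate.driver"                 // credentials spark-submit uses to create the driver pod
  val DriverMountedAuthPrefix = "spark.kubernetes.authenticate.driver.mounted"  // credentials the driver uses to create executor pods
  val OAuthTokenFileSuffix = "oauthTokenFile"

  // Resolve "<prefix>.<suffix>" from the SparkConf, falling back to a default file
  // (for example, the service-account token mounted into the driver pod) when unset.
  def resolveOAuthTokenFile(
      conf: SparkConf,
      prefix: String,
      defaultTokenFile: Option[String]): Option[String] = {
    conf.getOption(s"$prefix.$OAuthTokenFileSuffix").orElse(defaultTokenFile)
  }
}
```

Which fallback applies depends on which prefix is being resolved, which is why the factory method below takes the defaults as parameters rather than hard-coding them.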
       .map(new File(_))
-      .orElse(maybeServiceAccountToken)
+      .orElse(defaultServiceAccountToken)
     val oauthTokenValue = sparkConf.getOption(oauthTokenConf)
     ConfigurationUtils.requireNandDefined(
       oauthTokenFile,

@@ -56,7 +56,7 @@ private[spark] object SparkKubernetesClientFactory {
     val caCertFile = sparkConf
       .getOption(s"$kubernetesAuthConfPrefix.$CA_CERT_FILE_CONF_SUFFIX")
-      .orElse(maybeServiceAccountCaCert.map(_.getAbsolutePath))
+      .orElse(defaultServiceAccountCaCert.map(_.getAbsolutePath))
     val clientKeyFile = sparkConf
       .getOption(s"$kubernetesAuthConfPrefix.$CLIENT_KEY_FILE_CONF_SUFFIX")
Contributor
If I understand from the response to @vanzin, this is used in both spark-submit and in the driver? Assuming both are true, how is the private key transmitted and secured? Are there any security concerns?

Contributor
The submission client will have a bootstrap to send these over via a Kubernetes secret volume. This secret material isn't necessarily used by spark-submit itself, but spark-submit provides this secret material through said volume. It should only be read by the driver; we don't mount secrets for Kubernetes credentials into executor pods.

Contributor
Thanks for clarifying.

Contributor
The client is configured to always use this value in HTTP requests once the client object is constructed by the builder.
     val clientCertFile = sparkConf
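As a rough sketch of the secret-volume approach described in the comment thread above (not code from this PR; the secret name, data keys, and volume name are made up), the submission client could package the driver's API credentials into a Kubernetes Secret and attach it to the driver pod only:

```scala
import java.util.Base64
import io.fabric8.kubernetes.api.model.{Pod, PodBuilder, Secret, SecretBuilder}

object DriverCredentialsSketch {
  private def b64(value: String): String =
    Base64.getEncoder.encodeToString(value.getBytes("UTF-8"))

  // Assumed inputs; in practice these come from the user's configuration.
  private val caCertPem = "-----BEGIN CERTIFICATE-----..."
  private val clientKeyPem = "-----BEGIN PRIVATE KEY-----..."

  // A Secret holding the driver's Kubernetes API credentials.
  val driverCredentials: Secret = new SecretBuilder()
    .withNewMetadata()
      .withName("spark-driver-credentials")   // illustrative name
      .endMetadata()
    .addToData("caCertFile", b64(caCertPem))
    .addToData("clientKeyFile", b64(clientKeyPem))
    .build()

  // Attach the secret as a volume on the driver pod only; executor pods never mount it.
  val driverPodWithCredentials: Pod = new PodBuilder()
    .withNewMetadata().withName("spark-driver").endMetadata()
    .withNewSpec()
      .addNewVolume()
        .withName("driver-credentials-volume")
        .withNewSecret()
          .withSecretName("spark-driver-credentials")
          .endSecret()
        .endVolume()
      .endSpec()
    .build()
}
```

The driver then reads the mounted files from the volume's mount path, which is what the `defaultServiceAccountToken`/`defaultServiceAccountCaCert` fallbacks above are for.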
org/apache/spark/deploy/k8s/config.scala
@@ -16,45 +16,45 @@
  */
 package org.apache.spark.deploy.k8s

-import org.apache.spark.{SPARK_VERSION => sparkVersion}
+import org.apache.spark.SPARK_VERSION
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.config.ConfigBuilder
 import org.apache.spark.network.util.ByteUnit

-package object config extends Logging {
+private[spark] object config extends Logging {

-  private[spark] val KUBERNETES_NAMESPACE =
+  val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
       .doc("The namespace that will be used for running the driver and executor pods. When using" +
         " spark-submit in cluster mode, this can also be passed to spark-submit via the" +
         " --kubernetes-namespace command line argument.")
       .stringConf
       .createWithDefault("default")

-  private[spark] val EXECUTOR_DOCKER_IMAGE =
+  val EXECUTOR_DOCKER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.docker.image")
       .doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
         " format.")
       .stringConf
-      .createWithDefault(s"spark-executor:$sparkVersion")
+      .createWithDefault(s"spark-executor:$SPARK_VERSION")

-  private[spark] val DOCKER_IMAGE_PULL_POLICY =
+  val DOCKER_IMAGE_PULL_POLICY =
     ConfigBuilder("spark.kubernetes.docker.image.pullPolicy")
       .doc("Docker image pull policy when pulling any docker image in Kubernetes integration")
       .stringConf
       .createWithDefault("IfNotPresent")

Contributor
nit: duplicated empty lines.

Contributor
Removed.
-  private[spark] val APISERVER_AUTH_DRIVER_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver"
-  private[spark] val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver.mounted"
-  private[spark] val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
-  private[spark] val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
-  private[spark] val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
-  private[spark] val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
-  private[spark] val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"
+  val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
+  val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
+  val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
+  val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
+  val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"

-  private[spark] val KUBERNETES_SERVICE_ACCOUNT_NAME =
+  val KUBERNETES_SERVICE_ACCOUNT_NAME =
     ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
       .doc("Service account that is used when running the driver pod. The driver pod uses" +
         " this service account when requesting executor pods from the API server. If specific" +

@@ -66,49 +66,49 @@ package object config extends Logging {
   // Note that while we set a default for this when we start up the
   // scheduler, the specific default value is dynamically determined
   // based on the executor memory.
-  private[spark] val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
+  val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
     ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
       .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
         " is memory that accounts for things like VM overheads, interned strings, other native" +
        " overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
       .bytesConf(ByteUnit.MiB)
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
-  private[spark] val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."
+  val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
+  val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."

-  private[spark] val KUBERNETES_DRIVER_POD_NAME =
+  val KUBERNETES_DRIVER_POD_NAME =
     ConfigBuilder("spark.kubernetes.driver.pod.name")
       .doc("Name of the driver pod.")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
+  val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
     ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
       .doc("Prefix to use in front of the executor pod names.")
       .internal()
       .stringConf
       .createWithDefault("spark")

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_SIZE =
+  val KUBERNETES_ALLOCATION_BATCH_SIZE =
     ConfigBuilder("spark.kubernetes.allocation.batch.size")
       .doc("Number of pods to launch at once in each round of executor allocation.")
       .intConf
       .checkValue(value => value > 0, "Allocation batch size should be a positive integer")
       .createWithDefault(5)

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_DELAY =
+  val KUBERNETES_ALLOCATION_BATCH_DELAY =
     ConfigBuilder("spark.kubernetes.allocation.batch.delay")
       .doc("Number of seconds to wait between each round of executor allocation.")
       .longConf
       .checkValue(value => value > 0, s"Allocation batch delay should be a positive integer")
       .createWithDefault(1)

-  private[spark] val KUBERNETES_EXECUTOR_LIMIT_CORES =
+  val KUBERNETES_EXECUTOR_LIMIT_CORES =
     ConfigBuilder("spark.kubernetes.executor.limit.cores")
       .doc("Specify the hard cpu limit for a single executor pod")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
+  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
 }
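For context on how these entries are consumed, here is a hedged usage sketch (not part of the diff). The typed `ConfigEntry` getter on `SparkConf` is internal to Spark, so a snippet like this would have to live under the `org.apache.spark` package tree; the values set here are made up.

```scala
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.config._

// Illustrative only: read a few of the entries defined above from a SparkConf.
// Defaults apply for anything not set explicitly.
object ConfigUsageSketch {
  val sparkConf = new SparkConf()
    .set("spark.kubernetes.namespace", "analytics")
    .set("spark.kubernetes.allocation.batch.size", "10")

  val namespace = sparkConf.get(KUBERNETES_NAMESPACE)             // "analytics"
  val batchSize = sparkConf.get(KUBERNETES_ALLOCATION_BATCH_SIZE) // 10
  val image = sparkConf.get(EXECUTOR_DOCKER_IMAGE)                // "spark-executor:<SPARK_VERSION>" unless overridden
}
```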
ExecutorPodFactory.scala
@@ -27,10 +27,13 @@ import org.apache.spark.deploy.k8s.constants._
 import org.apache.spark.util.Utils

 /**
- * Configures executor pods. Construct one of these with a SparkConf to set up properties that are
- * common across all executors. Then, pass in dynamic parameters into createExecutorPod.
+ * A factory class for configuring and creating executor pods.
  */
 private[spark] trait ExecutorPodFactory {
+
+  /**
+   * Configure and construct an executor pod with the given parameters.
+   */
   def createExecutorPod(
       executorId: String,
       applicationId: String,
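For readers unfamiliar with the factory split, here is a heavily simplified, hypothetical sketch of the pattern. The real `createExecutorPod` takes more parameters than the truncated signature above shows, so the stub below uses an abbreviated one purely for illustration:

```scala
import io.fabric8.kubernetes.api.model.{Pod, PodBuilder}
import org.apache.spark.SparkConf

// Hypothetical, trimmed-down mirror of the trait/impl split shown in the diff.
trait SimplifiedExecutorPodFactory {
  def createExecutorPod(executorId: String, applicationId: String): Pod
}

class SimplifiedExecutorPodFactoryImpl(sparkConf: SparkConf) extends SimplifiedExecutorPodFactory {
  override def createExecutorPod(executorId: String, applicationId: String): Pod = {
    new PodBuilder()
      .withNewMetadata()
        .withName(s"spark-exec-$executorId")         // one pod per executor id
        .addToLabels("spark-app-id", applicationId)  // label pods so they can be listed per application
        .endMetadata()
      .withNewSpec()
        .endSpec()
      .build()
  }
}
```

The point of the split is that the scheduler backend only deals in fully built `Pod` objects and never touches the builder DSL directly.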
@@ -161,12 +164,12 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
     val requiredPorts = Seq(
       (EXECUTOR_PORT_NAME, executorPort),
       (BLOCK_MANAGER_PORT_NAME, blockManagerPort))
-      .map(port => {
+      .map { case (name, port) =>
         new ContainerPortBuilder()
-          .withName(port._1)
-          .withContainerPort(port._2)
+          .withName(name)
+          .withContainerPort(port)
           .build()
-      })
+      }

     val executorContainer = new ContainerBuilder()
       .withName(s"executor")
@@ -202,16 +205,15 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
         .endSpec()
       .build()

-    val containerWithExecutorLimitCores = executorLimitCores.map {
-      limitCores =>
-        val executorCpuLimitQuantity = new QuantityBuilder(false)
-          .withAmount(limitCores)
-          .build()
-        new ContainerBuilder(executorContainer)
-          .editResources()
-            .addToLimits("cpu", executorCpuLimitQuantity)
-            .endResources()
-          .build()
+    val containerWithExecutorLimitCores = executorLimitCores.map { limitCores =>
+      val executorCpuLimitQuantity = new QuantityBuilder(false)
+        .withAmount(limitCores)
+        .build()
+      new ContainerBuilder(executorContainer)
+        .editResources()
+          .addToLimits("cpu", executorCpuLimitQuantity)
+          .endResources()
+        .build()
     }.getOrElse(executorContainer)

     new PodBuilder(executorPod)
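The CPU-limit handling above uses fabric8's resource-requirements DSL. As a rough sketch of the same pattern (the concrete amounts and the extra memory limit are illustrative assumptions, not something this PR sets):

```scala
import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, QuantityBuilder}

object ExecutorResourceLimitsSketch {
  // Start from a bare container, similar to executorContainer in ExecutorPodFactoryImpl.
  // withNewResources()/endResources() ensures a resources section exists before editResources().
  private val baseContainer: Container = new ContainerBuilder()
    .withName("executor")
    .withNewResources().endResources()
    .build()

  // Illustrative amounts; the PR itself only sets the cpu limit, driven by
  // spark.kubernetes.executor.limit.cores.
  private val cpuLimit = new QuantityBuilder(false).withAmount("2").build()
  private val memoryLimit = new QuantityBuilder(false).withAmount("3Gi").build()

  val limitedContainer: Container = new ContainerBuilder(baseContainer)
    .editResources()
      .addToLimits("cpu", cpuLimit)
      .addToLimits("memory", memoryLimit)
      .endResources()
    .build()
}
```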
Where do we use this parameter?

We are not really using it in the context of this PR. Removed this parameter.