diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index d3c5f0eaf0341..632d23b73cc2d 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -308,21 +308,22 @@ private[spark] class SparkSubmit extends Logging {
         args.repositories, args.ivyRepoPath, args.ivySettingsPath)
       if (resolvedMavenCoordinates.nonEmpty) {
-        // In K8s client mode, when in the driver, add resolved jars early as we might need
-        // them at the submit time for artifact downloading.
-        // For example we might use the dependencies for downloading
-        // files from a Hadoop Compatible fs e.g. S3. In this case the user might pass:
-        // --packages com.amazonaws:aws-java-sdk:1.7.4:org.apache.hadoop:hadoop-aws:2.7.6
-        if (isKubernetesClusterModeDriver) {
-          val loader = getSubmitClassLoader(sparkConf)
-          for (jar <- resolvedMavenCoordinates) {
-            addJarToClasspath(jar, loader)
-          }
-        } else if (isKubernetesCluster) {
+        if (isKubernetesCluster) {
           // We need this in K8s cluster mode so that we can upload local deps
           // via the k8s application, like in cluster mode driver
           childClasspath ++= resolvedMavenCoordinates
         } else {
+          // In K8s client mode, when in the driver, resolved jars are added in the driver.
+          // For example, we might use the dependencies for downloading
+          // files from a Hadoop Compatible fs e.g. S3. In this case the user might pass:
+          // --packages com.amazonaws:aws-java-sdk:1.7.4,org.apache.hadoop:hadoop-aws:2.7.6
+          if (isKubernetesClusterModeDriver) {
+            val loader = getSubmitClassLoader(sparkConf)
+            for (jar <- resolvedMavenCoordinates) {
+              addJarToClasspath(jar, loader)
+            }
+          }
           args.jars = mergeFileLists(args.jars, mergeFileLists(resolvedMavenCoordinates: _*))
           if (args.isPython || isInternal(args.primaryResource)) {
             args.pyFiles = mergeFileLists(args.pyFiles,