Skip to content

Commit 299ddca

Browse files
committed
Fix scalastyle
1 parent a94c627 commit 299ddca

File tree

2 files changed

+24
-20
lines changed

2 files changed

+24
-20
lines changed

core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,12 +113,14 @@ object SparkSubmit {
113113
new OptionAssigner(appArgs.numExecutors, YARN, true, clOption = "--num-workers"),
114114
new OptionAssigner(appArgs.numExecutors, YARN, false, sysProp = "spark.worker.instances"),
115115
new OptionAssigner(appArgs.executorMemory, YARN, true, clOption = "--worker-memory"),
116-
new OptionAssigner(appArgs.executorMemory, STANDALONE | MESOS | YARN, false, sysProp = "spark.executor.memory"),
116+
new OptionAssigner(appArgs.executorMemory, STANDALONE | MESOS | YARN, false,
117+
sysProp = "spark.executor.memory"),
117118
new OptionAssigner(appArgs.driverMemory, STANDALONE, true, clOption = "--memory"),
118119
new OptionAssigner(appArgs.executorCores, YARN, true, clOption = "--worker-cores"),
119120
new OptionAssigner(appArgs.executorCores, YARN, false, sysProp = "spark.executor.cores"),
120121
new OptionAssigner(appArgs.driverCores, STANDALONE, true, clOption = "--cores"),
121-
new OptionAssigner(appArgs.totalExecutorCores, STANDALONE | MESOS, true, sysProp = "spark.cores.max"),
122+
new OptionAssigner(appArgs.totalExecutorCores, STANDALONE | MESOS, true,
123+
sysProp = "spark.cores.max"),
122124
new OptionAssigner(appArgs.files, YARN, false, sysProp = "spark.yarn.dist.files"),
123125
new OptionAssigner(appArgs.files, YARN, true, clOption = "--files"),
124126
new OptionAssigner(appArgs.archives, YARN, false, sysProp = "spark.yarn.dist.archives"),

core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala

Lines changed: 20 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -143,30 +143,32 @@ private[spark] class SparkSubmitArguments(args: Array[String]) {
143143
System.err.println(
144144
"""Usage: spark-submit <primary binary> [options]
145145
|Options:
146-
| --master MASTER_URL spark://host:port, mesos://host:port, yarn, or local.
147-
| --deploy-mode DEPLOY_MODE Mode to deploy the app in, either \"client\" or \"cluster\".
148-
| --class CLASS_NAME Name of your application's main class (required for Java apps).
149-
| --arg ARG Argument to be passed to your application's main class.
150-
| Multiple invocations are possible, each will be passed in order.
151-
| --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512 Mb).
152-
| --name NAME The name of your application (Default: Spark).
146+
| --master MASTER_URL spark://host:port, mesos://host:port, yarn, or local.
147+
| --deploy-mode DEPLOY_MODE Mode to deploy the app in, either 'client' or 'cluster'.
148+
| --class CLASS_NAME Name of your app's main class (required for Java apps).
149+
| --arg ARG Argument to be passed to your application's main class. This
150+
| option can be specified multiple times for multiple args.
151+
| --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512M).
152+
| --name NAME The name of your application (Default: 'Spark').
153153
|
154154
| Spark standalone with cluster deploy mode only:
155-
| --driver-cores CORES Cores for driver.
156-
| --supervise Whether to restart the driver on failure.
155+
| --driver-cores NUM Cores for driver (Default: 1).
156+
| --supervise If given, restarts the driver on failure.
157157
|
158158
| Spark standalone and Mesos only:
159-
| --total-executor-cores CORES Total cores for all executors.
159+
| --total-executor-cores NUM Total cores for all executors.
160160
|
161161
| YARN-only:
162-
| --executor-cores NUM Number of cores per executor (Default: 1).
163-
| --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G).
164-
| --more-jars JARS For \"cluster\" deploy mode, a comma-separated list of local jars
165-
| that you want SparkContext.addJar to work with.
166-
| --queue QUEUE The YARN queue to submit the application to (Default: 'default').
167-
| --num-executors NUM Number of executors to start (Default: 2).
168-
| --files FILES Comma separated list of files to be placed next to all executors.
169-
| --archives ARCHIVES Comma separated list of archives to be extracted next to all executors.""".stripMargin
162+
| --executor-cores NUM Number of cores per executor (Default: 1).
163+
| --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G).
164+
| --more-jars JARS For 'cluster' deploy mode, a comma-separated list of local
165+
| jars that you want SparkContext.addJar to work with.
166+
| --queue QUEUE_NAME The YARN queue to submit to (Default: 'default').
167+
| --num-executors NUM Number of executors to start (Default: 2).
168+
| --files FILES Comma separated list of files to be placed next to all
169+
| executors.
170+
| --archives ARCHIVES Comma separated list of archives to be extracted next to
171+
| all executors.""".stripMargin
170172
)
171173
System.exit(exitCode)
172174
}

0 commit comments

Comments (0)