@@ -143,30 +143,32 @@ private[spark] class SparkSubmitArguments(args: Array[String]) {
143143 System.err.println(
144144 """Usage: spark-submit <primary binary> [options]
145145 |Options:
146- | --master MASTER_URL spark://host:port, mesos://host:port, yarn, or local.
147- | --deploy-mode DEPLOY_MODE Mode to deploy the app in, either \"client\" or \"cluster\".
148- | --class CLASS_NAME Name of your application's main class (required for Java apps).
149- | --arg ARG Argument to be passed to your application's main class.
150- | Multiple invocations are possible, each will be passed in order.
151- | --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512 Mb).
152- | --name NAME The name of your application (Default: Spark).
146+ | --master MASTER_URL spark://host:port, mesos://host:port, yarn, or local.
147+ | --deploy-mode DEPLOY_MODE Mode to deploy the app in, either 'client' or 'cluster'.
148+ | --class CLASS_NAME Name of your app's main class (required for Java apps).
149+ | --arg ARG Argument to be passed to your application's main class. This
150+ | option can be specified multiple times for multiple args.
151+ | --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512M).
152+ | --name NAME The name of your application (Default: 'Spark').
153153 |
154154 | Spark standalone with cluster deploy mode only:
155- | --driver-cores CORES Cores for driver.
156- | --supervise Whether to restart the driver on failure.
155+ | --driver-cores NUM Cores for driver (Default: 1).
156+ | --supervise If given, restarts the driver on failure.
157157 |
158158 | Spark standalone and Mesos only:
159- | --total-executor-cores CORES Total cores for all executors.
159+ | --total-executor-cores NUM Total cores for all executors.
160160 |
161161 | YARN-only:
162- | --executor-cores NUM Number of cores per executor (Default: 1).
163- | --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G).
164- | --more-jars JARS For \"cluster\" deploy mode, a comma-separated list of local jars
165- | that you want SparkContext.addJar to work with.
166- | --queue QUEUE The YARN queue to submit the application to (Default: 'default').
167- | --num-executors NUM Number of executors to start (Default: 2).
168- | --files FILES Comma separated list of files to be placed next to all executors.
169- | --archives ARCHIVES Comma separated list of archives to be extracted next to all executors.""".stripMargin
162+ | --executor-cores NUM Number of cores per executor (Default: 1).
163+ | --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G).
164+ | --more-jars JARS For 'cluster' deploy mode, a comma-separated list of local
165+ | jars that you want SparkContext.addJar to work with.
166+ | --queue QUEUE_NAME The YARN queue to submit to (Default: 'default').
167+ | --num-executors NUM Number of executors to start (Default: 2).
168+ | --files FILES Comma separated list of files to be placed next to all
169+ | executors.
170+ | --archives ARCHIVES Comma separated list of archives to be extracted next to
171+ | all executors.""".stripMargin
170172 )
171173 System.exit(exitCode)
172174 }
0 commit comments