This repository was archived by the owner on Nov 23, 2017. It is now read-only.

Commit d418517

Moves from the deprecated optparse to argparse.
1 parent 31f7927 commit d418517
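At a high level, the change swaps optparse's OptionParser for argparse's ArgumentParser and adjusts the idioms that differ between the two standard-library modules. The following is a minimal illustrative sketch of those correspondences, not lines taken from the commit; the --slaves flag is just used as an example:

    from argparse import ArgumentParser   # was: from optparse import OptionParser

    parser = ArgumentParser(
        prog="spark-ec2",
        usage="%(prog)s [options]")        # optparse interpolated "%prog" here instead
    # optparse's OptionParser took a version= keyword; with argparse, version output
    # is registered as an argument, e.g.
    # parser.add_argument('--version', action='version', version=...).
    # In this diff the version= keyword is simply removed without such a replacement.
    parser.add_argument(                   # was: parser.add_option(
        "-s", "--slaves",
        type=int,                          # was: type="int" (optparse names types with strings)
        default=1,
        help="Number of slaves to launch (default: %(default)s)")  # was: %default
    parser.add_argument("action")          # positionals are declared explicitly;
    parser.add_argument("cluster_name")    # optparse returned them as a leftover args list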

1 file changed: +81 -78 lines

spark_ec2.py

Lines changed: 81 additions & 78 deletions
@@ -40,7 +40,7 @@
 import time
 import warnings
 from datetime import datetime
-from optparse import OptionParser
+from argparse import ArgumentParser
 from sys import stderr
 
 if sys.version < "3":
@@ -169,170 +169,173 @@ class UsageError(Exception):
 
 # Configure and parse our command-line arguments
 def parse_args():
-    parser = OptionParser(
+    parser = ArgumentParser(
         prog="spark-ec2",
-        version="%prog {v}".format(v=SPARK_EC2_VERSION),
-        usage="%prog [options] <action> <cluster_name>\n\n"
+        usage="%(prog)s [options] <action> <cluster_name>\n\n"
         + "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
 
-    parser.add_option(
-        "-s", "--slaves", type="int", default=1,
-        help="Number of slaves to launch (default: %default)")
-    parser.add_option(
-        "-w", "--wait", type="int",
+    # Positional arguments
+    parser.add_argument('action')
+    parser.add_argument('cluster_name')
+
+    # Optional arguments
+    parser.add_argument(
+        "-s", "--slaves", type=int, default=1,
+        help="Number of slaves to launch (default: %(default)s)")
+    parser.add_argument(
+        "-w", "--wait", type=int,
         help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
-    parser.add_option(
+    parser.add_argument(
         "-k", "--key-pair",
         help="Key pair to use on instances")
-    parser.add_option(
+    parser.add_argument(
         "-i", "--identity-file",
         help="SSH private key file to use for logging into instances")
-    parser.add_option(
+    parser.add_argument(
         "-p", "--profile", default=None,
         help="If you have multiple profiles (AWS or boto config), you can configure " +
-             "additional, named profiles by using this option (default: %default)")
-    parser.add_option(
+             "additional, named profiles by using this option (default: %(default)s)")
+    parser.add_argument(
         "-t", "--instance-type", default="m1.large",
-        help="Type of instance to launch (default: %default). " +
+        help="Type of instance to launch (default: %(default)s). " +
             "WARNING: must be 64-bit; small instances won't work")
-    parser.add_option(
+    parser.add_argument(
         "-m", "--master-instance-type", default="",
         help="Master instance type (leave empty for same as instance-type)")
-    parser.add_option(
+    parser.add_argument(
         "-r", "--region", default="us-east-1",
-        help="EC2 region used to launch instances in, or to find them in (default: %default)")
-    parser.add_option(
+        help="EC2 region used to launch instances in, or to find them in (default: %(default)s)")
+    parser.add_argument(
         "-z", "--zone", default="",
         help="Availability zone to launch instances in, or 'all' to spread " +
             "slaves across multiple (an additional $0.01/Gb for bandwidth" +
             "between zones applies) (default: a single zone chosen at random)")
-    parser.add_option(
+    parser.add_argument(
         "-a", "--ami",
         help="Amazon Machine Image ID to use")
-    parser.add_option(
+    parser.add_argument(
         "-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
-        help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
-    parser.add_option(
+        help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %(default)s)")
+    parser.add_argument(
         "--spark-git-repo",
         default=DEFAULT_SPARK_GITHUB_REPO,
-        help="Github repo from which to checkout supplied commit hash (default: %default)")
-    parser.add_option(
+        help="Github repo from which to checkout supplied commit hash (default: %(default)s)")
+    parser.add_argument(
         "--spark-ec2-git-repo",
         default=DEFAULT_SPARK_EC2_GITHUB_REPO,
-        help="Github repo from which to checkout spark-ec2 (default: %default)")
-    parser.add_option(
+        help="Github repo from which to checkout spark-ec2 (default: %(default)s)")
+    parser.add_argument(
         "--spark-ec2-git-branch",
         default=DEFAULT_SPARK_EC2_BRANCH,
-        help="Github repo branch of spark-ec2 to use (default: %default)")
-    parser.add_option(
+        help="Github repo branch of spark-ec2 to use (default: %(default)s)")
+    parser.add_argument(
         "--deploy-root-dir",
         default=None,
         help="A directory to copy into / on the first master. " +
             "Must be absolute. Note that a trailing slash is handled as per rsync: " +
             "If you omit it, the last directory of the --deploy-root-dir path will be created " +
             "in / before copying its contents. If you append the trailing slash, " +
             "the directory is not created and its contents are copied directly into /. " +
-            "(default: %default).")
-    parser.add_option(
+            "(default: %(default)s).")
+    parser.add_argument(
         "--hadoop-major-version", default="1",
         help="Major version of Hadoop. Valid options are 1 (Hadoop 1.0.4), 2 (CDH 4.2.0), yarn " +
-             "(Hadoop 2.4.0) (default: %default)")
-    parser.add_option(
+             "(Hadoop 2.4.0) (default: %(default)s)")
+    parser.add_argument(
         "-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
         help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
             "the given local address (for use with login)")
-    parser.add_option(
+    parser.add_argument(
         "--resume", action="store_true", default=False,
         help="Resume installation on a previously launched cluster " +
             "(for debugging)")
-    parser.add_option(
-        "--ebs-vol-size", metavar="SIZE", type="int", default=0,
+    parser.add_argument(
+        "--ebs-vol-size", metavar="SIZE", type=int, default=0,
        help="Size (in GB) of each EBS volume.")
-    parser.add_option(
+    parser.add_argument(
         "--ebs-vol-type", default="standard",
         help="EBS volume type (e.g. 'gp2', 'standard').")
-    parser.add_option(
-        "--ebs-vol-num", type="int", default=1,
+    parser.add_argument(
+        "--ebs-vol-num", type=int, default=1,
         help="Number of EBS volumes to attach to each node as /vol[x]. " +
             "The volumes will be deleted when the instances terminate. " +
             "Only possible on EBS-backed AMIs. " +
             "EBS volumes are only attached if --ebs-vol-size > 0. " +
            "Only support up to 8 EBS volumes.")
-    parser.add_option(
-        "--placement-group", type="string", default=None,
+    parser.add_argument(
+        "--placement-group", type=str, default=None,
         help="Which placement group to try and launch " +
             "instances into. Assumes placement group is already " +
            "created.")
-    parser.add_option(
-        "--swap", metavar="SWAP", type="int", default=1024,
-        help="Swap space to set up per node, in MB (default: %default)")
-    parser.add_option(
-        "--spot-price", metavar="PRICE", type="float",
+    parser.add_argument(
+        "--swap", metavar="SWAP", type=int, default=1024,
+        help="Swap space to set up per node, in MB (default: %(default)s)")
+    parser.add_argument(
+        "--spot-price", metavar="PRICE", type=float,
         help="If specified, launch slaves as spot instances with the given " +
             "maximum price (in dollars)")
-    parser.add_option(
+    parser.add_argument(
         "--ganglia", action="store_true", default=True,
-        help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
+        help="Setup Ganglia monitoring on cluster (default: %(default)s). NOTE: " +
            "the Ganglia page will be publicly accessible")
-    parser.add_option(
+    parser.add_argument(
         "--no-ganglia", action="store_false", dest="ganglia",
         help="Disable Ganglia monitoring for the cluster")
-    parser.add_option(
+    parser.add_argument(
         "-u", "--user", default="root",
-        help="The SSH user you want to connect as (default: %default)")
-    parser.add_option(
+        help="The SSH user you want to connect as (default: %(default)s)")
+    parser.add_argument(
         "--delete-groups", action="store_true", default=False,
         help="When destroying a cluster, delete the security groups that were created")
-    parser.add_option(
+    parser.add_argument(
         "--use-existing-master", action="store_true", default=False,
         help="Launch fresh slaves, but use an existing stopped master if possible")
-    parser.add_option(
-        "--worker-instances", type="int", default=1,
+    parser.add_argument(
+        "--worker-instances", type=int, default=1,
         help="Number of instances per worker: variable SPARK_WORKER_INSTANCES. Not used if YARN " +
-             "is used as Hadoop major version (default: %default)")
-    parser.add_option(
-        "--master-opts", type="string", default="",
+             "is used as Hadoop major version (default: %(default)s)")
+    parser.add_argument(
+        "--master-opts", type=str, default="",
         help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
            "(e.g -Dspark.worker.timeout=180)")
-    parser.add_option(
-        "--user-data", type="string", default="",
+    parser.add_argument(
+        "--user-data", type=str, default="",
         help="Path to a user-data file (most AMIs interpret this as an initialization script)")
-    parser.add_option(
-        "--authorized-address", type="string", default="0.0.0.0/0",
-        help="Address to authorize on created security groups (default: %default)")
-    parser.add_option(
-        "--additional-security-group", type="string", default="",
+    parser.add_argument(
+        "--authorized-address", type=str, default="0.0.0.0/0",
+        help="Address to authorize on created security groups (default: %(default)s)")
+    parser.add_argument(
+        "--additional-security-group", type=str, default="",
         help="Additional security group to place the machines in")
-    parser.add_option(
-        "--additional-tags", type="string", default="",
+    parser.add_argument(
+        "--additional-tags", type=str, default="",
         help="Additional tags to set on the machines; tags are comma-separated, while name and " +
            "value are colon separated; ex: \"Task:MySparkProject,Env:production\"")
-    parser.add_option(
+    parser.add_argument(
         "--copy-aws-credentials", action="store_true", default=False,
         help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
-    parser.add_option(
+    parser.add_argument(
         "--subnet-id", default=None,
         help="VPC subnet to launch instances in")
-    parser.add_option(
+    parser.add_argument(
         "--vpc-id", default=None,
         help="VPC to launch instances in")
-    parser.add_option(
+    parser.add_argument(
         "--private-ips", action="store_true", default=False,
         help="Use private IPs for instances rather than public if VPC/subnet " +
            "requires that.")
-    parser.add_option(
+    parser.add_argument(
         "--instance-initiated-shutdown-behavior", default="stop",
         choices=["stop", "terminate"],
         help="Whether instances should terminate when shut down or just stop")
-    parser.add_option(
+    parser.add_argument(
         "--instance-profile-name", default=None,
         help="IAM profile name to launch instances under")
 
-    (opts, args) = parser.parse_args()
-    if len(args) != 2:
-        parser.print_help()
-        sys.exit(1)
-    (action, cluster_name) = args
+
+    opts = parser.parse_args()
+    action = opts.action
+    cluster_name = opts.cluster_name
 
     # Boto config check
     # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
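Because action and cluster_name are now declared as positional arguments, the old manual check (len(args) != 2, then parser.print_help() and sys.exit(1)) is no longer needed: argparse itself rejects a missing or extra positional and prints the usage message. Below is a small stand-alone sketch of how the resulting Namespace is consumed; it mirrors only a few of the options from the diff, and the argument values are made up for illustration:

    from argparse import ArgumentParser

    parser = ArgumentParser(prog="spark-ec2")
    parser.add_argument("action")
    parser.add_argument("cluster_name")
    parser.add_argument("-s", "--slaves", type=int, default=1)

    # Equivalent to running: spark-ec2 -s 2 launch my-cluster
    opts = parser.parse_args(["-s", "2", "launch", "my-cluster"])

    assert opts.action == "launch"            # optparse left positionals in a separate args list
    assert opts.cluster_name == "my-cluster"
    assert opts.slaves == 2                   # converted to int by type=int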
