
Commit 0ae5067
Committed Jan 19, 2015

Cleaned Up the code

I made some modifications to the code; mainly, I improved the README with Usage and prerequisites.

Signed-off-by: Aditya Prasad <adityaprasadiskool@gmail.com>

1 parent 8355ff9 · commit 0ae5067

File tree

6 files changed: +86, -25 lines

 

.gitignore (+2)

@@ -52,3 +52,5 @@ docs/_build/
 
 # PyBuilder
 target/
+# thumb files are used by windows to cache a image for thumbnails
+[Tt]humbs.db

README.md (+12, -3)

@@ -1,8 +1,16 @@
-##Python-Scripts
-====================
+## Python-Scripts
 
 **A _motley_ collection of python scripts. These are the different programs I wrote for fun.**
 
+### Usage
+
+* Install python 2.7.9 in your system. You can find it [here][5].
+
+* Just download this file to your computer OR copy the code into a text editor and save it as filename.py
+
+* Double click on the file to run OR open the command line there and type
+'python filename.py'
+
 * **Root.py**
 This is a program which takes a integer greater than or equal to one and
 finds its root to the specified accuracy. It was made mainly to compute

@@ -43,4 +51,5 @@
 [1]: https://superuser.com/questions/330297/automate-logging-in-through-sonicwall/785792?noredirect=1#comment1023176_785792
 [2]: http://relativetoaditya.blogspot.in/2014/12/maximum-subarray.html
 [3]: https://en.wikipedia.org/wiki/Minimum_spanning_tree
-[4]: https://stackoverflow.com/questions/14369739/creating-adjacency-lists-from-dicts-in-python/27380835#27380835
+[4]: https://stackoverflow.com/questions/14369739/creating-adjacency-lists-from-dicts-in-python/27380835#27380835
+[5]: https://www.python.org/ftp/python/2.7.9/python-2.7.9.msi

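The Root.py description above (an integer's root found to a chosen number of decimals) can also be reached through the standard decimal module, which the root.py changes below mention as a possible extension. This is only a minimal sketch, not part of this repository; the function name sqrt_to_decimals and its arguments are invented for illustration (Python 2.7, to match the README's prerequisite).

# Illustrative sketch only - not the repo's root.py.
# Square root of n to exactly `decimals` decimal places via the decimal module.
from decimal import Decimal, getcontext

def sqrt_to_decimals(n, decimals):
    getcontext().prec = decimals + 10            # a few guard digits for safe rounding
    root = Decimal(n).sqrt()
    return root.quantize(Decimal(1).scaleb(-decimals))

print sqrt_to_decimals(5, 11)                    # 2.23606797750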
root.py (+28, -5)

@@ -1,23 +1,44 @@
+# btw I KNOW that import math.sqrt() and sqrt(<number>) can do this :p
 resp="n"
 while(not(resp=="y" or resp=="Y")):
-    print "enter the number of which u want to find root (greater than or equal to 1)"
+    print "Enter the number of which u want to find root (greater than 1)"
     userinput=raw_input()
+
     length=len(userinput)
-    no=int(userinput)
-    print "enter the no of decimals to which u want to find root"
-    deci=int(raw_input())
+    try:
+        no=int(userinput)
+    except ValueError:
+        print "INVALID INPUT!!!! Exiting program"
+        break
+    if(no<1):
+        print "Please enter number greater than 1"
+        break
+
+    print "Enter the no of decimals to which u want to find root, Maximum 11 digits"
+    try:
+        deci=int(raw_input())
+    except ValueError:
+        print "INVALID INPUT!!!! Exiting program"
+        break
+
+    if(deci>11):
+        print "sorry This script cannot perform that function, sorry"
+        # We can increase this using the decimal module.
+        break
+
     #now we need to get the intial amount by which ans should be varied if no is a single
     #digit no then by .1 if its a n digit no then by 10 to the power n-1
     if length==1:
         multi=10**(-2)
     elif length>1:
         multi=10**(length-3)
     ans=no/2
+
     #to find root(no) to a high degree of precision
     while(not(len(str(ans))==deci+length+1)):
         #if the point at which if we add prec*opti we overshoot and if we subtract we undersell..
         #then we optimize the number that is added, ie make it smaller
-
+
         #basically better way to do this, use patterns
         if( ans**2<no and (ans+multi)**2>no):
             multi=multi/10

@@ -41,3 +62,5 @@
     print "Do You want to exit the program??! y/n"
     resp=raw_input()
 
+dummy = raw_input()
+

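The validation pattern this commit adds (int(raw_input()) wrapped in try/except ValueError, then break) could equally be factored into a small re-prompting helper so a typo does not end the whole session. A hypothetical sketch, with the helper name read_int invented here rather than taken from root.py:

# Hypothetical helper, not in root.py: re-prompt until a valid integer is typed.
def read_int(prompt):
    while True:
        try:
            return int(raw_input(prompt))
        except ValueError:
            print "INVALID INPUT!!!! please enter a whole number"

no = read_int("Enter the number of which you want to find the root: ")
deci = read_int("Enter the number of decimals (maximum 11): ")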
scrape_results.py (+4, -1)

@@ -11,6 +11,9 @@
 # because otherwise python doesnt recognise the error I specified and throws a NameError: name 'NoSuchElementException' is not defined
 from selenium.common.exceptions import NoSuchElementException
 
+START_ROLL_NUMBER = 110113006;
+END_ROLL_NUMBER = 110113099
+
 # open a window of firefox
 driver = webdriver.Firefox()
 

@@ -46,7 +49,7 @@ class student:
 
     GPA = '7.57'
 
-for roll_number in range(110113000,110113100):
+for roll_number in range(START_ROLL_NUMBER,END_ROLL_NUMBER+1):
 
     # intialize a empty list in which the students data will be stored
     temp_data=[]

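For context, the NoSuchElementException imported in the hunk above is normally caught around a find_element call, roughly as in the sketch below. It only illustrates how the new roll-number constants and the exception fit together; the field name "regno" is a guess, since the real locators are outside this diff.

# Sketch only - the field name "regno" is invented; the real locator is not shown in this diff.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

START_ROLL_NUMBER = 110113006
END_ROLL_NUMBER = 110113099

driver = webdriver.Firefox()
for roll_number in range(START_ROLL_NUMBER, END_ROLL_NUMBER + 1):
    try:
        box = driver.find_element_by_name("regno")   # guessed field name
        box.send_keys(str(roll_number))
    except NoSuchElementException:
        # No such field on this page, so skip this roll number.
        print "no input box found for", roll_number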
sonicwall_login.py (+2, -1)

@@ -13,7 +13,8 @@
 driver.get("https://192.168.20.1/auth1.html")
 # The next line is an assertion to confirm that title has “Sonic” word in it: (not really neccesary :p)
 # This is used to confirm that the webpage is the right one
-assert "Sonic" in driver.title
+# some sites may not have it
+# assert "Sonic" in driver.title
 # we use the 'name' tag to get a handle to the username and password this finds the appropriate box.
 user = driver.find_element_by_name("userName")
 passwd = driver.find_element_by_name("pwd")

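If a non-fatal sanity check is still wanted after commenting out the assert, one option is a plain warning instead of an exception. This is a suggestion only, not something the commit does, and it assumes the driver object from the lines above:

# Suggested alternative to the removed assert: warn instead of aborting.
if "Sonic" not in driver.title:
    print "warning: page title is %r, this may not be the SonicWALL login page" % driver.title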
xkcd.py (+38, -15)

@@ -3,40 +3,63 @@
 from bs4 import BeautifulSoup
 
 
-
-print "Enter from which xkcd comic onwards should be downloaded"
+print "\nEnter from which xkcd comic onwards should be downloaded"
 start=int(raw_input())
-print "Enter till which xkcd comic should be downloaded (latest is 1445)"
+print "\nEnter till which xkcd comic should be downloaded \n(latest is somewhere around 1475)"
+print "If you want till the latest one just enter a large value \nbut keep a eye on whether you see a lot of 404's cause that signifies the end."
 end=int(raw_input())
 
 def download(url, filepath):
-    page=urllib2.urlopen(url)
-
-    soup=BeautifulSoup(page)
+    try:
+        page = urllib2.urlopen(url)
+    except urllib2.HTTPError:
+        print "lol, not funny joke, No comic for 404 guys! "
+        print "if u see this a lot, all available comics have been downloaded"
+        return
 
-    finalurl=soup.find("div", {"id": "comic"}).contents[1].get("src")
-
-    image=urllib.urlopen(finalurl)
+    soup = BeautifulSoup(page)
+
+    finalurl = soup.find("div", {"id": "comic"}).contents[1].get("src")
 
+    try:
+        check = "to check, the url ="+finalurl
+    except TypeError:
+        # Now as finalurl is not a string, we have not scraped the image url,
+        # the tag in this page is 'href'!
+        finalurl = soup.find("div", {"id": "comic"}).contents[1].get('href')
+        print "rare image, we somehow got the URL -> "+soup.find("div", {"id": "comic"}).contents[1].get('href')
+
+    # what if we didnt get a image!?
+    if(finalurl[:4:-1][0:4] != 'gnp.'):
+        print "Not a .png image!!"
+        print "backup plan, we use this ->"+soup.find("div", {"id": "comic"}).contents[1].img.get("src")
+        finalurl = soup.find("div", {"id": "comic"}).contents[1].img.get("src")
+
+    try:
+        image = urllib.urlopen(finalurl)
+    except AttributeError:
+        print "Anomaly - final url was not properly set"
+
     if image.headers.maintype=="image":
 
-        buf=image.read()
+        buf = image.read()
 
         print "Image stored to "+filepath
 
-        download=file(filepath,"wb")
+        download = file(filepath,"wb")
 
         download.write(buf)
 
         download.close()
     image.close()
+    print "\n"
+
+base="http://xkcd.com/"
 
-base="https://xkcd.com/"
-#change the path here, to whereever you want the directory to be
-path='C:\\Users\\Aditya\\Desktop\\Work\\xkcd\\'
+# Change the path here, to whereever you want the directory to be
+path = "F:\\Work\\xkcd\\"
 
 for i in range(start,end+1):
-
     tempurl=base+str(i)+"/"
     temppath=path+str(i)+".png"
     download(tempurl,temppath)

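The extension check added above, finalurl[:4:-1][0:4] != 'gnp.', reverses the tail of the URL and compares the first four characters of that reversal with ".png" spelled backwards. A small sketch (not part of the commit) showing that str.endswith gives the same answer more readably; the URL below is an example value only.

# Equivalent forms of the ".png" check; the URL is an example value only.
finalurl = "http://imgs.xkcd.com/comics/some_comic.png"

print finalurl[:4:-1][0:4] != 'gnp.'     # the commit's check: False, the URL does end in ".png"
print not finalurl.endswith('.png')      # same result, easier to read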