# KISSANIME - http://kissanime.com/ ANIME DOWNLOADER
#
# Drives a real Firefox browser via Selenium (the site blocks plain HTTP
# scrapers), logs in, walks an anime's episode listing, and collects the
# per-episode download links.  The links are printed so they can be pasted
# into a text file and imported into a download manager such as IDM.

import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

BASE_URL = "http://kissanime.com/Anime/"

# Seconds to wait after each navigation; raise this on a slow connection.
DELAY = 10

# ------------------------------- EDIT THIS AND ADD YOUR REQUIRED ANIME NAME
AnimeName = "Nodame-Cantabile"
# -------------------------------

URL = BASE_URL + AnimeName


def _wait():
    """Give the current page DELAY seconds to finish loading/redirecting."""
    print("DELAY start")
    time.sleep(DELAY)
    print("DELAY end")


def _login(driver):
    """Log in to kissanime.com so episode pages expose download links."""
    print("Navigating to Login Page")
    driver.get("http://kissanime.com/Login")
    _wait()

    print("Logging in")
    user = driver.find_element_by_name("username")
    passwd = driver.find_element_by_name("password")
    user.send_keys("<ur username>")
    passwd.send_keys("<ur password>")
    passwd.send_keys(Keys.RETURN)
    _wait()


def _collect_episode_urls(driver):
    """Return the absolute URL of every episode on the anime's listing page."""
    print("Navigating to anime episode page")
    driver.get(URL)
    _wait()

    # Explicit parser keeps bs4 deterministic and silences its warning.
    soup = BeautifulSoup(driver.page_source, "html.parser")
    ep_list_table = soup.find("table", {"class": "listing"})

    episode_urls = []
    for row in ep_list_table.findAll('tr'):
        anchors = row.findAll('a')
        # Header/separator rows contain no <a>; just skip them.
        if anchors:
            episode_urls.append("http://kissanime.com" + anchors[0].get('href'))
    return episode_urls


def _collect_download_urls(driver, episode_urls):
    """Visit each episode page and gather (episode tag, label, href) tuples."""
    download_urls = []
    for url in episode_urls:
        print("\n Navigating to get Video for the URL => " + url)
        driver.get(url)
        _wait()

        temp = []
        soup = BeautifulSoup(driver.page_source, "html.parser")
        for div in soup.findAll('div', {"id": "divDownload"}):
            for link in div.findAll('a'):
                # Episode tag: the two characters immediately before the
                # '?' in the episode URL (e.g. ".../Episode-01?id=...").
                q = url.find('?')
                temp.append((url[q - 2:q], link.text.strip(),
                             link.attrs['href']))
                print("\n\n Temp for" + link.text.strip())
                print(temp)
        download_urls.append(temp)
    return download_urls


def main():
    print("Opening firefox Browser")
    driver = webdriver.Firefox()
    try:
        _login(driver)

        episode_urls = _collect_episode_urls(driver)
        print("These are the episode URL's")
        print(episode_urls)

        download_urls = _collect_download_urls(driver, episode_urls)
        for links in download_urls:
            print(links)
            print("\n")

        print("Copy paste the above links to a text file, "
              "use import from text file option in IDM to download all")
    finally:
        # Always close the browser, even if scraping fails midway.
        driver.quit()


if __name__ == "__main__":
    main()