This repository was archived by the owner on Nov 7, 2018. It is now read-only.
forked from 18010927657/Job
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path main.py
67 lines (62 loc) · 2.18 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# -*- coding: utf-8 -*-
'''
基于scrapydo的爬虫启动文件
'''
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapydo
import logging.config
from Job.utils.Util import FileUtil
from Job import settings
from Job.spiders.jobSpider.crawlUNDPjobs import UNDPjobSpider
from Job.spiders.jobSpider.crawlCERNjobs import CERNjobsSpider
from Job.spiders.jobSpider.crawlITERjobs import ITERJobSpider
from Job.spiders.jobSpider.crawlMOHRSSjobs import MOHRSSJobSpider
from Job.spiders.jobSpider.crawlOECDjobs import OECDJobSpider
from Job.spiders.jobSpider.crawlUNIDOjobs import UNIDOjobLink
from Job.spiders.jobSpider.crawlUNUjobs import UNUjobSpider
from Job.spiders.jobSpider.crawlWHOjobs import WHOjobSpider
from Job.spiders.jobSpider.crawlWIPOjobs import WIPOjobSpider
from Job.spiders.jobSpider.crawlESCAPjobs import ESCAPjobsSpider
from Job.spiders.jobSpider.crawlUNESCOjobs import UNESCOjobSpider
'''
若日志输出文件路径不存在,创建日志输出文件路径
默认在c盘log文件夹下
>>>settings.LOGPATH
>>>c:\\log
'''
if os.path.exists(settings.LOGPATH) == False:
os.makedirs(settings.LOGPATH)
logging.config.fileConfig(FileUtil().getLogConfigPath())
logger = logging.getLogger('ahu')
scrapydo.setup()
class StartScrapySpider(object):
def __init__(self,type):
self.type = type
self.start()
def start(self):
'''使用scrapydo启动爬虫
如有新增爬虫需要启动,可在spiders中添加'''
if self.type == 'job':
logger.debug("进行岗位准备")
spiders = [
# UNDPjobSpider,
# CERNjobsSpider,
# UNIDOjobLink,
# UNUjobSpider,
# WHOjobSpider,
# ITERJobSpider,
# MOHRSSJobSpider, #唯一国内网站,测试ip效果较好
OECDJobSpider,
# WIPOjobSpider,
# ESCAPjobsSpider
# UNESCOjobSpider
]
else:
spiders = []
for spider in spiders:
scrapydo.run_spider(spider_cls=spider)
if __name__ == "__main__":
startScrapySpider = StartScrapySpider('job')