
Answers


Create a file named script.py in your Scrapy project. Assuming the spider files are spider_one.py and spider_two.py, defining the spiders SpiderOne and SpiderTwo respectively, add the following to script.py:

from spiders.spider_one import SpiderOne 
from spiders.spider_two import SpiderTwo 

# Scrapy core API (this uses the pre-1.0 Crawler/reactor API)
from scrapy import signals, log 
from twisted.internet import reactor 
from scrapy.crawler import Crawler 
from scrapy.settings import Settings 


output_file = "your_file.json"  # all spiders append their results here
TO_CRAWL = [SpiderOne, SpiderTwo]


# list of crawlers that are running 
RUNNING_CRAWLERS = [] 

def spider_closing(spider):
    """Activates on the spider_closed signal."""
    log.msg("Spider closed: %s" % spider, level=log.INFO)
    RUNNING_CRAWLERS.remove(spider)
    # stop the reactor once the last spider has finished
    if not RUNNING_CRAWLERS:
        reactor.stop()

log.start(loglevel=log.DEBUG) 
for spider in TO_CRAWL: 
    # each spider gets its own Settings object and Crawler
    settings = Settings()
    settings.set("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36")
    settings.set("FEED_FORMAT", 'json')
    settings.set("FEED_URI", output_file)
    # settings.set("ITEM_PIPELINES", {'pipelines.CustomPipeline': 300})
    settings.set("DOWNLOAD_DELAY", 1)
    crawler = Crawler(settings)
    crawler_obj = spider()  # instantiate the spider class
    RUNNING_CRAWLERS.append(crawler_obj)

    # stop reactor when spider closes 
    crawler.signals.connect(spider_closing, signal=signals.spider_closed) 
    crawler.configure() 
    crawler.crawl(crawler_obj) 
    crawler.start() 

# blocks process so always keep as the last statement 
reactor.run() 
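
Run the script from the project root (for example, python script.py). The reactor.run() call blocks until every spider has fired spider_closed and spider_closing stops the reactor, at which point the process exits.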

This example writes JSON, but it can be adapted to XML just as easily.
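As a minimal sketch of the XML variant (same loop body; only the two feed settings change, and 'xml' maps to Scrapy's built-in XmlItemExporter):

    settings.set("FEED_FORMAT", 'xml')         # use the built-in XML feed exporter
    settings.set("FEED_URI", "your_file.xml")  # write results to an XML file instead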


I tested this code, but when I try to import my items in each spider, it raises an ImportError. – 3mpty88


@3mpty88 Could you please post the error log? Are you sure the spiders are in the **spiders** folder? – Varun


ImportError: No module named myproject.items is the error I get from this code. The spiders are in the spiders folder. – 3mpty88
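
That ImportError usually means the directory containing the myproject package is not on sys.path when script.py runs. Assuming the standard Scrapy layout (script.py in the project root next to scrapy.cfg, with myproject/items.py inside the package), a minimal sketch of a fix at the top of script.py; MyItem is a hypothetical placeholder for your item class:

    import os
    import sys

    # assumption: script.py sits in the project root, next to scrapy.cfg,
    # so the directory holding the myproject package is this file's directory
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

    from myproject.items import MyItem  # hypothetical item class name

Alternatively, simply running the script from the project root (so that directory is already on sys.path) has the same effect.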