Scrapy crawl works fine from the command line, but not when run from a script
I've run into a problem with Scrapy. When I run the command scrapy crawl album -o test.xml, the spider works fine. But when I crawl from a script and pass a different start_urls to the spider, I get the same result as the command. Both URLs are reachable. Here is the code I wrote; please point out what I'm doing wrong. Thanks.
The spider file, xiami_scrapy.py:
import scrapy

empty_referer = {
    'Referer': ''
}

class AlbumSpider(scrapy.Spider):
    name = 'album'
    start_urls = [
        'http://www.xiami.com/artist/album-eJlX61793',
    ]
    artist = 'giga'

    def __init__(self, url=None, artist=None, *args, **kwargs):
        # Allow the start URL and artist to be overridden per run.
        super(AlbumSpider, self).__init__(*args, **kwargs)
        if artist is not None:
            self.artist = artist
        if url is not None:
            self.start_urls = [url]

    def parse(self, response):
        # Yield one item per album entry on the artist's album list page.
        for album in response.css('.album_item100_thread'):
            yield {
                'artist': self.artist,
                'title': album.css('.name>a>strong::text').extract_first(),
                'fav_count': album.css('.fav_c_ico::text').extract_first(),
                'star_rating': album.css('.album_rank>em::text').extract_first(),
                'release_date': response.css('.company>a::text')[1].extract().strip(),
                'company': album.css('.company>a::text')[0].extract(),
                'url': album.css('.name>a::attr(href)').extract_first(),
            }
        # Follow pagination, sending an empty Referer header.
        next_page = response.css('.p_redirect_l::attr(href)').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, headers=empty_referer, callback=self.parse)
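
To rule out the __init__ override, here is a quick sanity check (a throwaway snippet, not part of the project) showing that the keyword arguments do land on a directly constructed instance:

from xiamiscrapy.spiders.xiami_scrapy import AlbumSpider

# Constructing the spider directly applies both overrides as expected.
spider = AlbumSpider(url='http://www.xiami.com/artist/album-bzMAng64c0a', artist='reol')
print(spider.start_urls)  # ['http://www.xiami.com/artist/album-bzMAng64c0a']
print(spider.artist)      # 'reol'

So the constructor itself seems fine; the question is what happens to those values once the instance is handed to the script below.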
The script file, test.py:
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from xiamiscrapy.spiders.xiami_scrapy import AlbumSpider

configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
runner = CrawlerRunner()

@defer.inlineCallbacks
def crawl():
    spider = AlbumSpider(url='http://www.xiami.com/artist/album-bzMAng64c0a', artist='reol')
    yield runner.crawl(spider)
    reactor.stop()

crawl()
reactor.run()
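
My best guess, hedged: CrawlerRunner.crawl accepts a Crawler or a spider class plus constructor arguments, and when handed a pre-built spider instance it may construct a fresh instance from the class, so the url and artist passed above would be silently dropped and the default start_urls used. A minimal sketch of the variant I would try instead, passing the class and the kwargs to runner.crawl (assuming the same imports and setup as test.py):

@defer.inlineCallbacks
def crawl():
    # Pass the spider class; CrawlerRunner forwards the kwargs to __init__.
    yield runner.crawl(AlbumSpider,
                       url='http://www.xiami.com/artist/album-bzMAng64c0a',
                       artist='reol')
    reactor.stop()

crawl()
reactor.run()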