Scrapy crawl with next page

I have this code using the Scrapy framework:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from lxml import html

class Scrapy1Spider(scrapy.Spider):
    name = "scrapy1"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="button next"]',)), callback="parse", follow=True),)

    def parse(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'

The problem is that I only get 100 results; the spider never moves on to the next page.

What is wrong here?

Answer


Your rules attribute is never used, because your spider subclasses scrapy.Spider rather than CrawlSpider, and only CrawlSpider processes rules.

So you have to create the next-page requests manually, like this:

# -*- coding: utf-8 -*-
import scrapy
from lxml import html

class Scrapy1Spider(scrapy.Spider):
    name = "craiglist"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    def parse(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'

        # follow the next page link, if there is one
        next_page = response.xpath('.//a[@class="button next"]/@href').extract()
        if next_page:
            next_href = next_page[0]
            next_page_url = 'http://sfbay.craigslist.org' + next_href
            yield scrapy.Request(url=next_page_url)
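As an aside, if you are on Scrapy 1.0 or later, response.urljoin() saves you from hard-coding the domain. A minimal sketch of the same pagination step, assuming Scrapy >= 1.0 for extract_first() and response.urljoin():

# inside parse(); assumes Scrapy >= 1.0
next_href = response.xpath('.//a[@class="button next"]/@href').extract_first()
if next_href:
    # urljoin resolves the relative href against the current response URL
    yield scrapy.Request(url=response.urljoin(next_href))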

Or use a CrawlSpider, like this:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from lxml import html

class Scrapy1Spider(CrawlSpider):
    name = "craiglist"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    # the callback must not be named "parse": CrawlSpider uses parse() internally
    rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="button next"]',)),
                  callback="parse_page", follow=True),)

    def parse_page(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'
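Either spider can then be run with Scrapy's command-line tool, assuming the file lives inside a Scrapy project:

scrapy crawl craiglist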

Can you show me an example with CrawlSpider? Is that the recommended way?


Added the CrawlSpider code. Note that you cannot use parse as the callback function, because CrawlSpider uses it internally!


Thanks, it works perfectly.