2013-10-06

Gevent link crawler

Here I have written code in Python with Beautiful Soup that parses all the links on a given page into a repository of links. Next, it fetches the contents of any of the urls from the repository just created, parses the links from this new content into the repository, and continues this process for all links in the repository until it is stopped or until a given number of links have been fetched.

But this code is very slow. How can I improve it with asynchronous programming, using gevent in Python?


Code

import itertools
import random
import urllib2
import BeautifulSoup


class Crawler(object):

    def __init__(self):
        self.soup = None                                # Beautiful Soup object
        self.current_page = "http://www.python.org/"    # Current page's address
        self.links = set()                              # Queue with every link fetched
        self.visited_links = set()

        self.counter = 0  # Simple counter for debug purposes

    def open(self):
        # Open url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup.BeautifulSoup(html_code)

        page_links = []
        try:
            page_links = itertools.ifilter(  # Only deal with absolute links
                lambda href: 'http://' in href,
                (a.get('href') for a in self.soup.findAll('a')))
        except Exception as e:  # Magnificent exception handling
            print 'Error: ', e

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from the non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):
        # Crawl 3 webpages (or stop if all urls have been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link


if __name__ == '__main__':
    C = Crawler()
    C.run()

Update 1


import gevent.monkey; gevent.monkey.patch_thread()
from bs4 import BeautifulSoup
import urllib2
import itertools
import random
import urlparse
import sys

import gevent.monkey; gevent.monkey.patch_all(thread=False)


class Crawler(object):

    def __init__(self):
        self.soup = None                                # Beautiful Soup object
        self.current_page = "http://www.python.org/"    # Current page's address
        self.links = set()                              # Queue with every link fetched
        self.visited_links = set()

        self.counter = 0  # Simple counter for debug purposes

    def open(self):
        # Open url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup(html_code)

        page_links = []
        try:
            for link in [h.get('href') for h in self.soup.find_all('a')]:
                print "Found link: '" + link + "'"
                if link.startswith('http'):
                    print 'entered in if link: ', link
                    page_links.append(link)
                    print "Adding link" + link + "\n"
                elif link.startswith('/'):
                    print 'entered in elif link: ', link
                    parts = urlparse.urlparse(self.current_page)
                    page_links.append(parts.scheme + '://' + parts.netloc + link)
                    print "Adding link " + parts.scheme + '://' + parts.netloc + link + "\n"
                else:
                    print 'entered in else link: ', link
                    page_links.append(self.current_page + link)
                    print "Adding link " + self.current_page + link + "\n"

        except Exception, ex:  # Magnificent exception handling
            print ex

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from the non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):
        # Crawl 3 webpages (or stop if all urls have been fetched)
        crawling_greenlets = []

        for i in range(3):
            crawling_greenlets.append(gevent.spawn(self.open))

        gevent.joinall(crawling_greenlets)

        # while len(self.visited_links) < 4 or (self.visited_links == self.links):
        #     self.open()

        for link in self.links:
            print link


if __name__ == '__main__':
    C = Crawler()
    C.run()

Answer


Import gevent and make sure the monkey patching is done, so that standard library calls become non-blocking and gevent-aware:

import gevent 
from gevent import monkey; monkey.patch_all() 

(You can selectively decide what has to be monkey patched, but let's say that is not your problem at the moment.)
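For reference, a minimal sketch of what selective patching could look like (patching only the socket and ssl modules here is an illustrative assumption, not something the answer prescribes):

from gevent import monkey
monkey.patch_socket()  # make socket operations cooperative so urlopen yields to other greenlets
monkey.patch_ssl()     # only needed if some of the crawled urls are https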

In your run, make your open function be called inside a greenlet. run can then return the greenlet object, so you can wait for it whenever you need to get the results, using gevent.joinall. Something like this:

def run(self): 
    return gevent.spawn(self.open) 

c1 = Crawler() 
c2 = Crawler() 
c3 = Crawler() 
crawling_tasks = [c.run() for c in (c1,c2,c3)] 
gevent.joinall(crawling_tasks) 

print [c.links for c in (c1, c2, c3)] 
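As a follow-up, the snippet above runs three separate Crawler instances concurrently, but each instance still fetches one page at a time. A rough sketch, assuming monkey patching has been applied and using an illustrative fetch helper and url list (not part of the answer), of downloading several pages concurrently with a greenlet pool:

import urllib2
from gevent import monkey; monkey.patch_all()
from gevent.pool import Pool

def fetch(url):
    # urllib2 is cooperative once the socket module has been monkey-patched
    return url, urllib2.urlopen(url).read()

urls = ["http://www.python.org/", "http://www.python.org/about/"]
pool = Pool(10)  # cap concurrency at 10 simultaneous requests
for url, html in pool.imap_unordered(fetch, urls):
    print url, len(html)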
Got this error: Exception KeyError: KeyError(15886544) in <module 'threading' from '/usr/lib/python2.7/threading.pyc'> ignored –

Try not to patch the thread module: monkey.patch_all(thread=False); apart from that error (which, by the way, you can probably ignore), does it work as you expected? – mguijarr

Why does it print '0: http://www.python.org/' 3 times? –
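A likely explanation (my reading of the Update 1 code, not confirmed in the thread): all three greenlets are spawned on the same Crawler instance, and each one reads self.current_page before any of them has finished open() and updated it, so they all fetch the start page with the counter still at 0. A hedged sketch of one way around this, giving each greenlet its own url (the seed list and the reworked open signature are hypothetical):

def open(self, url):
    # each greenlet works on its own url instead of the shared self.current_page
    html_code = urllib2.urlopen(url).read()
    self.visited_links.add(url)
    # ... parse links from html_code as before ...

def run(self):
    seeds = ["http://www.python.org/",
             "http://www.python.org/about/",
             "http://www.python.org/community/"]
    gevent.joinall([gevent.spawn(self.open, url) for url in seeds])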