import requests 
from requests import Session 
from bs4 import BeautifulSoup 
import re 
from multiprocessing.dummy import Pool as ThreadPool 

s = Session()  # session used below; created here since the paste omits it

def get_total_pages(): 
    tut = [] 
    base_url = 'Your group '  # placeholder for the real paginated URL pattern
    for url in [base_url % i for i in range(1, 27)]: 
        tut.append(url) 
    print(tut) 
    #get_data_from_page(tut) 
    pool = ThreadPool(8) 
    results = pool.map(get_data_from_page, tut) 

def get_data_from_page(tut): 
    f = open("emails.txt", 'a') 
    email = [] 
    for a in tut: 
        link = s.get(a).text 
        soup = BeautifulSoup(link, 'lxml') 
        links = soup.find('div', class_="mens").find_all('span', class_="inviz") 
        for e in links: 
            emails = e.text 
            f.write(emails + ', ') 
            email.append(emails) 
    print(email) 

def main(): 
    get_total_pages() 

if __name__ == '__main__': 
    main() 

This raises an error, but only when the work goes through multiprocessing, i.e. only once the Pool from multiprocessing.dummy is used; requests throws the following exception:

raise MissingSchema(error) 
requests.exceptions.MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h? 

Could you quickly explain why you are doing this? It looks like you are a spammer trying to harvest email addresses from the internet – hansaplast


Fixed the indentation for readability – mirabilos

Answer


The problem is in this loop:

for a in tut: 
    link = s.get(a).text 
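pool.map(get_data_from_page, tut) already calls the function once per URL, so inside get_data_from_page the parameter tut is a single URL string. Iterating over a string yields its characters, which is exactly where the invalid one-letter URL 'h' in the traceback comes from. A quick illustration (example.com is just a stand-in URL):

>>> for a in 'http://example.com': 
...     print(a) 
... 
h 
t 
t 
p 
: 
...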

so the for loop has to go; it needs to be just

link = s.get(tut).text  # no for loop: tut is already a single URL here
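
For completeness, here is a minimal sketch of get_data_from_page with that change applied, reusing the selectors and the emails.txt output from the question. The module-level Session object s is assumed, and the div/span class names are taken as given:

def get_data_from_page(url): 
    # pool.map passes one URL per call, so work on it directly 
    found = [] 
    page = s.get(url).text 
    soup = BeautifulSoup(page, 'lxml') 
    spans = soup.find('div', class_="mens").find_all('span', class_="inviz") 
    with open("emails.txt", 'a') as f:  # append results, as in the original code 
        for span in spans: 
            f.write(span.text + ', ') 
            found.append(span.text) 
    print(found) 
    return found 

Returning the list also means the results variable in get_total_pages ends up holding one list of addresses per page instead of a list of None values.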