
tfp = open(filename, 'wb')
OSError: [Errno 22] Invalid argument: 'downloaded/misc/jquery.js?v=1.4.4'

Can anyone help me resolve this error? I think it has something to do with jquery.js?v=1.4.4 not being valid. I'm new to Python, so I apologize if I'm missing something obvious.

Here is the code:

import os 
from urllib.request import urlretrieve 
from urllib.request import urlopen 
from bs4 import BeautifulSoup 

downloadDirectory = "downloaded" 
baseUrl = "http://pythonscraping.com" 

def getAbsoluteURL(baseUrl, source):
    if source.startswith("http://www."):
        url = "http://"+source[11:]
    elif source.startswith("http://"):
        url = source
    elif source.startswith("www."):
        url = source[4:]
        url = "http://"+source
    else:
        url = baseUrl+"/"+source
    if baseUrl not in url:
        return None
    return url

def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):
    path = absoluteUrl.replace("www.", "")
    path = path.replace(baseUrl, "")
    path = downloadDirectory+path
    directory = os.path.dirname(path)

    if not os.path.exists(directory):
        os.makedirs(directory)

    return path

html = urlopen("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html, "html.parser")
downloadList = bsObj.findAll(src=True)

for download in downloadList:
    fileUrl = getAbsoluteURL(baseUrl, download["src"])
    if fileUrl is not None:
        print(fileUrl)
        urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))

It isn't downloading a valid file; maybe that isn't the correct link for downloading the file. – Arman


Yes, that makes sense. Thank you. –

Answers


For the function urlretrieve(url, filename, reporthook, data), the filename argument you pass must be a valid file name on your operating system.

In this case, when you run

urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory)) 

the url argument you pass is "http://pythonscraping.com/misc/jquery.js?v=1.4.4", and the filename argument you pass is "downloaded/misc/jquery.js?v=1.4.4".

I don't think "jquery.js?v=1.4.4" is a valid file name.
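On Windows, for instance, '?' is one of the characters that cannot appear in a file name, so open() rejects the path outright. A minimal reproduction of that claim (assuming a Windows file system):

# Assumption: running on Windows, where '?' is not allowed in file names.
# open() fails the same way urlretrieve() does when it opens its target file.
open('jquery.js?v=1.4.4', 'wb')
# OSError: [Errno 22] Invalid argument: 'jquery.js?v=1.4.4'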

Fix: in the getDownloadPath function, change return path to

return path.partition('?')[0] 
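str.partition splits on the first '?', and index 0 keeps only the part before the query string, so the returned path is a plain file name. For example:

path = 'downloaded/misc/jquery.js?v=1.4.4'
print(path.partition('?')[0])  # downloaded/misc/jquery.js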

downloaded/misc/jquery.js?v=1.4.4 is not a valid file name? I think this is a better solution:

import os
import requests
from bs4 import BeautifulSoup

download_directory = "downloaded"
base_url = "http://www.pythonscraping.com/"

# Use Requests instead of urllib
def get_files_url(base_url):
    # Return a list of tag elements that contain src attrs
    html = requests.get(base_url)
    soup = BeautifulSoup(html.text, "lxml")
    return soup.find_all(src=True)

def get_file_name(url):
    # Return the part after the last "/" as the file name
    # E.g. return a.png as the file name if url=http://pythonscraping.com/a.png
    # Remove characters that are not valid in file names
    file_name = url.split("/")[-1]
    remove_list = '?><\\/:"*|'
    for ch in remove_list:
        if ch in file_name:
            file_name = file_name.replace(ch, "")
    return download_directory + "/" + file_name

def get_formatted_url(url):
    if not url.startswith("http://"):
        return base_url + url
    elif base_url not in url:
        return None
    else:
        return url

# Make sure the download directory exists before writing into it
os.makedirs(download_directory, exist_ok=True)

links = get_files_url(base_url)

for link in links:
    url = link["src"]
    url = get_formatted_url(url)
    if url is None:
        continue
    print(url)
    result = requests.get(url, stream=True)
    file_name = get_file_name(url)
    print(file_name)
    with open(file_name, 'wb') as f:
        for chunk in result.iter_content(10):
            f.write(chunk)
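An alternative to stripping characters one by one is to take the path component of the URL with the standard library's urllib.parse, which drops the query string for you. A minimal sketch (the get_file_name name and download_directory variable simply mirror the code above):

from urllib.parse import urlsplit

def get_file_name(url):
    # urlsplit(...).path excludes the "?v=1.4.4" query string,
    # so the last path segment is already a clean file name.
    return download_directory + "/" + urlsplit(url).path.split("/")[-1]

print(urlsplit("http://pythonscraping.com/misc/jquery.js?v=1.4.4").path)  # /misc/jquery.js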