1
import requests
from bs4 import BeautifulSoup
import csv
from urlparse import urljoin
import urllib2
base_url = 'http://www.baseball-reference.com'
data = requests.get("http://www.baseball-reference.com/teams/BAL/2014-schedule-scores.shtml")
soup = BeautifulSoup(data.content)
outfile = open("./Balpbp.csv", "wb")
writer = csv.writer(outfile)
url = []
for link in soup.find_all('a'):
if not link.has_attr('href'):
continue
if link.get_text() != 'boxscore':
continue
url.append(base_url + link['href'])
for list in url:
response = requests.get(list)
html = response.content
soup = BeautifulSoup(html)
table = soup.find('table', attrs={'id': 'play_by_play'})
list_of_rows = []
for row in table.findAll('tr'):
list_of_cells = []
for cell in row.findAll('td'):
text = cell.text.replace(' ', '')
list_of_cells.append(text)
list_of_rows.append(list_of_cells)
writer.writerows(list_of_rows)
由於 Python 的 ASCII 編碼錯誤,諸如 u'G\xa0Holland'、u'N\xa0Cruz' 之類的數據無法寫入 CSV。
以下是錯誤消息:
Traceback (most recent call last):
File "try.py", line 40, in <module>
writer.writerows(list_of_rows)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xa0' in position 57: ordinal not in range(128)
當我將數據寫入 csv 時,數據中最終包含 \xa0 之類的字符,這些字符導致數據無法寫入 csv。我怎樣才能清理數據以刪除這部分字符,或者做些什麼來規避這個問題?