2015-12-09 26 views
0

我正在嘗試預測某些文檔的標籤,每個文檔可以有多個標籤。以下是我寫的 Python 機器學習代碼(使用多項式樸素貝葉斯訓練時出現了多處理異常):

import pandas as pd 
import pickle 
import re 
from sklearn.cross_validation import train_test_split 
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix 
from nltk.stem import WordNetLemmatizer 
from sklearn.feature_extraction.text import TfidfVectorizer 
from sklearn.naive_bayes import MultinomialNB as MNB 
from sklearn.pipeline import Pipeline 
from sklearn.grid_search import GridSearchCV 

def Mytrain():
    """Grid-search a TF-IDF + Multinomial Naive Bayes text classifier.

    Loads a DataFrame from ``train.pkl`` (expects columns ``'Data'`` and
    ``'Tags'`` -- TODO confirm schema against the producer of that pickle),
    runs an exhaustive hyper-parameter search on a 70% training split,
    writes the best parameter set to ``res.txt`` and ``bestParams.pkl``,
    and prints accuracy / confusion matrix / classification report on the
    held-out 30% test split.

    Side effects: reads ``train.pkl``; writes ``res.txt`` and
    ``bestParams.pkl``; prints progress and metrics to stdout.
    """
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(stop_words='english', sublinear_tf=True)),
        ('clf', MNB())
    ])

    # NOTE(review): 5 * 6 * 2 * 2 = 120 parameter combinations; with the
    # default 3-fold CV that is 360 fits, and n_jobs=3 means each worker
    # process holds its own copy of a (potentially enormous) n-gram TF-IDF
    # matrix -- the likely cause of the reported MemoryError. Trim the grid
    # (especially the wide ngram_range values) or drop n_jobs if RAM is tight.
    parameters = {
        'vect__max_df': (0.25, 0.5, 0.6, 0.7, 1.0),
        'vect__ngram_range': ((1, 1), (1, 2), (2, 3), (1, 3), (1, 4), (1, 5)),
        'vect__use_idf': (True, False),
        'clf__fit_prior': (True, False)
    }

    # Use a context manager so the file handle is closed deterministically
    # (the original leaked it).
    with open("train.pkl", "rb") as fin:
        traindf = pickle.load(fin)

    # .as_matrix() matches the legacy pandas used alongside this sklearn
    # version; on pandas >= 1.0 replace with .to_numpy().
    X, y = traindf['Data'], traindf['Tags'].as_matrix()

    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7)

    gridSearch = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1,
                              scoring='accuracy')
    gridSearch.fit(Xtrain, ytrain)

    print ('best score: %0.3f' % gridSearch.best_score_)
    print ('best parameters set:')

    bestParameters = gridSearch.best_estimator_.get_params()

    # 'with' guarantees res.txt is flushed and closed even on error
    # (the original opened it and never closed it).
    with open("res.txt", 'w') as res:
        res.write ('best parameters set:\n')
        for paramName in sorted(parameters.keys()):
            print ('\t %s: %r' % (paramName, bestParameters[paramName]))
            res.write('\t %s: %r\n' % (paramName, bestParameters[paramName]))

    with open("bestParams.pkl", "wb") as fout:
        pickle.dump(bestParameters, fout)

    predictions = gridSearch.predict(Xtest)
    print ('Accuracy:', accuracy_score(ytest, predictions))
    print ('Confusion Matrix:', confusion_matrix(ytest, predictions))
    print ('Classification Report:', classification_report(ytest, predictions))

說明該標籤可以有多個值的示例程序。現在我得到

An unexpected error occurred while tokenizing input 
The following traceback may be corrupted or invalid 
The error message is: ('EOF in multi-line statement', (40, 0)) 

Traceback (most recent call last): 
    File "X:\abc\predMNB.py", line 128, in <module> 
    MNBdrill(fname,topn) 
    File "X:\abc\predMNB.py", line 82, in MNBdrill 
    gridSearch.fit(Xtrain, ytrain) 
    File "X:\pqr\Anaconda2\lib\site-packages\sklearn\grid_search.py", line 732, in fit 
    return self._fit(X, y, ParameterGrid(self.param_grid)) 
    File "X:\pqr\Anaconda2\lib\site-packages\sklearn\grid_search.py", line 505, in _fit 
    for parameters in parameter_iterable 
    File "X:\pqr\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 666, in __call__ 
    self.retrieve() 
    File "X:\pqr\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 549, in retrieve 
    raise exception_type(report) 
sklearn.externals.joblib.my_exceptions.JoblibMemoryError: JoblibMemoryError 

然後

Multiprocessing exception: 
........................................................................... 
X:\pqr\Anaconda2\lib\site-packages\sklearn\grid_search.py in fit(self=GridSearchCV(cv=None, error_score='raise', 
    ..._func=None, 
     scoring='accuracy', verbose=1), X=14151 text for document having t1,t2,t3,t4 
Name: Content, dtype: object, y=array([u't1',u't2',u't3',u't4'], dtype=object)) 
    727   y : array-like, shape = [n_samples] or [n_samples, n_output], optional 
    728    Target relative to X for classification or regression; 
    729    None for unsupervised learning. 
    730 
    731   """ 
--> 732   return self._fit(X, y, ParameterGrid(self.param_grid)) 
     self._fit = <bound method GridSearchCV._fit of GridSearchCV(...func=None, 
     scoring='accuracy', verbose=1)> 
     X = 14151 text for document having t1,t2,t3,t4 
Name: Content, dtype: object 
     y = array([u't1',u't2',u't3',u't4'], dtype=object) 
     self.param_grid = {'clf__fit_prior': (True, False), 'vect__max_df': (0.25, 0.5, 0.6, 0.7, 1.0), 'vect__ngram_range': ((1, 1), (1, 2), (2, 3), (1, 3), (1, 4), (1, 5)), 'vect__use_idf': (True, False)} 
    733 
    734 
    735 class RandomizedSearchCV(BaseSearchCV): 
    736  """Randomized search on hyper parameters. 

........................................................................... 
X:\pqr\Anaconda2\lib\site-packages\sklearn\grid_search.py in _fit(self=GridSearchCV(cv=None, error_score='raise', 
    ..._func=None, 
     scoring='accuracy', verbose=1), X=14151 text for document having t1,t2,t3,t4 
Name: Content, dtype: object, y=array([u't1',u't2',u't3',u't4'], dtype=object), parameter_iterable=<sklearn.grid_search.ParameterGrid object>) 
    500  )(
    501    delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, 
    502          train, test, self.verbose, parameters, 
    503          self.fit_params, return_parameters=True, 
    504          error_score=self.error_score) 
--> 505     for parameters in parameter_iterable 
     parameters = undefined 
     parameter_iterable = <sklearn.grid_search.ParameterGrid object> 
    506     for train, test in cv) 
    507 
    508   # Out is a list of triplet: score, estimator, n_test_samples 
    509   n_fits = len(out) 

........................................................................... 
X:\pqr\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self=Parallel(n_jobs=3), iterable=<itertools.islice object>) 
    661    if pre_dispatch == "all" or n_jobs == 1: 
    662     # The iterable was consumed all at once by the above for loop. 
    663     # No need to wait for async callbacks to trigger to 
    664     # consumption. 
    665     self._iterating = False 
--> 666    self.retrieve() 
     self.retrieve = <bound method Parallel.retrieve of Parallel(n_jobs=3)> 
    667    # Make sure that we get a last message telling us we are done 
    668    elapsed_time = time.time() - self._start_time 
    669    self._print('Done %3i out of %3i | elapsed: %s finished', 
    670       (len(self._output), 

    --------------------------------------------------------------------------- 
    Sub-process traceback: 
    --------------------------------------------------------------------------- 
    MemoryError          

堆棧跟蹤隨後繼續,指向其他出現相同問題的方法。如果需要的話我可以貼出完整內容,但以下是我認爲正在發生的情況:

通知

scoring='accuracy', verbose=1), X=14151 text for document having t1,t2,t3,t4 
Name: Content, dtype: object, y=array([u't1',u't2',u't3',u't4'], dtype=object)) 

,因爲有多個標籤,這可能會導致一個問題呢?

另外,「多重處理異常(Multiprocessing exception)」是什麼意思?

MemoryError?

請幫我解決這個問題。

回答

1

你有多少火車數據?

我最好的選擇是唯一的「真正」的錯誤是MemoryError即你試圖訓練你的分類器時使用所有可用的RAM,所有其他奇怪的錯誤/回溯是失敗的內存分配的後果。

分類器正在接受培訓時,您是否檢查了空閒內存?

+0

沒有檢查。但是你確定多個標籤不是原因? MNB可以預測多個值嗎? – AbtPst

+0

我有大約600 MB的訓練數據 – AbtPst