我想寫一個腳本來計算幾個文檔之間的相似度,打算用LSA(潛在語義分析)來做。我找到了下面的代碼並稍微修改了一下:輸入3個文檔,輸出一個3×3的相似度矩陣。我想做相同的相似度計算,但只使用sklearn庫,這可能嗎?
from numpy import zeros
from scipy.linalg import svd
from math import log
from numpy import asarray, sum
from nltk.corpus import stopwords
from sklearn.metrics.pairwise import cosine_similarity
# Example documents. The original referenced doc1/doc2/doc3 without ever
# defining them, which raises NameError at import time — replace these
# placeholder strings with your real texts.
doc1 = "The cat sat on the mat."
doc2 = "A dog chased the cat across the yard."
doc3 = "Dogs and cats can make good pets."
titles = [doc1, doc2, doc3]
# Punctuation characters intended to be stripped from tokens.
# NOTE(review): the LSA class below receives this but never uses it.
ignorechars = ''',:'!'''
class LSA(object):
    """Latent Semantic Analysis over a small document collection.

    Usage: parse() each document, build() the term-document count matrix,
    optionally TFIDF() to reweight it, then calc() to obtain per-document
    vectors whose pairwise cosine similarities measure document similarity.
    """

    def __init__(self, stopwords, ignorechars):
        # `stopwords` is expected to be the nltk.corpus.stopwords module
        # (or any object exposing words('english')).
        self.stopwords = stopwords.words('english')
        self.ignorechars = ignorechars  # currently unused — kept for API compat
        self.wdict = {}   # word -> list of doc indices, one entry per occurrence
        self.dcount = 0   # number of documents parsed so far

    def parse(self, doc):
        """Tokenize `doc` on whitespace and record each non-stopword occurrence."""
        for w in doc.split():
            w = w.lower()
            if w in self.stopwords:
                continue
            if w in self.wdict:
                self.wdict[w].append(self.dcount)
            else:
                self.wdict[w] = [self.dcount]
        self.dcount += 1

    def build(self):
        """Build the term-document count matrix A (rows = terms, cols = docs).

        Only words occurring more than once across the corpus are kept,
        matching the classic LSA tutorial this code derives from.
        """
        self.keys = sorted(k for k in self.wdict if len(self.wdict[k]) > 1)
        self.A = zeros([len(self.keys), self.dcount])
        for i, k in enumerate(self.keys):
            for d in self.wdict[k]:
                self.A[i, d] += 1

    def calc(self, k=2):
        """Return rank-`k` LSA document vectors, one row per document.

        Bug fix: the original returned -1*Vt from the *full* SVD. Vt is an
        orthogonal matrix, so cosine_similarity of its rows (or columns) is
        always the identity — every document pair looked either identical
        or orthogonal. Truncating to the top-k right singular vectors and
        transposing (documents as rows) yields meaningful similarities.
        """
        self.U, self.S, self.Vt = svd(self.A)
        k = min(k, self.Vt.shape[0])  # guard against k > number of docs
        return self.Vt[:k, :].T

    def TFIDF(self):
        """Reweight A in place with TF-IDF (call between build() and calc())."""
        WordsPerDoc = sum(self.A, axis=0)
        DocsPerWord = sum(asarray(self.A > 0, 'i'), axis=1)
        rows, cols = self.A.shape
        for i in range(rows):
            for j in range(cols):
                self.A[i, j] = (self.A[i, j] / WordsPerDoc[j]) * log(float(cols) / DocsPerWord[i])
# Drive the LSA pipeline over the corpus and print the document-to-document
# cosine-similarity matrix. (The original computed cosine_similarity(a) and
# silently discarded the result.)
mylsa = LSA(stopwords, ignorechars)
for t in titles:
    mylsa.parse(t)
mylsa.build()
doc_vectors = mylsa.calc()
similarity = cosine_similarity(doc_vectors)
print(similarity)
根據 @ogrisel 的回答補充:
我運行了下面的代碼,但仍然目瞪口呆 :) 用TF-IDF比較兩個同一主題的文檔最多只有80%的相似度,而這段代碼卻給出99.99%的相似度。所以我認為其中有問題 :P
# sklearn-only LSA pipeline. The original snippet omitted the imports it
# needs; they are added here so it runs as posted.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.metrics.pairwise import cosine_similarity

dataset = [doc1, doc2, doc3]
# TF-IDF features; max_df=0.5 drops terms appearing in over half the docs.
vectorizer = TfidfVectorizer(max_df=0.5, stop_words='english')
X = vectorizer.fit_transform(dataset)
# Reduce to the top singular components (TruncatedSVD defaults to 2).
lsa = TruncatedSVD()
X = lsa.fit_transform(X)
# L2-normalize so dot products equal cosine similarities.
X = Normalizer(copy=False).fit_transform(X)
# Keep the similarity matrix instead of discarding it as the original did.
similarity_matrix = cosine_similarity(X)
print(similarity_matrix)
在上面的代碼中,你認為應該把X的哪個值用作相似性度量?–