Poor performance on the MNIST digit recognition dataset

I have been playing with the MNIST digit recognition dataset and I am a bit stuck. I read a few research papers and implemented everything I understood from them. Basically, I first created a training set and a cross-validation set for evaluating the classifier, then ran PCA on the test and training sets, and then used KNN and SVM for the classification task. The main question I am facing is whether I should run PCA on all the sets together and split them into training and cross-validation sets afterwards, or split first and then run PCA separately on the cross-validation/test set and the training set. I apologise for asking about something I have already tried, because I have tried both: in the first case my classifier performed exceptionally well, which I suspect is because PCA built the principal components using the test data as well, tuning my results and biasing my model; in the second case the accuracy was around 20% to 30%, which is very low. So I am a bit confused about how to improve my model. Any help and guidance is highly appreciated. I have pasted my code below for reference.
library(ggplot2)
library(e1071)
library(ElemStatLearn)
library(plyr)
library(class)
import.csv <- function(filename){
  return(read.csv(filename, sep = ",", header = TRUE, stringsAsFactors = FALSE))
}
train.data <- import.csv("train.csv")
#hold out rows 30001-32000 of the training file as a test set,
#and keep the first 6000 rows for training
test.data <- train.data[30001:32000,]
train.data <- train.data[1:6000,]
#Performing PCA on the dataset to reduce the dimensionality of the data
get_PCA <- function(dataset){
  dataset.features <- dataset[,!(colnames(dataset) %in% c("label"))]
  #drop zero-variance (constant) pixels: prcomp with scale. = TRUE cannot scale them
  features.zero.variance <- names(which(sapply(dataset.features, function(v) var(v, na.rm = TRUE) == 0)))
  dataset.features <- dataset.features[,!(colnames(dataset.features) %in% features.zero.variance)]
  pr.comp <- prcomp(dataset.features, retx = TRUE, center = TRUE, scale. = TRUE)
  #finding the total variance contained in the principal components (variance = sdev^2)
  prin_comp <- summary(pr.comp)
  #print(paste0("%age of variance contained = ", sum(pr.comp$sdev[1:50]^2)/sum(pr.comp$sdev^2)))
  screeplot(pr.comp, type = "lines", main = "Principal Components")
  #keep the first 50 principal components
  num.of.comp = 50
  red.dataset <- data.frame(prin_comp$x[, 1:num.of.comp])
  return(red.dataset)
}
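On the split question: the leakage-free pattern is to fit PCA on the training set only and then project the held-out data with the same centering, scaling and rotation, which predict() on a prcomp object does. A minimal sketch (names are illustrative; it assumes zero-variance columns have already been dropped from both sets, since scale. = TRUE cannot handle constant columns):

#fit PCA on the training features only, then project the test features
#with the same transformation via predict()
train.features <- train.data[, !(colnames(train.data) %in% "label")]
test.features <- test.data[, !(colnames(test.data) %in% "label")]
pca.fit <- prcomp(train.features, retx = TRUE, center = TRUE, scale. = TRUE)
train.red <- data.frame(pca.fit$x[, 1:50], label = train.data$label)
test.red <- data.frame(predict(pca.fit, newdata = test.features)[, 1:50],
                       label = test.data$label)

This keeps the test set out of the component estimation, which removes the optimistic bias from the first setup; the 20% to 30% accuracy in the second setup is what you get when the test set is projected onto a separately fitted, incompatible basis.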
#Perform k-fold cross validation
do_cv_class <- function(df, k, classifier){
  #classifier strings are either "svm" or "<m>nn" (e.g. "5nn" for 5-nearest-neighbour)
  num_of_nn = gsub("[^[:digit:]]","",classifier)
  classifier = gsub("[[:digit:]]","",classifier)
  if(num_of_nn == "")
  {
    classifier = paste0("get_pred_", classifier)
  }
  else
  {
    classifier = paste0("get_pred_k", classifier)
    num_of_nn = as.numeric(num_of_nn)
  }
  func = match.fun(classifier)
  size_distr = c()
  n = nrow(df)
  #work out how many rows fall into each of the k folds
  for(i in 1:k)
  {
    a = 1 + (((i-1) * n)%/%k)
    b = ((i*n)%/%k)
    size_distr = append(size_distr, b - a + 1)
  }
  #randomly partition the row indices into k disjoint folds
  row_num = 1:n
  sampling = list()
  for(i in 1:k)
  {
    s = sample(row_num, size_distr[i])
    sampling[[i]] = s
    row_num = setdiff(row_num, s)
  }
  outcome.list = list()
  for(i in 1:k)
  {
    testSample = sampling[[i]]
    train_set = df[-testSample,]
    test_set = df[testSample,]
    #dispatch to the chosen predictor; KNN also needs the neighbour count
    if(is.numeric(num_of_nn))
      result = func(train_set, test_set, num_of_nn)
    else
      result = func(train_set, test_set)
    confusion.matrix <- table(pred = result, true = test_set$label)
    accuracy <- 100 * sum(diag(confusion.matrix)) / sum(confusion.matrix)
    print(confusion.matrix)
    outcome <- list(sample_ID = i, Accuracy = accuracy)
    outcome.list <- rbind(outcome.list, outcome)
  }
  return(outcome.list)
}
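If PCA should also happen inside the cross-validation, the same pattern applies per fold: fit on the fold's training rows, project its test rows. A hedged sketch of one fold (not wired into do_cv_class above; the names mirror its loop variables):

#per-fold PCA, fitted on the training rows of the fold only
fold.features <- train_set[, !(colnames(train_set) %in% "label")]
fold.pca <- prcomp(fold.features, retx = TRUE, center = TRUE, scale. = TRUE)
train_red <- data.frame(fold.pca$x[, 1:50], label = train_set$label)
test_red <- data.frame(predict(fold.pca, test_set[, !(colnames(test_set) %in% "label")])[, 1:50],
                       label = test_set$label)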
#Support Vector Machines with a radial (RBF) kernel
get_pred_svm <- function(train, test){
  digit.class.train <- as.factor(train$label)
  #drop the label column by name; train$label values are not column indices
  train.features <- train[, !(colnames(train) %in% "label")]
  test.features <- test[, !(colnames(test) %in% "label")]
  svm.model <- svm(train.features, digit.class.train, cost = 10, gamma = 0.0001, kernel = "radial")
  svm.pred <- predict(svm.model, test.features)
  return(svm.pred)
}
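The cost = 10, gamma = 0.0001 pair is hard-coded above; e1071 also provides tune.svm for a small grid search over these hyperparameters on the training data. A sketch, with an illustrative (not recommended) grid, reusing the names from get_pred_svm:

#grid search over cost and gamma with e1071::tune.svm;
#the grid values below are assumptions, not tuned recommendations
tuned <- tune.svm(train.features, digit.class.train,
                  cost = 10^(0:2), gamma = 10^seq(-5, -3))
print(tuned$best.parameters)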
#KNN model; num_of_nn is the neighbour count parsed from e.g. "5nn"
get_pred_knn <- function(train, test, num_of_nn = 1){
  digit.class.train <- as.factor(train$label)
  train.features <- train[, !colnames(train) %in% "label"]
  test.features <- test[, !colnames(test) %in% "label"]
  knn.model <- knn(train.features, test.features, digit.class.train, k = num_of_nn)
  return(knn.model)
}
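For reference, with the fixed functions above, an end-to-end run could look like this (classifier strings follow the dispatch in do_cv_class: "svm", or "<m>nn" such as "5nn"):

#illustrative end-to-end run: PCA-reduce the training data, then
#10-fold cross-validate an SVM and a 5-nearest-neighbour classifier
reduced.train <- cbind(get_PCA(train.data), label = train.data$label)
svm.results <- do_cv_class(reduced.train, k = 10, classifier = "svm")
knn.results <- do_cv_class(reduced.train, k = 10, classifier = "5nn")

Note that this still fits PCA once over all cross-validation rows; the per-fold sketch after do_cv_class avoids that if you want the evaluation to be strictly leakage-free.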
=========================================================================
Thank you for the help and guidance :). – user37940
If it helped, consider upvoting/accepting :) –
I really appreciate your help; I got 96.87% accuracy, which is amazing. I am sorry for not upvoting, it keeps saying I need 15 reputation to do that. – user37940