
Unexpected output when using 'neuralnet' in R

I am using the neuralnet package in R to predict handwritten digits, with the MNIST database for training and testing the algorithm. Here is the R code I am using:

# Importing the data into R 
path <- "path_to_data_folder/MNIST_database_of_handwritten_digits/" # Data can be downloaded from: http://yann.lecun.com/exdb/mnist/ 
to.read = file(paste0(path, "train-images-idx3-ubyte"), "rb") 
to.read_Label = file(paste0(path, "train-labels-idx1-ubyte"), "rb") 
magicNumber <- readBin(to.read, integer(), n=1, endian="big") 
magicNumber_Label <- readBin(to.read_Label, integer(), n=1, endian="big") 
numberOfImages <- readBin(to.read, integer(), n=1, endian="big") 
numberOfImages_Label <- readBin(to.read_Label, integer(), n=1, endian="big") 
rowPixels <- readBin(to.read, integer(), n=1, endian="big") 
columnPixels <- readBin(to.read, integer(), n=1, endian="big") 

# image(1:rowPixels, 1:columnPixels, matrix(readBin(to.read, integer(), n=(rowPixels*columnPixels), size=1, endian="big"), rowPixels, columnPixels)[,columnPixels:1], col=gray((0:255)/255)) 

trainDigits <- NULL 
trainDigits <- vector(mode="list", length=numberOfImages) 
for(i in 1:numberOfImages) 
    trainDigits[[i]] <- as.vector(matrix(readBin(to.read, integer(), n=(rowPixels*columnPixels), size=1, endian="big"), rowPixels, columnPixels)[,columnPixels:1]) 

trainDigits <- t(data.frame(trainDigits)) # Takes a minute 
trainDigits <- data.frame(trainDigits, row.names=NULL) 

# i <- 1 # Specify the image number to visualize the image 
# image(1:rowPixels, 1:columnPixels, matrix(trainDigits[i,], rowPixels, columnPixels), col=gray((0:255)/255)) 

trainDigits_Label <- NULL 
for(i in 1:numberOfImages_Label) 
    trainDigits_Label <- c(trainDigits_Label, readBin(to.read_Label, integer(), n=1, size=1, endian="big")) 

# appending the labels to the training data 
trainDigits <- cbind(trainDigits, trainDigits_Label) 

#################### Modelling #################### 

library(neuralnet) 
# Considering only 500 rows for training due to time and memory constraints 
myNnet <- neuralnet(formula = as.formula(paste0("trainDigits_Label ~ ", paste0("X",1:(ncol(trainDigits)-1), collapse="+"))), 
           data = trainDigits[1:500,], hidden = 10, algorithm='rprop+', learningrate=0.01) 

#################### Test Data #################### 

to.read_test = file(paste0(path, "t10k-images-idx3-ubyte"), "rb") 
to.read_Label_test = file(paste0(path, "t10k-labels-idx1-ubyte"), "rb") 
magicNumber <- readBin(to.read_test, integer(), n=1, endian="big") 
magicNumber_Label <- readBin(to.read_Label_test, integer(), n=1, endian="big") 
numberOfImages_test <- readBin(to.read_test, integer(), n=1, endian="big") 
numberOfImages_Label_test <- readBin(to.read_Label_test, integer(), n=1, endian="big") 
rowPixels <- readBin(to.read_test, integer(), n=1, endian="big") 
columnPixels <- readBin(to.read_test, integer(), n=1, endian="big") 

testDigits <- NULL 
testDigits <- vector(mode="list", length=numberOfImages_test) 
for(i in 1:numberOfImages_test) 
    testDigits[[i]] <- as.vector(matrix(readBin(to.read_test, integer(), n=(rowPixels*columnPixels), size=1, endian="big"), rowPixels, columnPixels)[,columnPixels:1]) 

testDigits <- t(data.frame(testDigits)) # Takes a minute 
testDigits <- data.frame(testDigits, row.names=NULL) 

testDigits_Label <- NULL 
for(i in 1:numberOfImages_Label_test) 
    testDigits_Label <- c(testDigits_Label, readBin(to.read_Label_test, integer(), n=1, size=1, endian="big")) 

#################### 'neuralnet' Predictions #################### 

predictOut <- compute(myNnet, testDigits) 
table(round(predictOut$net.result), testDigits_Label) 

#################### Random Forest #################### 
# Cross-validating NN results with Random Forest 

library(randomForest) 
myRF <- randomForest(x=trainDigits[,-ncol(trainDigits)], y=as.factor(trainDigits_Label), ntree=100) 

predRF <- predict(myRF, newdata=testDigits) 
table(predRF, testDigits_Label) # Confusion Matrix 
sum(diag(table(predRF, testDigits_Label)))/sum(table(predRF, testDigits_Label)) # % of correct predictions 

There are 60,000 training images (28 x 28 pixel images), and the digits 0 to 9 are distributed (almost) equally across the whole dataset. Unlike the 'Modelling' section above, where only 500 images were used, I trained the myNnet model above (28 * 28 = 784 inputs and 10 outputs) with the entire training dataset and then predicted the output for the 10,000 images in the test dataset. (I used only 10 neurons in the hidden layer because of memory constraints.)

The results I obtained from the prediction are strange: the output is a kind of Gaussian distribution where 4 is predicted most of the time, and the predictions fall off (roughly) exponentially toward 0 or 9. You can see the confusion matrix below (I rounded the outputs since they are not integers):

> table(round(predictOut$net.result), testDigits_Label) 
    testDigits_Label 
     0 1 2 3 4 5 6 7 8 9 
    -2 1 1 4 1 1 3 0 4 1 2 
    -1 8 17 12 9 7 8 8 12 7 10 
    0 38 50 44 45 35 28 36 40 30 39 
    1 77 105 86 80 71 69 68 75 67 77 
    2 116 163 126 129 101 97 111 101 99 117 
    3 159 205 196 174 142 140 153 159 168 130 
    4 216 223 212 183 178 170 177 169 181 196 
    5 159 188 150 183 183 157 174 176 172 155 
    6 119 111 129 125 143 124 144 147 129 149 
    7 59 53 52 60 74 52 51 91 76 77 
    8 22 14 18 14 32 36 28 38 35 41 
    9 6 5 3 7 15 8 8 16 9 16 

I thought something must be wrong with my approach, so I tried the prediction with R's randomForest. randomForest, however, works fine and gives more than 95% accuracy. Here is the confusion matrix of the randomForest predictions:

> table(predRF, testDigits_Label) 
     testDigits_Label 
predRF 0 1 2 3 4 5 6 7 8 9 
    0 967 0 6 1 1 7 11 2 5 5 
    1 0 1123 0 0 0 1 3 7 0 5 
    2 1 2 974 9 3 1 3 25 4 2 
    3 0 3 5 963 0 21 0 0 9 10 
    4 0 0 12 0 940 1 4 2 7 15 
    5 4 0 2 16 0 832 6 0 11 4 
    6 6 5 5 0 7 11 929 0 3 2 
    7 1 1 14 7 2 2 0 979 4 6 
    8 1 1 12 7 5 11 2 1 917 10 
    9 0 0 2 7 24 5 0 12 14 950 
  • Question 1: So, can anyone please explain why neuralnet behaves this strangely with this dataset? (By the way, neuralnet works fine with the iris dataset when I checked.)

    • Edit: I think I understand the reason for the Gaussian-like distribution of the output when neuralnet is used. With neuralnet there is only one output node (or is it a neuron?) rather than one node per output class (ten classes here). So, while calculating the delta for back-propagation, the algorithm computes the difference between the 'expected output' and the 'computed output', and in aggregate over all instances this error would be smallest for outputs of 4 or 5. The weights are therefore adjusted during back-propagation in such a way that this output error is minimized. This could be the reason for the Gaussian kind of output given by neuralnet. (A small numeric sketch of this intuition follows the list below.)
  • Question 2: Also, I would like to know how to correct this behavior of neuralnet and obtain predictions on a par with the randomForest results.
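
A minimal numeric sketch of the intuition in the edit above (using a hypothetical, perfectly balanced label vector, not the actual MNIST labels): with a single numeric output trained on squared error, the constant prediction that minimizes the error is the mean label, which lies near 4-5, so a weak single-output regression drifts toward the middle of the label range.

labels <- rep(0:9, each=100)                          # hypothetical, perfectly balanced labels
sse <- sapply(0:9, function(k) sum((labels - k)^2))   # error of each constant guess 0..9
(0:9)[which.min(sse)]                                 # 4 (5 ties with it): the centre of the range wins
mean(labels)                                          # 4.5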


See [this example](http://www.parallelr.com/r-dnn-parallel-acceleration/) of a DNN in native R with the MNIST dataset. – Patric

Answers

10

Some preliminary remarks: you can load the data a little more efficiently like this:

# Read in data. 
trainDigits <- replicate(numberOfImages,c(matrix(readBin(to.read, integer(), n=(rowPixels*columnPixels), size=1, endian="big"),rowPixels,columnPixels)[,columnPixels:1])) 
trainDigits <- data.frame(t(trainDigits),row.names=NULL) 
trainDigits_Label<-replicate(numberOfImages,readBin(to.read_Label, integer(), n=1, size=1, endian="big")) 

Your first problem is that you have not specified a multi-class prediction for neuralnet. What you are doing is predicting a single real number from 0 to 9. That is why there is only one output rather than 10 predictions.

If you look at ?neuralnet, there is an example of a multi-class prediction. You have to put each class in a separate variable and place it on the left-hand side of the formula. Other packages, such as nnet, automatically detect a factor and do this for you. You can use the class.ind function from nnet to split a factor into multiple variables:

library(nnet) # class.ind comes from the nnet package

# appending the label indicator columns to the training data
output <- class.ind(trainDigits_Label)
colnames(output)<-paste0('out.',colnames(output))
output.names<-colnames(output)
input.names<-colnames(trainDigits)
trainDigits<-cbind(output,trainDigits)
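
For example (with nnet loaded as above, and a small made-up label vector rather than the real data), class.ind turns a vector of labels into an indicator matrix with one column per class:

class.ind(c(0, 1, 1, 3))
#      0 1 3
# [1,] 1 0 0
# [2,] 0 1 0
# [3,] 0 1 0
# [4,] 0 0 1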

Now you can paste the formula together:

# Considering only 500 rows 
trainsize=500 
# neuralnet:::varify.variables (sic) does not pass "data" when calling "terms". 
# If it did, you wouldn't have to construct the formula like this. 
library(neuralnet) 
myNnet <- neuralnet(formula = paste(paste(output.names,collapse='+'),'~', 
           paste(input.names,collapse='+')), 
        data = trainDigits[1:trainsize,], 
        hidden = 10, 
        algorithm='rprop+', 
        learningrate=0.01, 
        rep=1) 

That correction still does not make the neural network perform well. To see how badly it is doing, look at the training data. It should be very good there, because it has already seen all of that data:

# Accuracy on training data 
res<-compute(myNnet,trainDigits[1:trainsize,input.names]) 
picks<-(0:9)[apply(res$net.result,1,which.max)] 
prop.table(table(trainDigits_Label[1:trainsize] == picks)) 
# FALSE TRUE 
# 0.376 0.624 

62% accuracy on the training data is terrible. As you would expect, it performs only slightly above random on the rest of the data:

# Accuracy on test data 
res<-compute(myNnet,trainDigits[(trainsize+1):60000,input.names]) 
picks<-(0:9)[apply(res$net.result,1,which.max)] 
prop.table(table(trainDigits_Label[(trainsize+1):60000] == picks)) 
# FALSE   TRUE 
# 0.8612268908 0.1387731092 
# 14% accuracy 

Random forest does remarkably well with exactly the same data. There is a good reason it has become so popular lately.

trainsize=500 
library(randomForest) 
myRF <- randomForest(trainDigits_Label~., 
        data=data.frame(trainDigits_Label=as.factor(trainDigits_Label), 
            trainDigits[input.names])[1:trainsize,], 
        ntree=100) 

# Train 
p <- as.numeric(as.character(predict(myRF))) 
prop.table(table(trainDigits_Label[1:trainsize]==p)) 
# Accuracy: 79%  

# Test 
p <- as.numeric(as.character(predict(myRF,trainDigits[(trainsize+1):60000,]))) 
prop.table(table(trainDigits_Label[(trainsize+1):60000]==p)) 
# Accuracy: 76% 

So, for your second question, my counter-question would be: why would you expect a neural network to perform as well as a random forest? They may have some vague structural similarities, but the fitting procedures are quite different. I suppose you could pick apart the nodes in the neural network and compare them to the most important variables in the random forest model. But, at that point, it is more of a statistical question than a programming one.
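
If you did want to attempt that comparison, here is a rough sketch (assuming the myRF and myNnet objects fitted above, and treating the size of the input-to-hidden weights as a crude proxy for how much the network relies on each pixel):

# variable importance from the random forest (mean decrease in Gini)
imp <- importance(myRF)
head(imp[order(imp, decreasing=TRUE), , drop=FALSE])

# crude proxy for input relevance in the neural network:
# the first weight matrix maps (bias + 784 inputs) to the hidden units,
# so sum the absolute weights leaving each input pixel
w.in <- myNnet$weights[[1]][[1]]           # rows: bias, X1..X784; columns: hidden units
input.strength <- rowSums(abs(w.in))[-1]   # drop the bias row
head(sort(input.strength, decreasing=TRUE))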


Thanks a lot for the insight on how to do multi-class classification with 'neuralnet'. Yes, it is quite interesting to see how RF, such a simple algorithm, performs so well on this task. Regarding your counter-question, I think of a neural network as a very powerful algorithm that can learn to recognize almost any pattern that other ML algorithms can barely see. Even in the case of simple patterns, I would expect the neural network to tune its weights so as to mimic the behavior of any other algorithm that can detect those patterns. – StrikeR


I agree that neural networks are *flexible*, which lets them recognize patterns that other machines cannot see. But that does not make them more *powerful*; the flexibility makes them harder to fit, more likely to fall into local minima, and more prone to over-fitting the training data (as in this example). – nograpes

2

I would like to thank all the previous authors of this discussion, since it is the most informative source on the web about using this package! This discussion was very helpful to me while I was studying the neuralnet R package.

Question 2: It is possible to predict the digit labels with better accuracy using neuralnet by following these tips:

  • Use more neurons. 10 neurons in the hidden layer are not enough; use at least 30 neurons.
  • Normalize and center the inputs before training. Read chapter 3 of Max Kuhn's "Applied Predictive Modeling".
  • The learningrate parameter is only used by the 'backprop' algorithm. For the other algorithms (rprop+, sag, slr, ...) use the learningrate.limit and learningrate.factor parameters.
  • Use more training data.
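
A minimal sketch of the second tip (the full script at the end of this answer does the same thing, see 'Trick #2'), assuming trainDigits and testDigits hold only the 784 pixel columns; trainX and testX are just names used for this sketch:

normFactor <- max(trainDigits)                    # 255 for MNIST pixel data
trainX <- trainDigits / normFactor                # scale to [0, 1]
centerFactor <- mean(as.matrix(trainX))           # overall mean pixel value of the training set
trainX <- trainX - centerFactor                   # center
testX <- testDigits / normFactor - centerFactor   # reuse the training statistics on the test set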

With 30 neurons, this NN gives:

[1] "NN to predict Labels." 
[1] "Confusion matrix for training set:" 
     Expected 
Predicted 0 1 2 3 4 5 6 7 8 9 
     0 96 0 0 0 0 0 0 0 0 0 
     1 1 116 0 0 0 0 0 0 0 0 
     2 0 0 99 0 0 0 0 0 0 0 
     3 0 0 0 93 0 0 0 0 0 0 
     4 0 0 0 0 104 1 0 0 0 0 
     5 0 0 0 0 1 91 0 0 0 0 
     6 0 0 0 0 0 0 94 0 0 0 
     7 0 0 0 0 0 0 0 117 0 0 
     8 0 0 0 0 0 0 0 0 87 0 
     9 0 0 0 0 0 0 0 0 0 100 
[1] "Model accuracy on training set is 99.7%" 

[1] "Confusion matrix for test set:" 
     Expected 
Predicted 0 1 2 3 4 5 6 7 8 9 
     0 337 380 257 160 87 85 67 25 45 30 
     1 134 169 97 77 60 64 70 32 41 16 
     2 121 179 112 109 59 79 69 31 55 27 
     3 119 136 138 114 99 102 96 67 66 55 
     4 87 102 91 135 106 102 104 86 87 54 
     5 84 75 95 114 114 91 142 104 82 66 
     6 48 41 80 98 106 116 144 138 104 92 
     7 22 28 55 82 103 78 100 146 104 124 
     8 16 9 42 56 80 60 65 123 93 125 
     9 12 16 65 65 168 115 101 276 297 420 
[1] "Model accuracy on test set is 17.32%" 

The test results are far from good, but the diagonal shape of the confusion matrix shows that the model is working in the right direction. The model accuracy can be improved by tuning the training-set size and the threshold; I got about 30% accuracy this way. But this model is limited, and the best results are obtained with a model that predicts the label classes rather than the labels themselves. I was able to get about 80% accuracy from such a model with the neuralnet package.

With 30 neurons in the hidden layer and a training size of 1000, this NN gives:

[1] "NN to predict Label Classes." 
[1] "Confusion matrix for training set:" 
     Expected 
Predicted 0 1 2 3 4 5 6 7 8 9 
     0 95 0 0 0 0 0 0 0 0 1 
     1 0 113 0 0 0 0 0 1 0 0 
     2 0 0 98 0 0 2 0 1 0 0 
     3 1 2 0 93 1 0 0 1 0 0 
     4 0 0 0 0 104 0 0 0 0 1 
     5 1 1 0 0 0 90 0 1 0 0 
     6 0 0 0 0 0 0 93 0 0 0 
     7 0 0 0 0 0 0 0 112 0 0 
     8 0 0 0 0 0 0 0 0 86 0 
     9 0 0 1 0 0 0 1 1 1 98 
[1] "Model accuracy on training set is 98.2%" 
[1] "Confusion matrix for test set:" 
     Expected 
Predicted 0 1 2 3 4 5 6 7 8 9 
     0 791 0 32 28 11 62 12 25 20 22 
     1 1 1050 13 4 10 10 2 13 31 35 
     2 24 2 580 59 8 13 39 73 26 24 
     3 42 14 105 607 79 112 74 68 106 124 
     4 10 12 40 28 495 62 59 20 83 83 
     5 39 31 25 126 35 444 71 6 54 22 
     6 13 3 45 7 22 15 554 3 18 13 
     7 4 4 31 11 37 10 7 732 11 66 
     8 21 7 92 79 51 96 50 19 518 21 
     9 35 12 69 61 234 68 90 69 107 599 
[1] "Model accuracy on test set is 63.7%" 
#################### Importing the data into R ########## 
#path <- "path_to_data_folder/MNIST_database_of_handwritten_digits/" # Data can be downloaded from: http://yann.lecun.com/exdb/mnist/ 
path <- "../MNIST_DATA/UNZIP/" 
to.read = file(paste0(path, "train-images.idx3-ubyte"), "rb") 
to.read_Label = file(paste0(path, "train-labels.idx1-ubyte"), "rb") 
magicNumber <- readBin(to.read, integer(), n=1, endian="big") 
magicNumber_Label <- readBin(to.read_Label, integer(), n=1, endian="big") 
numberOfImages <- readBin(to.read, integer(), n=1, endian="big") 
numberOfImages_Label <- readBin(to.read_Label, integer(), n=1, endian="big") 
rowPixels <- readBin(to.read, integer(), n=1, endian="big") 
columnPixels <- readBin(to.read, integer(), n=1, endian="big") 

trainDigits <- NULL 

#Trick #1: read unsigned data 
trainDigits <- replicate(numberOfImages,c(matrix(readBin(to.read, integer(), n=(rowPixels*columnPixels), 
                 size=1, endian="big", signed=F), 
               rowPixels,columnPixels)[,columnPixels:1])) 
trainDigits <- data.frame(t(trainDigits),row.names=NULL) 
trainDigits_Label<-replicate(numberOfImages,readBin(to.read_Label, integer(), n=1, size=1, endian="big", signed=F)) 
close(to.read) 
close(to.read_Label) 

#################### Test Data #################### 

to.read_test = file(paste0(path, "t10k-images.idx3-ubyte"), "rb") 
to.read_Label_test = file(paste0(path, "t10k-labels.idx1-ubyte"), "rb") 
magicNumber <- readBin(to.read_test, integer(), n=1, endian="big") 
magicNumber_Label <- readBin(to.read_Label_test, integer(), n=1, endian="big") 
numberOfImages_test <- readBin(to.read_test, integer(), n=1, endian="big") 
numberOfImages_Label_test <- readBin(to.read_Label_test, integer(), n=1, endian="big") 
rowPixels <- readBin(to.read_test, integer(), n=1, endian="big") 
columnPixels <- readBin(to.read_test, integer(), n=1, endian="big") 

#read unsigned data 
testDigits <- replicate(numberOfImages_test,c(matrix(readBin(to.read_test, integer(), n=(rowPixels*columnPixels), 
                  size=1, endian="big", signed=F), 
                rowPixels,columnPixels)[,columnPixels:1])) 
testDigits <- data.frame(t(testDigits),row.names=NULL) 
testDigits_Label<-replicate(numberOfImages_test,readBin(to.read_Label_test, integer(), n=1, size=1, endian="big", signed=F)) 
close(to.read_test) 
close(to.read_Label_test) 

#################### Modelling #################### 

library(neuralnet) 

#add Label data to training data.frame 
trainData <- cbind(trainDigits_Label, trainDigits) 
names(trainData)[1] <- "Label" 

#Reduce training data for speedup 
trainSample <- 1000 #use more than 500 rows to get better model accuracy (slow!) 
trainData <- trainData[1:trainSample,] 
myThreshold <- trainSample/5000 #use smaller threshold to get better model accuracy (slow!) 

#Trick #2: normalize and center pixel data before trainig and testing 
normFactor <- max(trainData) #=255 
trainData[,-1] <- trainData[,-1]/normFactor #normalize inputs 
centerFactor <- mean(as.matrix(trainData[,-1])) # 0.5? mean over the columns 
trainData[,-1] <- trainData[,-1]- centerFactor #center inputs 
testDigits <- testDigits/normFactor - centerFactor 

#Trick #3: use more neurons in the hidden layer to rise the model accuracy 
nHidden=30 

#train model which predicts Labels 
myFormula <- as.formula(paste0("Label ~ ", paste0("X",1:(ncol(trainDigits)), collapse="+"))) 
myNnet <- neuralnet(formula = myFormula, data = trainData, hidden = c(nHidden), 
        algorithm='rprop+', #learningrate=0.01, 
        learningrate.limit=list(min=c(1e-10), max=c(0.01)), #default values min/max = 1e-10/0.1 
        learningrate.factor=list(minus=c(0.5), plus=c(1.2)), #default values minus/plus = 0.5/1.2 
        err.fct="sse", #Using "sum square errors" function for Error 
        act.fct="tanh",#Using tangent hyperbolicus activation smoothing function 
        threshold=myThreshold, 
        lifesign="full", lifesign.step=500, 
        stepmax=3e05) 

#Trick #4: get rid of negative predictions. consider them to be equal to zero. 
#The same with too big predictions (>9) 
myNnet$net.result[[1]][myNnet$net.result[[1]]<0]<-0 
myNnet$net.result[[1]][myNnet$net.result[[1]]>9]<-9 

#################### 'neuralnet' Predictions #################### 

predictOut <- compute(myNnet, testDigits) 
predictOut$net.result[predictOut$net.result<0] <- 0 
predictOut$net.result[predictOut$net.result>9] <- 9 

#################### Result analysis #################### 

#Model accuracy on training data 
confTrain <- table(Predicted=round(myNnet$net.result[[1]]), Expected=(trainData[,"Label"])) 
print("NN to predict Labels.") 
print("Confusion matrix for training set:") 
print (confTrain) 
print(paste0("Model accuracy on training set is ", round(sum(diag(confTrain))/sum(confTrain)*100,4), "%")) 

#Model accuracy on test data 
confTest <- table(Predicted=round(predictOut$net.result), Expected=testDigits_Label) 
print("Confusion matrix for test set:") 
print (confTest) 
print(paste0("Model accuracy on test set is ", round(sum(diag(confTest))/sum(confTest)*100,4), "%")) 



######################################################################################### 
#Trick #5: Predict digit Class instead of predicting digit Label 
#Replace each Label with a vector of 10 bits "Label classes" 
library (nnet) 

# appending the Label classes to the training data 
output <- class.ind(trainData[,"Label"]) 
colnames(output)<-paste0('out.',colnames(output)) 
output.names<-colnames(output) 
input.names<-colnames(trainData[,-1]) 
trainData <-cbind(output,trainData) 

#train model which predicts Label classes 
myFormula <- as.formula(paste0(paste0(output.names,collapse='+')," ~ ", 
           paste0(input.names, collapse="+"))) 
myNnetClass <- neuralnet(formula = myFormula, data = trainData, hidden = c(nHidden), 
        algorithm='sag', #learningrate=0.01, 
        learningrate.limit=list(min=c(1e-10), max=c(0.01)), #default values min/max = 1e-10/0.1 
        learningrate.factor=list(minus=c(0.5), plus=c(1.2)), #default values minus/plus = 0.5/1.2 
        err.fct="sse", #Using "sum square errors" function for Error 
        act.fct="tanh",#Using tangent hyperbolicus activation smoothing function 
        threshold=myThreshold, 
        lifesign="full", lifesign.step=500, 
        stepmax=3e05) 


# Convert binary output to categorical output (labels) 
nnres=myNnetClass$net.result[[1]] 
myNnetClass$net.result[[1]] <- (0:9)[apply(myNnetClass$net.result[[1]],1,which.max)] 


#################### 'neuralnet' Predictions #################### 

predictOutClass <- compute(myNnetClass, testDigits) 
colnames(predictOutClass$net.result) <- paste0("Cl", 0:9) 
predictedLabel <- (0:9)[apply(predictOutClass$net.result, 1, which.max)] 

#################### Result analysis #################### 

#Model accuracy on training data 
confTrain <- table(Predicted=myNnetClass$net.result[[1]], Expected=trainData[,"Label"]) 
print("NN to predict Label Classes.") 
print("Confusion matrix for training set:") 
print (confTrain) 
print(paste0("Model accuracy on training set is ", round(sum(diag(confTrain))/sum(confTrain)*100,4), "%")) 

#Model accuracy on test data 
confTest <- table(Predicted=predictedLabel, Expected=testDigits_Label) 
print("Confusion matrix for test set:") 
print (confTest) 
print(paste0("Model accuracy on test set is ", round(sum(diag(confTest))/sum(confTest)*100,4), "%")) 

Thanks a lot ASH for these insights. I have moved away from this problem for now, but I would like to try out your suggestions. – StrikeR