Tensorflow - oscillating learning rate with the CIFAR-10 dataset
I adapted the TensorFlow sample for the MNIST data set, which reaches ~90% accuracy on my machine, and tried similar code on the CIFAR-10 dataset. However, the accuracy is only 0-15% and never reaches 20%.
import six.moves.cPickle as cPickle
from pprint import pprint
def unpickle():
    # load the first CIFAR-10 training batch (a pickled dict with "data" and "labels")
    dict=[]
    fo = open(r'C:\train\cifar-10-batches-py\data_batch_1', 'rb')
    dict.append(cPickle.load(fo, encoding='latin1'))
    fo.close()
    return dict
def testpickle():
    # load the CIFAR-10 test batch
    afo = open(r'C:\train\cifar-10-batches-py\test_batch', 'rb')
    adict = cPickle.load(afo, encoding='latin1')
    afo.close()
    return adict
dt=unpickle()
import tensorflow as tf
import numpy as np
datadt = np.empty([5, 10000, 1024])  # 5 batches x 10000 images x 1024 red-channel values
#### arrange input data properly ####
for p in range(len(dt)):
    print(p)
    for i in range(len(dt[p]["labels"])):
        a = dt[p]["labels"][i]
        dt[p]["labels"][i] = [0,0,0,0,0,0,0,0,0,0]  # one-hot encode the label
        dt[p]["labels"][i][a] = 1
        datadt[p][i] = (dt[p]["data"][i].tolist()[:1024])  # keep only the red channel
tdt=testpickle()
###arrange test data properly###
testdt = np.empty([10000, 1024])
for i in range(len(tdt["labels"])):
    a = tdt["labels"][i]
    tdt["labels"][i] = [0,0,0,0,0,0,0,0,0,0]  # one-hot encode the label
    tdt["labels"][i][a] = 1
    testdt[i] = (tdt["data"][i].tolist()[:1024])
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 1024])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
W_conv1=weight_variable([5,5,1,8])
b_conv1=bias_variable([8])
x_image=tf.reshape(x,[-1,32,32,1])
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1=max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 8, 16])
b_conv2 = bias_variable([16])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([8 * 8 * 16, 32])
b_fc1 = bias_variable([32])
h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*16])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([32, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
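# note: the learning rate of 0.5 below is very large for Adam; the MNIST sample this is based on uses 1e-4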
train_step = tf.train.AdamOptimizer(0.5).minimize(cross_entropy)
sess.run(tf.global_variables_initializer())
tshaped_x=testdt
tshaped_y=tdt["labels"]
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
k=100
import random
for i in range(len(dt)):
    for u in range(99):
        shaped_x = datadt[i][(u*k):(u*k+k)]  # np.reshape(dt["data"][i], (-1,3072))
        shaped_y = dt[i]["labels"][(u*k):(u*k+k)]  # np.reshape(dt["labels"][i], (-1,10))
        train_step.run(feed_dict={x: shaped_x, y_: shaped_y, keep_prob: 0.5})
        r = random.randint(0, 9000)
        print(accuracy.eval(feed_dict={x: tshaped_x[r:r+50], y_: tshaped_y[r:r+50], keep_prob: 1.0}))
The neural-network part of the code is very similar to the sample, yet the results are:
0.08
0.06
0.12
0.2
0.14
0.14
0.1
0.12
0.1
0.1
0.04
0.14
0.14
(For convenience I only used the red-channel values of each image as input - the original 3072 ints represent R, G, and B, and I took the first 1024 integers, as in dt[p]["data"][i].tolist()[:1024].)
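For reference, a minimal sketch (mine, not part of the original post) of how one CIFAR-10 row splits into channels, assuming the standard layout of 1024 R values, then 1024 G, then 1024 B, each stored row-major:
import numpy as np
row = np.zeros(3072, dtype=np.uint8)             # stand-in for dt[p]["data"][i]
red = row[:1024]                                 # red channel only, as sliced above
rgb = row.reshape(3, 32, 32).transpose(1, 2, 0)  # full 32x32x3 image, if all channels were fed in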
I have searched various sites for an answer, without success. As a TensorFlow beginner I apologize if this is naive. Thanks for your generous help!
P.S. No matter how I change the AdamOptimizer learning rate, from 0.0001 up to 999, the results stay the same (or very similar).
I don't really understand your "P.S." about the learning rate. Do you really mean 999, or 0.999? If you mean 999, I would suggest trying a much smaller learning rate, e.g. 0.005. – ml4294
@ml4294 Yes, anywhere from 0.00001 all the way up to 999, the results are frighteningly similar. – user000001
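For what it's worth, a minimal sketch of ml4294's suggestion applied to the code above; the pixel scaling is my own added assumption, not something proposed in the thread:
# much smaller Adam step size, as suggested in the comments
train_step = tf.train.AdamOptimizer(0.005).minimize(cross_entropy)
# assumption: scale the raw 0-255 pixel values into [0, 1] before feeding them
shaped_x = datadt[i][(u*k):(u*k+k)] / 255.0
tshaped_x = testdt / 255.0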