
I am trying to restore the values of a neural network saved with TensorFlow. I tried to follow examples online, but restoring the network does not work. Here is my code:

import tensorflow as tf 
import numpy as np 
import math, random 
import matplotlib.pyplot as plt 


np.random.seed(1000) # for repro 
function_to_learn = lambda x: np.sin(x) + 0.1*np.random.randn(*x.shape) 

NUM_HIDDEN_NODES = 2 
NUM_EXAMPLES = 1000 
TRAIN_SPLIT = .8 
MINI_BATCH_SIZE = 100 
NUM_EPOCHS = 500 


all_x = np.float32(np.random.uniform(-2*math.pi, 2*math.pi, (1, NUM_EXAMPLES))).T 
np.random.shuffle(all_x) 
train_size = int(NUM_EXAMPLES*TRAIN_SPLIT) 
trainx = all_x[:train_size] 
validx = all_x[train_size:] 
trainy = function_to_learn(trainx) 
validy = function_to_learn(validx) 



plt.figure() 
plt.scatter(trainx, trainy, c='green', label='train') 
plt.scatter(validx, validy, c='red', label='validation') 
plt.legend() 


X = tf.placeholder(tf.float32, [None, 1], name="X") 
Y = tf.placeholder(tf.float32, [None, 1], name="Y") 


w_h = tf.Variable(tf.zeros([1, NUM_HIDDEN_NODES],name="w_h")) 
b_h = tf.Variable(tf.zeros([1, NUM_HIDDEN_NODES],name="b_h")) 
w_o = tf.Variable(tf.zeros([NUM_HIDDEN_NODES,1],name="w_o")) 
b_o = tf.Variable(tf.zeros([1, 1],name="b_o")) 



def init_weights(shape, init_method='xavier', xavier_params=(None, None)):
    if init_method == 'zeros':
        return tf.Variable(tf.zeros(shape, dtype=tf.float32))
    elif init_method == 'uniform':
        return tf.Variable(tf.random_normal(shape, stddev=0.01, dtype=tf.float32))
    else:  # 'xavier': uniform initialization scaled by fan-in and fan-out
        (fan_in, fan_out) = xavier_params
        low = -4 * np.sqrt(6.0 / (fan_in + fan_out))
        high = 4 * np.sqrt(6.0 / (fan_in + fan_out))
        return tf.Variable(tf.random_uniform(shape, minval=low, maxval=high, dtype=tf.float32))



def model(X, num_hidden = NUM_HIDDEN_NODES): 
    w_h = init_weights([1, num_hidden], 'uniform') 
    b_h = init_weights([1, num_hidden], 'zeros') 
    h = tf.nn.sigmoid(tf.matmul(X, w_h) + b_h) 

    w_o = init_weights([num_hidden, 1], 'xavier', xavier_params=(num_hidden, 1)) 
    b_o = init_weights([1, 1], 'zeros') 
    return tf.matmul(h, w_o) + b_o 



yhat = model(X, NUM_HIDDEN_NODES) 

train_op = tf.train.AdamOptimizer().minimize(tf.nn.l2_loss(yhat - Y)) 


plt.figure() 


with tf.Session() as sess: 
    sess.run(tf.initialize_all_variables()) 

    for v in tf.all_variables():
        print v.name



saver = tf.train.Saver() 

errors = [] 

with tf.Session() as sess: 
    sess.run(tf.initialize_all_variables()) 
    for i in range(NUM_EPOCHS): 
        for start, end in zip(range(0, len(trainx), MINI_BATCH_SIZE), range(MINI_BATCH_SIZE, len(trainx), MINI_BATCH_SIZE)):
            sess.run(train_op, feed_dict={X: trainx[start:end], Y: trainy[start:end]})

        mse = sess.run(tf.nn.l2_loss(yhat - validy), feed_dict={X: validx})
        errors.append(mse)
        if i % 100 == 0:
            print "epoch %d, validation MSE %g" % (i, mse)
            print sess.run(w_h)
            saver.save(sess, "/Python/tensorflow/res/save_net.ckpt", global_step=i)



    print " ******* AFTR *******" 
    for v in tf.all_variables(): 
     print v.name 
    plt.plot(errors) 
    plt.xlabel('#epochs') 
    plt.ylabel('MSE') 

To get the stored values back, I tried:

import tensorflow as tf 
import numpy as np 
import math, random 
import matplotlib.pyplot as plt 


NUM_HIDDEN_NODES = 2 



#SECOND PART TO GET THE STORED VALUES 

w_h = tf.Variable(np.arange(NUM_HIDDEN_NODES).reshape(1, NUM_HIDDEN_NODES), dtype=tf.float32, name='w_h') 
b_h = tf.Variable(np.arange(NUM_HIDDEN_NODES).reshape(1, NUM_HIDDEN_NODES), dtype=tf.float32, name='b_h') 

w_o = tf.Variable(np.arange(NUM_HIDDEN_NODES).reshape(NUM_HIDDEN_NODES, 1), dtype=tf.float32, name='w_o') 
b_o = tf.Variable(np.arange(1).reshape(1, 1), dtype=tf.float32, name='b_o') 



saver = tf.train.Saver() 
with tf.Session() as sess: 
    ckpt = tf.train.get_checkpoint_state("/Python/tensorflow/res/") 
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, "/Python/tensorflow/res/save_net.ckpt-400")
        print "Model loaded"
    else:
        print "No checkpoint file found"

    print("weights:", sess.run(w_h)) 
    print("biases:", sess.run(b_h)) 

Your help is very much appreciated; I have almost given up on this.

Many thanks again.

Answers

2

It looks like you are trying to restore your variables from a checkpoint file whose variables have different shapes than the current variables in your code.

Saving:

w_h = tf.Variable(tf.zeros([1, 5],name="w_h")) 
b_h = tf.Variable(tf.zeros([1, 5],name="b_h")) 
w_o = tf.Variable(tf.zeros([5,1],name="w_o")) 
b_o = tf.Variable(tf.zeros([1, 1],name="b_o")) 

Restoring (if you substitute the constants from the definitions above):

w_h = tf.Variable(np.arange(10).reshape(1, 10), dtype=tf.float32, name='w_h') 
b_h = tf.Variable(np.arange(10).reshape(1, 10), dtype=tf.float32, name='b_h') 

w_o = tf.Variable(np.arange(10).reshape(10, 1), dtype=tf.float32, name='w_o') 
b_o = tf.Variable(np.arange(1).reshape(1, 1), dtype=tf.float32, name='b_o') 

To avoid this kind of problem, try using functions for both training and inference, so that all of the code works with the same variables and constants.
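For example, here is a minimal sketch (not your exact code) of what that could look like: a single build_model helper that both the training script and the restore script call, so that the two graphs contain variables with identical names and shapes. The helper name and the explicit variable names are illustrative assumptions, not taken from your post.

import tensorflow as tf

NUM_HIDDEN_NODES = 2

def build_model(X, num_hidden=NUM_HIDDEN_NODES):
    # One place where every variable is created, with an explicit name,
    # so the checkpoint and the restore graph agree on names and shapes.
    w_h = tf.Variable(tf.random_normal([1, num_hidden], stddev=0.01), name="w_h")
    b_h = tf.Variable(tf.zeros([1, num_hidden]), name="b_h")
    h = tf.nn.sigmoid(tf.matmul(X, w_h) + b_h)
    w_o = tf.Variable(tf.random_normal([num_hidden, 1], stddev=0.01), name="w_o")
    b_o = tf.Variable(tf.zeros([1, 1]), name="b_o")
    return tf.matmul(h, w_o) + b_o

# Both the training script and the restore script build the graph the same way:
X = tf.placeholder(tf.float32, [None, 1], name="X")
yhat = build_model(X)
saver = tf.train.Saver()
# training:  saver.save(sess, "/Python/tensorflow/res/save_net.ckpt", global_step=i)
# restoring: saver.restore(sess, "/Python/tensorflow/res/save_net.ckpt-400")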

0

You are creating two sets of weights: one set at global scope, and a second set when init_weights is called inside model(). The second set of variables is the one that gets optimized, but both sets are saved.

In your eval code you create only one set of variables, so your restore only brings back the first set, which has not been modified since it was initialized.
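As a rough illustration (not from your post, and assuming the old tf.all_variables()/tf.initialize_all_variables() API you are using): every variable created before tf.train.Saver() goes into the checkpoint, and variables that were never given an explicit name show up under generated names like Variable and Variable_1.

import tensorflow as tf

w_first = tf.Variable(tf.zeros([1, 2]))   # first set, analogous to your module-level w_h
w_second = tf.Variable(tf.zeros([1, 2]))  # second set, analogous to the one built inside model()

saver = tf.train.Saver()  # by default saves *all* variables defined so far

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for v in tf.all_variables():
        print v.name   # "Variable:0", "Variable_1:0" -- both sets end up in the checkpoint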

The solution is to factor the model-creation code out into a function, so that exactly the same graph is built during training and during eval, or to use a meta_graph, which will recreate the graph structure during restore.
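A minimal sketch of the meta_graph route, assuming your training run also wrote the companion .meta file next to the checkpoint (saver.save() does this by default), so the file name below is your checkpoint path plus an assumed ".meta" suffix:

import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the saved graph structure instead of re-declaring variables by hand
    saver = tf.train.import_meta_graph("/Python/tensorflow/res/save_net.ckpt-400.meta")
    saver.restore(sess, "/Python/tensorflow/res/save_net.ckpt-400")

    # Inspect everything that was restored; the trained weights may appear under
    # generated names such as "Variable_2:0" if they were never explicitly named.
    for v in tf.all_variables():
        print v.name, sess.run(v)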