
Multivariate linear regression with TensorFlow gives NaN results

I'm doing multivariate linear regression on sklearn's Boston housing dataset, a 506x13 matrix. My plan is to train on all of the data, then "plug in" one sample, e.g. boston_dataset.data[39], and look at the loss. But when I print the results, all I get is NaN. Here is my code.

import tensorflow as tf 
import numpy as np 
import matplotlib.pyplot as plt 
from sklearn.datasets import load_boston 

np.set_printoptions(suppress=True) 

boston = load_boston() 

m = boston.data.shape[0] - 1 

# note: these two lines are never used below; np.insert without an axis
# argument also flattens the array instead of prepending a bias column
bt_unfixed = np.transpose(boston.data) 
bt = np.insert(bt_unfixed, 0, 1) 

Y = tf.placeholder(tf.float64, name='Y') 
X = tf.placeholder(tf.float64, [1, 13], name='X') 
W = tf.Variable(tf.zeros([13, 1], dtype=tf.float64), name='weights') 
b = tf.Variable(0.5, dtype=tf.float64, name='bias') 

hypothesis = tf.add(tf.matmul(X, W), b) 

loss = tf.reduce_sum(tf.square(hypothesis - Y))/(2 * m) 

optimizer = tf.train.GradientDescentOptimizer(0.01) 

train_op = optimizer.minimize(loss) 

with tf.Session() as sess: 
    sess.run(tf.initialize_all_variables()) 
    for i in range(0, 500): 
        for (x, y) in zip(boston.data, boston.target): 
            sess.run(train_op, feed_dict={X: x.reshape(1, 13), Y: y}) 
        if (i + 1) % 50 == 0: 
            print "Ran " + str(i + 1) + " times\nW=" + str(sess.run(W)) + "\nb=" + str(sess.run(b)) 

    print "Done!\n" 
    print "Running test...\n" 
    # run the `loss` op (there is no `cost` in this graph), index the target
    # array directly, and reshape the sample to match the placeholder
    t = sess.run(loss, feed_dict={X: boston.data[504].reshape(1, 13), 
                                  Y: boston.target[504]}) 
    print "loss = " + str(t) + " Real value: " + str(boston.target[504]) + \
        " Pred: " + str(sess.run(hypothesis, 
                                 feed_dict={X: boston.data[504].reshape(1, 13)})) 

Thanks! And please feel free to add any suggestions.

Answer


It looks like you don't do any preprocessing on the Boston data, which lets the loss and hypothesis values blow up to Inf (and then NaN) during training. So I standardized the data (per feature: subtract the mean, divide by the standard deviation) and it works. Here is my code.

import tensorflow as tf 
import numpy as np 
import matplotlib.pyplot as plt 
from sklearn.datasets import load_boston 


boston = load_boston() 

data = boston.data 
label = boston.target 

# standardize each feature: zero mean, unit variance 
data -= np.mean(data, axis=0) 
data /= np.std(data, axis=0) 

M = boston.data.shape[0] 


Y = tf.placeholder(tf.float32, name='Y') 
X = tf.placeholder(tf.float32, [1, 13], name='X') 

W = tf.Variable(tf.random_normal([13, 1]), name='weights') 
b = tf.Variable(tf.random_normal([1]), name='bias') 

hypothesis = tf.add(tf.matmul(X, W), b) 

loss = tf.reduce_sum(tf.square(hypothesis - Y))/(2. * (M - 1)) 

optimizer = tf.train.GradientDescentOptimizer(0.01) 

train_op = optimizer.minimize(loss) 

with tf.Session() as sess: 
    sess.run(tf.initialize_all_variables()) 

    for i in range(0, 500): 
        for l in range(M): 
            _, loss_val, hypo = sess.run( 
                [train_op, loss, hypothesis], 
                feed_dict={X: data[l, :].reshape([1, 13]), 
                           Y: label[l]}) 
        if (i + 1) % 50 == 0: 
            print "Ran " + str(i + 1) + " times\nW=" + \
                str(sess.run(W)) + "\nb=" + str(sess.run(b)) 

    print "Done!\n" 
    print "Running test...\n" 
    t = sess.run( 
        loss, feed_dict={X: data[50].reshape([1, 13]), 
                         Y: label[50]}) 
    print "loss = " + str(t) 
    print "Real value Y: " + str(label[50]) 
    print "Pred Y: " + str(sess.run(hypothesis, 
                                    feed_dict={X: data[50].reshape([1, 13])}))