你好我正在嘗試使用張量流預處理Logistic迴歸(抱歉,如果我的代碼看起來愚蠢),並且我已經在numpy中寫入了一次成本函數,並且一次在張量流中,我得到了不同的結果對於相同的起始重量,有人能幫助我嗎?的成本函數Tensorflow邏輯迴歸不同的輸出
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
DataSize=1000
data, y = make_blobs(n_samples=1000, centers=2, n_features=2,random_state=1,center_box=(-5.0,5.0))
plt.scatter(data[:,0],data[:,1])
plt.show(block=False)
x=np.linspace(-1,5,1000)
b=np.ones([1,1])
W=np.ones([2,1])
asd=W*x.T+b
pred=np.dot(data,W)+b
plt.plot(x,asd[0])
plt.show(block=False)
result=((1))/(1+np.exp(-pred))
s=np.log(result)
J=-(y.T.dot(s)+(1-y).T.dot(1-s))/1000
print ("cost in numpy",J)
#
with tf.variable_scope("scopi",reuse=True):
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
b = tf.Variable(tf.ones((1,1)),name="bias")
W = tf.Variable(tf.ones((1,2)),name="weights")
ypred=W*X+b
hx=tf.reduce_sum(tf.sigmoid(ypred),reduction_indices=1)
#cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
J=-tf.reduce_sum(tf.mul(tf.transpose(Y),hx)+tf.mul(tf.transpose(1-Y),(1-hx)))/1000
opti=tf.train.AdamOptimizer(0.1).minimize(J)
with tf.Session() as session:
session.run(tf.initialize_all_variables())
h = session.run(J, feed_dict={X: data, Y: y})
print ("cost in tensorflow", h)
# epoch = 100
# for i in range(epoch):
# for j in range(DataSize):
# session.run(opti, feed_dict={X: data[j], Y: y[j]})
#
#
#
#
#
# if i%10==0:
#
# a=session.run(J,feed_dict={X:data,Y:y})
#
# print ("cost ", a)
成本樣品:
( '在numpy的成本',陣列([2.37780175]))( '成本tensorflow',0.073667422)
你能告訴我們樣本輸出嗎? – martianwars
('cost in numpy',array([2.37780175])) ('tensorflow'中的成本,0.073667422) – DavidOooO
您可以將其添加到問題中嗎? – martianwars