So I decided to go one step further with Google's TensorFlow MNIST tutorial and try to build a basic face recognition system in Python with TensorFlow.
Directories:
amar -> contains all the target images
test -> contains all the test images, including negatives
train -> contains all the training images
There are 60 image files in each directory. I use the directory names as the image labels.
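For reference, the encoding I have in mind is a one-hot vector per directory, roughly as sketched below (the exact ordering depends on how the directories are listed on disk, so this is just an illustration, not actual output):

# Intended label encoding, one entry per directory (ordering is an assumption):
label_map = {
    'amar':  [1, 0, 0],   # target person
    'test':  [0, 1, 0],   # test images, including negatives
    'train': [0, 0, 1],   # training images
}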
At this point I am able to extract the image pixel intensities and everything is wired up, but I get the following error:
I tensorflow/core/common_runtime/local_device.cc:40] Local device intra op parallelism threads: 4
I tensorflow/core/common_runtime/direct_session.cc:58] Direct session inter op parallelism threads: 4
[[0, 0, 1], [0, 0, 1]]
Traceback (most recent call last):
  File "face.py", line 82, in <module>
    model()
  File "face.py", line 74, in model
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 357, in run
    np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype)
ValueError: setting an array element with a sequence.
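As far as I understand, this ValueError is raised by NumPy when the value fed for a placeholder cannot be converted into a rectangular array, for example when the nested lists have inconsistent lengths. A minimal sketch of the same failure (my guess at what is going on, not my actual data):

import numpy as np

# Rows of different lengths cannot form a regular 2-D array, so the
# conversion raises a ValueError like the one above (the exact wording
# depends on the NumPy version).
ragged = [[1, 2, 3], [4, 5]]
np.array(ragged, dtype=np.float32)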
Here is the code:
def preprocessImages(dir): # Greyscaling the images
    from os import listdir
    from os.path import isfile, join
    import Image
    path = dir
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    for files in onlyfiles:
        img = Image.open(str(path+files)).convert('L')
        img.save(str(path+files))
def extractImages(path): # Extracting image pixel intensities in an array
    images = []
    from os import listdir
    from os.path import isfile, join
    import Image
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    for image in onlyfiles:
        img = Image.open(path+image)
        pixVal = list(img.getdata())
        images.append(pixVal)
    return images
def extractLabels(target): # Extracting labels or directories accordingly
    res = []
    path = './'
    from os import listdir
    from os.path import isfile, join
    import Image
    onlyfiles = [f for f in listdir(path) if not isfile(join(path, f))]
    for i in onlyfiles:
        if i == target:
            res.append(1)
        else:
            res.append(0)
    return res
dirs = ['train','test','amar'] # put some directories here
labels = []
images = []
for di in dirs:
    labels.append(extractLabels(di))
    images.append(extractImages(('./'+di+'/')))
def batch(no): # Function to select a batch of elements from both the arrays
    import random
    import numpy as np
    global labels
    global images
    lab = []
    img = []
    for i in range(no):
        lab.append(random.choice(labels))
    for i in range(no):
        img.append(random.choice(images))
    return img, lab
def model():
    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 409600]) # The images of 640x640 = 409600 pixels
    W = tf.Variable(tf.zeros([409600, 3])) # The weights, one per pixel and class
    b = tf.Variable(tf.zeros([3])) # The biases, one per class
    y = tf.nn.softmax(tf.matmul(x, W) + b) # The predicted y viz. y = softmax(W*x + b)
    y_ = tf.placeholder(tf.float32, [None, 3]) # The real y that will be checked against the prediction
    cross_entropy = -tf.reduce_sum(y_*tf.log(y)) # The cross-entropy error between y and y_
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) # The method to optimize the error at each training step
    init = tf.initialize_all_variables() # Initializing all variables
    sess = tf.Session()
    sess.run(init)
    # Training our model
    for i in range(1000):
        batch_xs, batch_ys = batch(2)
        print batch_ys
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) # Checking the prediction
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # Accuracy of the prediction
    accuracy = sess.run(accuracy, feed_dict={x: extractImages('./test/'), y_: extractLabels('test')})*100 # Expressed as a percentage
    print "The accuracy of the model was", accuracy, "%"
    print type(mnist)

model()
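If it helps, this is the kind of sanity check I intend to run on the batches; assuming every image really has 409600 pixels, both conversions should succeed and give rectangular arrays, and if any row has a different length this should reproduce the error above:

import numpy as np

batch_xs, batch_ys = batch(2)
print(np.array(batch_xs, dtype=np.float32).shape)  # expected (2, 409600) if all images match the placeholder size
print(np.array(batch_ys, dtype=np.float32).shape)  # expected (2, 3)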