I have trained a CNN neural network in Python with 800 samples and tested it on 60. For prediction it gives me the same result every time: the Keras CNN produces identical output for every input.
#main file - run this to train the network
import numpy as np
from keras.datasets import cifar10
from datasetFetch import DataFetch
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')
import simplejson
from matplotlib import pyplot
from scipy.misc import toimage
# load data
#(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# create a grid of 3x3 images
#for i in range(0, 9):
# pyplot.subplot(3,3,1 + i)
# pyplot.imshow(toimage(X_train[i]))
# show the plot
#pyplot.show()
#init data
CONST_PHOTOS = 400 # number of photos of each type
y_train = []
#train data
data = DataFetch('orange',CONST_PHOTOS)
data1 = data.openPictures()
data = DataFetch('apple', CONST_PHOTOS)
data.removeErrorImages()
data2 = data.openPictures()
#test data
tdata = DataFetch('test-orange',30)
tdata1 = tdata.openPictures()
tdata = DataFetch('test-apple',30)
tdata2 = tdata.openPictures()
#add together data
X_train = data.connectData(data1,data2,'train')
y_train = data.getYtrain('train')
X_test = tdata.connectData(tdata1,tdata2,'test')
y_test = tdata.getYtrain('test')
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# normalize inputs from 0-255 to 0.0-1.0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train/255.0
X_test = X_test/255.0
#one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_train.shape[1] #number of categories
# Create the model
model = Sequential()
model.add(Conv2D(224, (11, 11), input_shape=(224, 224, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(55, (5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
model.add(Conv2D(13, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.5))
model.add(Conv2D(13, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
epochs = 100
lrate = 0.01
decay = lrate/epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
#print(model.summary())
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=10)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
#and then we save
# serialize model to JSON
model_json = model.to_json()
with open("Data/model.json", "w") as json_file:
json_file.write(simplejson.dumps(simplejson.loads(model_json), indent=4))
# serialize weights to HDF5
model.save_weights("Data/model.h5")
print("Saved model to disk")
I am using Keras with TensorFlow. The images are 224x224 pixels, each belonging to one of 2 categories. I don't know much about neural networks, and this is my first attempt at a project of this size. I've heard it might be overfitting, or that I might need one more significant layer, or that my batch size/epochs/learning rate are wrong.
Any help is appreciated!
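To tell overfitting apart from a model that is not learning at all, the History object returned by model.fit can be inspected. A minimal sketch, assuming this Keras version reports the metrics under the 'acc'/'val_acc' keys (newer versions use 'accuracy'/'val_accuracy'):
# Hedged sketch: compare training vs. validation accuracy across epochs.
# Training accuracy climbing while validation accuracy stalls suggests
# overfitting; both stuck near 50% suggests the model is not learning.
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=10)
pyplot.plot(history.history['acc'], label='train')
pyplot.plot(history.history['val_acc'], label='validation')
pyplot.legend()
pyplot.show()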
Edit 1: How does the seed affect the training of the network? After training, the accuracy is exactly 50%, and in a separate .py file that just loads the model and calls the predict function on it, it returns the exact same output percentages for whatever image I use. I tried both images used in training and external ones. I have added the dataFetch code.
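On the seed question: np.random.seed only fixes NumPy's generator, which makes NumPy-based draws (and, depending on the backend, weight initialization) repeat across runs; it does not touch Python's separate random module, which connectData below uses for shuffling. A minimal sketch of the distinction:
import random
import numpy as np

np.random.seed(7)
print(np.random.rand(3))  # same values on every run: NumPy's generator is seeded
print(random.random())    # differs between runs: Python's random module is a
                          # separate generator and would need random.seed(7)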
#preparing the photos to be 224x224 and getting them from urls saved in txt files
from PIL import Image
import requests
from io import BytesIO
import numpy as np
import socket
import random
from scipy import misc
from PIL import ImageChops
import math, operator
from functools import reduce
import glob
import os
import signal
compare = Image.open('/home/mihai/PycharmProjects/neuralnet/compare.jpg')
compare1 = Image.open('/home/mihai/PycharmProjects/neuralnet/compare1.jpg')
compare2 = Image.open('/home/mihai/PycharmProjects/neuralnet/compare2.jpg')
compare3 = Image.open('/home/mihai/PycharmProjects/neuralnet/compare3.jpg')
compare4 = Image.open('/home/mihai/PycharmProjects/neuralnet/compare4.jpg')
def rmsdiff(im1, im2):
    "Calculate the root-mean-square difference between two images"
    h = ImageChops.difference(im1, im2).histogram()
    # calculate rms
    return math.sqrt(reduce(operator.add, map(lambda h, i: h*(i**2), h, range(256)))/(float(im1.size[0]) * im1.size[1]))
class DataFetch:
    chosenFile = ''
    maxNumber = 0
    y_train = []
    y_test = []

    def __init__(self, choice, number):
        print('Images with '+choice+'s are being prepared')
        self.chosenFile = choice
        self.maxNumber = number

    def getPictures(self):
        # download up to maxNumber images from the URL list, skip placeholder
        # "image removed" pictures, and cache the rest to disk as 224x224 JPEGs
        imgArr = np.zeros((self.maxNumber, 224, 224, 3), dtype='uint8')
        count = 0

        class timeoutError(Exception):
            signal.alarm(0)

        def handler(signum, frame):
            raise timeoutError

        with open(self.chosenFile, "r") as ins:
            for line in ins:
                if count < self.maxNumber:
                    signal.signal(signal.SIGALRM, handler)
                    signal.alarm(3)  # abort any download that takes longer than 3s
                    try:
                        try:
                            r = requests.get(line)
                            try:
                                img = Image.open(BytesIO(r.content))
                                ok = 0
                                try:
                                    if rmsdiff(compare, img) > 1.3 and rmsdiff(compare1, img) > 1.3 and rmsdiff(compare2, img) > 1.3 and rmsdiff(compare3, img) > 1.3 and rmsdiff(compare4, img) > 1.3:
                                        ok = 1
                                    else:
                                        print('Image removed from website')
                                except ValueError:
                                    ok = 1
                                if ok == 1:
                                    img = img.resize((224, 224))
                                    img = img.convert('RGB')
                                    img.save('/home/mihai/PycharmProjects/neuralnet/images/'+self.chosenFile+'/'+str(count)+".jpg", 'JPEG')
                                    imgArr[count, :, :, :] = img
                                    count = count + 1
                                    print(count)
                            except OSError:
                                print('Image not Available')
                        except socket.error:
                            print('URL not available')
                    except timeoutError:
                        print("URL not available")
        return imgArr

    def openPictures(self):
        # load up to maxNumber cached images from disk into one uint8 array
        cdir = os.getcwd()
        imgArr = np.zeros((self.maxNumber, 224, 224, 3), dtype='uint8')
        count = 0
        for filename in glob.glob(cdir+'/images/'+self.chosenFile+'/*.jpg'):
            if count < self.maxNumber:
                img = Image.open(filename)
                imgArr[count, :, :, :] = img
                count = count + 1
        return imgArr

    def removeErrorImages(self):
        # delete cached pictures that match any of the "image removed" placeholders
        cdir = os.getcwd()
        for filename in glob.glob(cdir+'/images/'+self.chosenFile+'/*.jpg'):
            img = Image.open(filename)
            try:
                if rmsdiff(compare, img) < 1.3 or rmsdiff(compare1, img) < 1.3 or rmsdiff(compare2, img) < 1.3 or rmsdiff(compare3, img) < 1.3 or rmsdiff(compare4, img) < 1.3:
                    os.remove(filename)  # glob already yields the full path
            except ValueError:
                pass

    def getYtrain(self, outParam):
        # return the stored labels as an (n, 1) column vector
        if outParam == 'train':
            self.y_train = np.reshape(self.y_train, (len(self.y_train), 1))
            return self.y_train
        else:
            self.y_test = np.reshape(self.y_test, (len(self.y_test), 1))
            return self.y_test

    def connectData(self, data1, data2, outParam):
        # randomly interleave the two class arrays into one array, recording
        # a 0 (data1) or 1 (data2) label for each position as we go
        d1c = 0
        d2c = 0
        outList = []
        X_train = np.zeros((2 * self.maxNumber, 224, 224, 3), dtype='uint8')
        for i in range(2 * self.maxNumber):
            if d1c < self.maxNumber and d2c < self.maxNumber:
                if random.random() <= 0.5:
                    X_train[d1c + d2c, :, :, :] = data1[d1c, :, :, :]
                    d1c = d1c + 1
                    outList.append(0)
                else:
                    X_train[d1c + d2c, :, :, :] = data2[d2c, :, :, :]
                    d2c = d2c + 1
                    outList.append(1)
            else:
                if d1c < self.maxNumber:
                    X_train[d1c + d2c, :, :, :] = data1[d1c, :, :, :]
                    d1c = d1c + 1
                    outList.append(0)
                else:
                    if d2c < self.maxNumber:
                        X_train[d1c + d2c, :, :, :] = data2[d2c, :, :, :]
                        d2c = d2c + 1
                        outList.append(1)
        if outParam == 'train':
            self.y_train = outList
        else:
            if outParam == 'test':
                self.y_test = outList
        return X_train
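For comparison, the random interleaving that connectData performs can also be expressed with a single NumPy permutation. A sketch under the assumption that data1 and data2 are the (N, 224, 224, 3) arrays returned by openPictures:
# Hedged sketch: shuffle two class arrays and their labels together.
X = np.concatenate([data1, data2], axis=0)
y = np.array([0] * len(data1) + [1] * len(data2))  # 0 = first class, 1 = second
perm = np.random.permutation(len(X))  # one shared permutation keeps X and y aligned
X, y = X[perm], y[perm]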
Prediction code:
#run this to test a sample
from keras.utils import np_utils
from keras.models import model_from_json
from keras.optimizers import SGD
from datasetFetch import DataFetch
# load json and create model
json_file = open('Data/model2.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("Data/model2.h5")
print("Loaded model from disk")
epochs = 100
lrate = 0.01
decay = lrate/epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#prepare X_test
tdata = DataFetch('test-orange',int(3))
tdata1 = tdata.openPictures()
tdata = DataFetch('test-apple',int(3))
tdata2 = tdata.openPictures()
X_test = tdata.connectData(tdata1,tdata2,'test')
y_test = tdata.getYtrain('test')
X_test = X_test.astype('float32')
X_test = X_test/255.0
y_test = np_utils.to_categorical(y_test)
print('Number of samples to be tested: '+str(X_test.shape[0]))
scores = loaded_model.evaluate(X_test, y_test, verbose=0)
print(scores[1]*100)
score = loaded_model.predict(X_test,batch_size=6, verbose=1)
print(score) #prints percentages
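Since predict returns one row of softmax probabilities per sample, the predicted class labels can be read off with argmax; a minimal sketch, using the 0 = orange / 1 = apple encoding that connectData assigns:
import numpy as np

# Hedged sketch: convert per-class probabilities from predict into labels.
labels = np.argmax(score, axis=1)  # index of the highest probability per sample
print(labels)                      # 0 = orange, 1 = apple, per connectData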
The accuracy is the same because you set a fixed random seed –
What do you mean by "it gives the same result"? That it makes the same prediction no matter what input you pass it, or that you pass it the same input and always get 50% accuracy? – gionni
Ah sorry, I thought you were using model.predict in the same script. What are the model's accuracy/loss during the training phase? And how do you reload the data and the model before using it for prediction? –