I took the simple neural network example from the TensorFlow GitHub and tried to split it into two parts. The first part is training + testing; the second part separates out the testing part, which needs to restore the model. The restore seems to work, but the predict function cannot be found. This is about saving and restoring a model based on the TensorFlow Estimator template.

Here is the first part:

from __future__ import print_function 

from tensorflow.python.saved_model import builder as saved_model_builder 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

import tensorflow as tf 
import matplotlib 
matplotlib.use('TkAgg')  # select the backend before pyplot is imported 
import matplotlib.pyplot as plt 
import numpy as np 
import shutil 

# Parameters 
learning_rate = 0.1 
num_steps = 1000 
batch_size = 128 
display_step = 100 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of neurons 
n_hidden_2 = 256 # 2nd layer number of neurons 
num_input = 784 # MNIST data input (img shape: 28*28) 
num_classes = 10 # MNIST total classes (0-9 digits) 

#init = tf.initialize_all_variables() 

sess = tf.Session() 

# Define the input function for training 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels, 
    batch_size=batch_size, num_epochs=None, shuffle=True) 

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT: 
     return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
     logits=logits, labels=tf.cast(labels, dtype=tf.int32))) 
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators requires to return a EstimatorSpec, that specify 
    # the different ops for training, evaluating, ... 
    estim_specs = tf.estimator.EstimatorSpec(
     mode=mode, 
     predictions=pred_classes, 
     loss=loss_op, 
     train_op=train_op, 
     eval_metric_ops={'accuracy': acc_op}) 

    return estim_specs 

# Build the Estimator 
model = tf.estimator.Estimator(model_fn) 

# Train the Model 
model.train(input_fn, steps=num_steps) 

# Evaluate the Model 
# Define the input function for evaluating 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels, 
    batch_size=batch_size, shuffle=False) 
# Use the Estimator 'evaluate' method 
model.evaluate(input_fn) 

#model.export_savedmodel(".", input_fn) 

init = tf.global_variables_initializer() 
sess.run(init) 

tf.add_to_collection("nn_model", model) 

# Add ops to save and restore all the variables. 
#saver = tf.train.Saver() 

#save_path = saver.save(sess, "model/model.ckpt") 

try: 
    shutil.rmtree("model") 
except: 
    pass 

builder = saved_model_builder.SavedModelBuilder("model") 
builder.add_meta_graph_and_variables(sess, ["nn"]) 
builder.save() 

print("Model saved in file") 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(model.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

The above program works fine. It saves the model (I cannot be completely sure, but I see all the directories being created). It does, however, give a warning:

WARNING:tensorflow:Error encountered when serializing nn_model. Type is unsupported, or the types of the items don't match field type in CollectionDef. 'Estimator' object has no attribute 'name'
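For context, this warning comes from the collection mechanism itself: when the graph is serialized, collections are written into CollectionDef protos, which can only hold graph elements (ops, tensors, variables) or simple scalars and strings, so an arbitrary Python object such as an Estimator is dropped, and the second script will not get it back out of the collection. A minimal sketch of the same effect, assuming TF 1.x (NotATensor is just an illustrative stand-in, not part of the code above):

import tensorflow as tf 

class NotATensor(object): 
    pass 

# Graph collections are serialized into CollectionDef protos; a plain Python 
# object has no proto representation, so the whole collection is dropped. 
tf.add_to_collection('demo', NotATensor()) 

# Writing the meta graph (SavedModelBuilder does this internally) triggers a 
# warning like: "Error encountered when serializing demo. Type is unsupported, 
# or the types of the items don't match field type in CollectionDef. 
# 'NotATensor' object has no attribute 'name'" 
tf.train.export_meta_graph('/tmp/demo.meta') 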

Here is the "apply" program that restores the model and tries to apply it, and it fails at the predict() line:

import tensorflow as tf 
import matplotlib.pyplot as plt 
import numpy as np 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

sess=tf.Session() 
#First let's load meta graph and restore weights 
#saver = tf.train.import_meta_graph('model/model.ckpt.meta') 
#saver.restore(sess,tf.train.latest_checkpoint('nn_model')) 
tf.saved_model.loader.load(sess, ["nn"], "model") 

model = tf.get_collection('nn_model') 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(model.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

It gives this error:

Traceback (most recent call last): 
  File "applynn.py", line 35, in <module> 
    preds = list(model.predict(input_fn)) 
AttributeError: 'module' object has no attribute 'predict'

So what is missing here?

Answer

So this problem is now fixed. This is what I had to do to resolve it.

The first part is:

from __future__ import print_function 

from tensorflow.python.saved_model import builder as saved_model_builder 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

import tensorflow as tf 
import matplotlib 
matplotlib.use('TkAgg')  # select the backend before pyplot is imported 
import matplotlib.pyplot as plt 
import numpy as np 
import shutil 

# Parameters 
learning_rate = 0.1 
num_steps = 1000 
batch_size = 128 
display_step = 100 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of neurons 
n_hidden_2 = 256 # 2nd layer number of neurons 
num_input = 784 # MNIST data input (img shape: 28*28) 
num_classes = 10 # MNIST total classes (0-9 digits) 

#init = tf.initialize_all_variables() 

sess = tf.Session() 

# Define the input function for training 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels, 
    batch_size=batch_size, num_epochs=None, shuffle=True) 

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT: 
     return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
     logits=logits, labels=tf.cast(labels, dtype=tf.int32))) 
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators requires to return a EstimatorSpec, that specify 
    # the different ops for training, evaluating, ... 
    estim_specs = tf.estimator.EstimatorSpec(
     mode=mode, 
     predictions=pred_classes, 
     loss=loss_op, 
     train_op=train_op, 
     eval_metric_ops={'accuracy': acc_op}) 

    return estim_specs 

# Build the Estimator 
estimator = tf.estimator.Estimator(model_fn, model_dir='estimator') 

# Train the Model 
estimator.train(input_fn, steps=num_steps) 

# Evaluate the Model 
# Define the input function for evaluating 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels, 
    batch_size=batch_size, shuffle=False) 
# Use the Estimator 'evaluate' method 
estimator.evaluate(input_fn) 

#model.export_savedmodel(".", input_fn) 

init = tf.global_variables_initializer() 
sess.run(init) 

tf.add_to_collection("nn_model", estimator) 

# Add ops to save and restore all the variables. 
#saver = tf.train.Saver() 

#save_path = saver.save(sess, "model/model.ckpt") 

try: 
    shutil.rmtree("model") 
except: 
    pass 

builder = saved_model_builder.SavedModelBuilder("model") 
builder.add_meta_graph_and_variables(sess, ["nn"]) 
builder.save() 

print("Model saved in file") 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(estimator.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

The second part is:

import tensorflow as tf 
import matplotlib.pyplot as plt 
import numpy as np 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

# Parameters 
learning_rate = 0.1  # model_fn below references this 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of neurons 
n_hidden_2 = 256 # 2nd layer number of neurons 
num_classes = 10 # MNIST total classes (0-9 digits) 

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT: 
     return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
     logits=logits, labels=tf.cast(labels, dtype=tf.int32))) 
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators requires to return a EstimatorSpec, that specify 
    # the different ops for training, evaluating, ... 
    estim_specs = tf.estimator.EstimatorSpec(
     mode=mode, 
     predictions=pred_classes, 
     loss=loss_op, 
     train_op=train_op, 
     eval_metric_ops={'accuracy': acc_op}) 

    return estim_specs 


sess=tf.Session() 

estimator = tf.estimator.Estimator(model_fn, model_dir='estimator') 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(estimator.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

Note that I call the model variable estimator, since that is what it really is. I also pass a model_dir so that the Estimator is serialized separately from the other variables. I also had to explicitly make sure that the second Python file has access to both functions and to any variables they depend on. A few other minor fixes were made to the code.
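Passing the same model_dir is what makes the second script work: tf.estimator.Estimator writes checkpoints to model_dir during train(), and a fresh Estimator built with the same model_fn and the same model_dir restores the latest checkpoint automatically when predict() or evaluate() is called. A quick sanity check, assuming the first script has already run and populated the 'estimator' directory:

import tensorflow as tf 

# predict() restores from the newest checkpoint in model_dir on its own; 
# this only confirms that one exists before running the second script. 
ckpt = tf.train.latest_checkpoint('estimator') 
print("Will restore from:", ckpt)  # e.g. estimator/model.ckpt-1000 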