I have tried things with tf.Print, but it does not seem to print anything. How can I check the dimensions of each tensor while running a TensorFlow model?
Here is my code, where I am trying to tf.Print the dimensions of a tensor. How should I fix the code so that it prints the shape of reshape?
import tensorflow as tf

batch_size = 32
image_height = 480
image_width = 720
num_channels = 4
num_labels = 18

deep_graph = tf.Graph()
with deep_graph.as_default():
    tf_valid_dataset = tf.constant(x_valid, dtype = tf.float32_ref)
    #tf_test_dataset = tf.constant(x_test, dtype = tf.float32_ref)
    tf_train_dataset = tf.placeholder(
        tf.float32,
        shape = (batch_size, image_height, image_width, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape = (batch_size, num_labels))

    conv1_weight = tf.Variable(
        tf.truncated_normal([5, 5, num_channels, 32],
                            stddev = 0.1, seed = 0))
    conv1_biases = tf.Variable(tf.zeros([32]))
    conv2_weights = tf.Variable(
        tf.truncated_normal([5, 5, 32, 64],
                            stddev = 0.1, seed = 0, dtype = tf.float32))
    conv2_biases = tf.Variable(tf.constant(0.1, shape = [64]))
    fc1_weights = tf.Variable(
        tf.truncated_normal([image_height * image_width * 32, 512],
                            stddev = 0.1, seed = 0, dtype = tf.float32))
    fc1_biases = tf.Variable(tf.constant(0.1, shape = [512]))
    fc2_weights = tf.Variable(
        tf.truncated_normal([512, 512],
                            stddev = 0.1, seed = 0, dtype = tf.float32))
    fc2_biases = tf.Variable(tf.constant(0.1, shape = [512]))
    fc3_weights = tf.Variable(
        tf.truncated_normal([512, num_labels], stddev = 0.1, seed = 0, dtype = tf.float32))
    fc3_biases = tf.Variable(tf.constant(0.1, shape = [num_labels]))

    def model(data, train = False):
        conv1 = tf.nn.conv2d(data, conv1_weight, strides = [1, 1, 1, 1], padding = 'SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        pool1 = tf.nn.max_pool(relu1, ksize = [1, 2, 2, 1],
                               strides = [1, 2, 2, 1], padding = 'SAME')
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides = [1, 1, 1, 1], padding = 'SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        pool2 = tf.nn.max_pool(relu2, ksize = [1, 2, 2, 1],
                               strides = [1, 1, 1, 1], padding = 'SAME')
        # Reshape the feature map cuboid into a 2D matrix to feed it to the
        # fully connected layers.
        pool_shape = pool2.get_shape().as_list()
        reshape = tf.reshape(pool2, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
        reshape = tf.Print(reshape, [reshape], "reshape: ", summarize=10)
        # Fully connected layers
        hidden_layer1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        # For training data, add 0.5 dropout.
        if train:
            hidden_layer1 = tf.nn.dropout(hidden_layer1, 0.5, seed = 0)
        hidden_layer2 = tf.nn.relu(tf.matmul(hidden_layer1, fc2_weights) + fc2_biases)
        if train:
            hidden_layer2 = tf.nn.dropout(hidden_layer2, 0.5, seed = 0)
        return tf.matmul(hidden_layer2, fc3_weights) + fc3_biases

    # Call the model() function to make train_prediction
    train_prediction = model(tf_train_dataset, True)
    # Calculate the loss by using train_prediction
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(train_prediction - tf_train_labels), 1))
    # Add L2 regularization to the loss
    loss += 1e-7 * (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                    tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases) +
                    tf.nn.l2_loss(fc3_weights) + tf.nn.l2_loss(fc3_biases))
    # Optimizer
    global_step = tf.Variable(0, name = 'global_step', trainable = False)
    starter_learning_rate = 0.001
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               100000,
                                               0.96,
                                               staircase = True)
    optimizer = tf.train.AdamOptimizer(learning_rate, 0.95).minimize(loss, global_step = global_step)
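For reference, tf.Print prints the values of the tensors passed in its second argument, not their shapes; passing tf.shape(reshape) would print the dynamic shape instead. Note also that tf.Print (TF 1.x) writes to standard error and only fires when the node it wraps is actually evaluated in a session. Below is a minimal, self-contained sketch of this pattern; the placeholder x and its sizes are purely illustrative, not taken from the code above:

import numpy as np
import tensorflow as tf  # TF 1.x API

x = tf.placeholder(tf.float32, shape=(None, 4, 6))
# Attach tf.shape(x) so the dynamic shape gets printed; passing [x] itself
# would dump the tensor's values instead of its dimensions.
y = tf.Print(x, [tf.shape(x)], "shape of x: ")
out = y * 2  # tf.Print is an identity op, so route its output downstream

with tf.Session() as sess:
    # Evaluating `out` triggers the print; "shape of x: [2 4 6]" goes to stderr.
    sess.run(out, feed_dict={x: np.zeros((2, 4, 6), dtype=np.float32)})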
You can inspect the value of 'reshape' and its shape in a session by using 'x.get_shape()'. – martianwars
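To illustrate the comment's suggestion: get_shape() returns the static shape known at graph-construction time, so it can be printed with plain Python and no session at all. A small sketch (the placeholder below is illustrative):

import tensorflow as tf  # TF 1.x API

t = tf.placeholder(tf.float32, shape=(32, 240, 360, 64))
# Static shape, available while building the graph:
print(t.get_shape().as_list())  # -> [32, 240, 360, 64]

For dimensions that are only determined at runtime (e.g. a None batch size), evaluate tf.shape(t) in a session instead.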