
How can I log (with a SummaryWriter, e.g. for TensorBoard) individual scalar elements of a tensor Variable? For example, how can I log the individual weights of a given layer or node in the network?

In my example code below, I press a general feed-forward neural network into service for simple linear regression, and want (in that case) to log the weights of the lone node in the lone hidden layer as learning progresses.

During a session, I can get at these values explicitly with, e.g.,

sess.run(layer_weights)[0][i][0] 

for the i-th weight, where layer_weights is a list of the weight Variables; but I can't figure out how to log the corresponding scalar values. If I try

w1 = tf.slice(layer_weights[0], [0], [1])[0] 
tf.scalar_summary('w1', w1) 

or

w1 = layer_weights[0][1][0] 
tf.scalar_summary('w1', w1) 

I get

ValueError: Shape (5, 1) must have rank 1

How can I log individual scalar values from a TensorFlow Variable?


from __future__ import (absolute_import, print_function, division, unicode_literals) 

import numpy as np 
import tensorflow as tf 


# Basic model parameters as external flags 
flags = tf.app.flags 
FLAGS = flags.FLAGS 
flags.DEFINE_float('network_nodes', [5, 1], 'The number of nodes in each layer, including input and output.') 
flags.DEFINE_float('epochs', 250, 'Epochs to run') 
flags.DEFINE_float('learning_rate', 0.15, 'Initial learning rate.') 
flags.DEFINE_string('data_dir', './data', 'Directory to hold training and test data.') 
flags.DEFINE_string('train_dir', './_tmp/train', 'Directory to log training (and the network def).') 
flags.DEFINE_string('test_dir', './_tmp/test', 'Directory to log testing.') 


def variable_summaries(var, name): 
    with tf.name_scope("summaries"): 
     mean = tf.reduce_mean(var) 
     tf.scalar_summary('mean/' + name, mean) 
     with tf.name_scope('stddev'): 
      stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean))) 
      tf.scalar_summary('sttdev/' + name, stddev) 
    tf.scalar_summary('max/' + name, tf.reduce_max(var)) 
    tf.scalar_summary('min/' + name, tf.reduce_min(var)) 
    tf.histogram_summary(name, var) 


def add_layer(input_tensor, input_dim, output_dim, neuron_fn, layer_name): 
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.constant(0.1, shape=[output_dim]))
        with tf.name_scope('activations'):
            with tf.name_scope('weighted_inputs'):
                weighted_inputs = tf.matmul(input_tensor, weights) + biases
                tf.histogram_summary(layer_name + '/weighted_inputs', weighted_inputs)
            output = neuron_fn(weighted_inputs)
    return output, weights, biases


def make_ff_network(nodes, input_activation, hidden_activation_fn=tf.nn.sigmoid, output_activation_fn=tf.nn.softmax): 
    layer_activations = [input_activation]
    layer_weights = []
    layer_biases = []
    n_layers = len(nodes)
    for l in range(1, n_layers):
        a, w, b = add_layer(layer_activations[l - 1], nodes[l - 1], nodes[l],
                            output_activation_fn if l == n_layers - 1 else hidden_activation_fn,
                            'output_layer' if l == n_layers - 1 else 'hidden_layer' + (
                                '_{}'.format(l) if n_layers > 3 else ''))
        layer_activations += [a]
        layer_weights += [w]
        layer_biases += [b]
    with tf.name_scope('output'):
        net_activation = tf.identity(layer_activations[-1], name='network_activation')
    return net_activation, layer_weights, layer_biases

# Inputs and outputs 
with tf.name_scope('data'): 
    x = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[0]], name='inputs') 
    y_ = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[-1]], name='correct_outputs') 

# Network structure 
y, layer_weights, layer_biases = make_ff_network(FLAGS.network_nodes, x, output_activation_fn=tf.identity) 

# Metrics and operations 
with tf.name_scope('accuracy'): 
    with tf.name_scope('loss'): 
        loss = tf.reduce_mean(tf.square(y - y_))
    # NONE OF THESE WORK: 
    #w1 = tf.slice(layer_weights[0], [0], [1])[0] 
    #tf.scalar_summary('w1', w1) 
    #w1 = layer_weights[0][1][0] 
    #tf.scalar_summary('w1', w1) 
    tf.scalar_summary('loss', loss) 

train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(loss) 

# Logging 
train_writer = tf.train.SummaryWriter(FLAGS.train_dir, tf.get_default_graph()) 
test_writer = tf.train.SummaryWriter(FLAGS.test_dir) 
merged = tf.merge_all_summaries() 



W = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) 

train_x = np.random.rand(100000, FLAGS.network_nodes[0]) 
train_y = np.array([np.dot(W, train_x.T)+ 6.0]).T 

test_x = np.random.rand(1000, FLAGS.network_nodes[0]) 
test_y = np.array([np.dot(W, test_x.T)+ 6.0]).T 

with tf.Session() as sess: 
    sess.run(tf.initialize_all_variables()) 

    for ep in range(FLAGS.epochs): 
        sess.run(train_step, feed_dict={x: train_x, y_: train_y})
        summary = sess.run(merged, feed_dict={x: test_x, y_: test_y})
        test_writer.add_summary(summary, ep+1)

    # THESE WORK 
    print('w1 = {}'.format(sess.run(layer_weights)[0][0][0])) 
    print('w2 = {}'.format(sess.run(layer_weights)[0][1][0])) 
    print('w3 = {}'.format(sess.run(layer_weights)[0][2][0])) 
    print('w4 = {}'.format(sess.run(layer_weights)[0][3][0])) 
    print('w5 = {}'.format(sess.run(layer_weights)[0][4][0])) 
    print(' b = {}'.format(sess.run(layer_biases)[0][0])) 

Answer


There are various errors in the code.

The main problem is that you are passing a python list of tensors to scalar_summary. The error saying your tensor must have rank 1 comes from the slice operation: tf.slice expects begin and size to have one entry per dimension of the input, and layer_weights[0] has shape (5, 1), i.e. rank 2.
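
A minimal working version of that slice attempt, assuming the same old TF 0.x summary API the question uses, is to give tf.slice one begin/size entry per dimension and then reshape the (1, 1) result down to a scalar:

# slice out element [0][0] of the (5, 1) weight matrix, then
# reshape the (1, 1) result to a scalar so scalar_summary accepts a single tag
w1 = tf.reshape(tf.slice(layer_weights[0], [0, 0], [1, 1]), [])
tf.scalar_summary('w1', w1)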

You want to pass the weights and log them layer by layer. One way to do that is to log every weight of each layer:

for weight in layer_weights: 
    # build a grid of tags matching the weight matrix's shape, one tag per element
    rows, cols = weight.get_shape().as_list()
    tf.scalar_summary([['%s_w%d%d' % (weight.name, i, j) for j in xrange(cols)] for i in xrange(rows)], weight)
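
Alternatively (a sketch under the same old summary API, not part of the original answer), you can flatten each weight matrix and emit one scalar_summary per element, so every weight gets its own uniquely named chart:

for weight in layer_weights:
    flat = tf.reshape(weight, [-1])  # e.g. (5, 1) -> (5,)
    for k in range(flat.get_shape().as_list()[0]):
        tf.scalar_summary('%s_w%d' % (weight.name, k), flat[k])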

This produces these nice graphs in TensorBoard (tensorboard --logdir=./_tmp/test):

[screenshot: per-weight scalar charts in TensorBoard]
