
I'm trying to implement the algorithm from this paper: https://arxiv.org/pdf/1702.02098.pdf, but the TensorFlow streaming metrics are not working.

For some reason I always get a recall of 0.2 and an accuracy of 0. Am I using the streaming metrics correctly? The documentation only provides pseudocode. Here is my code:


import tensorflow as tf
import numpy as np
from nltk.corpus import brown
from gensim.models import Word2Vec
from sklearn.preprocessing import LabelBinarizer
from tensorflow.contrib.metrics import streaming_accuracy, streaming_recall

data = brown.tagged_sents()

# Collect the set of POS tags and fit a one-hot encoder on them
tags = set()
for sent in data:
    for token, tag in sent:
        tags.add(tag)

label_processor = LabelBinarizer()
label_processor.fit(list(tags))

embedding_dim = 100
word2vec = Word2Vec(brown.sents(), size=embedding_dim, min_count=1)
embedding = word2vec.wv
del word2vec  # saves RAM

test = 0.1
val = 0.1

# Shuffle the sentence indices and split into test/validation/train
data_length = len(data)
inds = np.random.permutation(np.arange(data_length))

test_inds = inds[:int(data_length*test)]
val_inds = inds[int(data_length*test):int(data_length*(val+test))]
train_inds = inds[int(data_length*(val+test)):]

# Build the validation set: one embedding row per token, one-hot labels
val_x = []
val_y = []
for i in val_inds:
    x = []
    tags = []

    for token, tag in data[i]:
        x.append(embedding[token])
        tags.append(tag)

    x = np.array(x)
    x = x.reshape(x.shape[0], 1, x.shape[1], 1)
    y = np.array(label_processor.transform(tags))

    val_x.append(x)
    val_y.append(y)

val_x = np.concatenate(val_x, axis=0)
val_y = np.concatenate(val_y, axis=0)

# Build the test set the same way
test_x = []
test_y = []
for i in test_inds:
    x = []
    tags = []

    for token, tag in data[i]:
        x.append(embedding[token])
        tags.append(tag)

    x = np.array(x)
    x = x.reshape(x.shape[0], 1, x.shape[1], 1)
    y = np.array(label_processor.transform(tags))

    test_x.append(x)
    test_y.append(y)

test_x = np.concatenate(test_x, axis=0)
test_y = np.concatenate(test_y, axis=0)

learning_rate = 0.001
n_iter = 12000
display_step = 100
depth = 5

n_classes = label_processor.classes_.shape[0]
dropout_prob = 0.50

x = tf.placeholder(tf.float32, [None, 1, embedding_dim, 1])
y = tf.placeholder(tf.float32, [None, n_classes])
dropout = tf.placeholder(tf.float32, [])
depth_tensor = tf.constant(depth, tf.float32)

def data_processor(data, train_inds, word2vec, label_processor, n_iter):
    # Yields (x, y) pairs, one randomly chosen sentence per batch
    inds = np.random.randint(len(train_inds), size=n_iter)

    for i in inds:
        x = []
        tags = []

        for token, tag in data[train_inds[i]]:
            x.append(word2vec[token])
            tags.append(tag)

        x = np.array(x)
        x = x.reshape(x.shape[0], 1, x.shape[1], 1)
        y = np.array(label_processor.transform(tags))

        yield x, y

def model(x, y, weights, biases, dropout, depth_tensor):
    # First layer: plain 1x3 convolution
    net = tf.nn.dropout(x, dropout)
    net = tf.nn.conv2d(net, weights['first'], strides=[1, 1, 1, 1], padding='SAME')
    net = tf.nn.bias_add(net, biases['first'])
    net = tf.nn.relu(net)

    net_flat = tf.reshape(net, [-1, weights['out'].get_shape().as_list()[0]])
    preds = tf.add(tf.matmul(net_flat, weights['out']), biases['out'])
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=y))

    # Iterated dilated convolutions with exponentially increasing rate,
    # accumulating the loss at every depth
    for i in range(1, depth):
        net = tf.nn.dropout(net, dropout)
        net = tf.nn.atrous_conv2d(net, weights['iterated'], rate=2**i, padding='SAME')
        net = tf.nn.bias_add(net, biases['iterated'])
        net = tf.nn.relu(net)

        net_flat = tf.reshape(net, [-1, weights['out'].get_shape().as_list()[0]])
        preds = tf.add(tf.matmul(net_flat, weights['out']), biases['out'])
        cost += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=y))

    return preds, tf.divide(cost, depth_tensor)

weights = {'first': tf.Variable(tf.random_normal([1, 3, 1, 10])),
           'iterated': tf.Variable(tf.random_normal([1, 3, 10, 10])),
           'out': tf.Variable(tf.random_normal([embedding_dim*10, n_classes]))}

biases = {'first': tf.Variable(tf.random_normal([10])),
          'iterated': tf.Variable(tf.random_normal([10])),
          'out': tf.Variable(tf.random_normal([n_classes]))}

preds, cost = model(x, y, weights, biases, dropout, depth_tensor)

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

accuracy, update_accuracy = streaming_accuracy(y, preds)
recall, update_recall = streaming_recall(y, preds)

init = tf.global_variables_initializer()
init2 = tf.local_variables_initializer()  # streaming metrics use local variables

with tf.Session() as sess:
    sess.run(init)
    i = 1

    for batch_x, batch_y in data_processor(data, train_inds, embedding, label_processor, n_iter):
        sess.run(optimizer,
                 feed_dict={x: batch_x, y: batch_y,
                            dropout: dropout_prob})

        if i % display_step == 0:
            loss = sess.run(cost,
                            feed_dict={x: batch_x, y: batch_y, dropout: dropout_prob})

            print("Iter:{}, Minibatch Loss:{:.6f}".format(i, loss))
        i += 1

    sess.run(init2)
    for batch_x, batch_y in data_processor(data, val_inds, embedding, label_processor, n_iter):
        recall, accuracy = sess.run([update_recall, update_accuracy],
                                    feed_dict={x: batch_x, y: batch_y, dropout: 1})

        f1 = 2 * recall * accuracy / (recall + accuracy)

    print("Testing Accuracy:", accuracy, "Testing Recall:", recall, "Testing F1 Score:", f1)

Here is the part where I use the streaming metrics:

accuracy, update_accuracy = streaming_accuracy(y, preds)
recall, update_recall = streaming_recall(y, preds)

init = tf.global_variables_initializer()
init2 = tf.local_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    i = 1

    for batch_x, batch_y in data_processor(data, train_inds, embedding, label_processor, n_iter):
        sess.run(optimizer,
                 feed_dict={x: batch_x, y: batch_y,
                            dropout: dropout_prob})

        if i % display_step == 0:
            loss = sess.run(cost,
                            feed_dict={x: batch_x, y: batch_y, dropout: dropout_prob})

            print("Iter:{}, Minibatch Loss:{:.6f}".format(i, loss))
        i += 1

    sess.run(init2)
    for batch_x, batch_y in data_processor(data, val_inds, embedding, label_processor, n_iter):
        recall, accuracy = sess.run([update_recall, update_accuracy],
                                    feed_dict={x: batch_x, y: batch_y, dropout: 1})

        f1 = 2 * recall * accuracy / (recall + accuracy)

Unfortunately, it's hard to say exactly what is going wrong in such a long piece of code. Do you have a small reproducible case you can share?


I've separated out the part that uses the streaming metrics. Is that good enough?

Answer


I believe this is because you only run the update op, never the fetch op.

Your code:

for batch_x, batch_y in data_processor(data, val_inds, embedding, label_processor, n_iter):
    recall, accuracy = sess.run([update_recall, update_accuracy],
                                feed_dict={x: batch_x, y: batch_y, dropout: 1})
    f1 = 2 * recall * accuracy / (recall + accuracy)

should be:

for batch_x, batch_y in data_processor(data, val_inds, embedding, label_processor, n_iter):
    _, recall_value, _, accuracy_value = sess.run([update_recall, recall, update_accuracy, accuracy],
                                                  feed_dict={x: batch_x, y: batch_y, dropout: 1})
    f1 = 2 * recall_value * accuracy_value / (recall_value + accuracy_value)

The tensors accuracy and recall hold the actual metric values; the update_* ops only update the internal counter variables from which the final metric value is computed.
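
To make the value/update split concrete, here is a minimal, self-contained sketch (my own toy example with made-up numbers, not code from the question or the paper):

import tensorflow as tf
from tensorflow.contrib.metrics import streaming_accuracy

labels = tf.placeholder(tf.int64, [None])
predictions = tf.placeholder(tf.int64, [None])

# streaming_accuracy returns a value tensor and an update op
acc, update_acc = streaming_accuracy(predictions, labels)

with tf.Session() as sess:
    # the internal counters behind the metric are local variables
    sess.run(tf.local_variables_initializer())

    # each run of the update op folds one batch into the counters
    sess.run(update_acc, feed_dict={predictions: [1, 0, 1], labels: [1, 1, 1]})
    sess.run(update_acc, feed_dict={predictions: [0, 0], labels: [0, 1]})

    # fetching the value tensor reads the accumulated metric
    print(sess.run(acc))  # 3 correct out of 5 -> 0.6

Because the value accumulates across batches, you can also run only the update ops inside your loop and fetch recall and accuracy once after the loop finishes.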

BTW: I haven't tested the code. Please let me know if it doesn't work as you expect.