Gradient error using TensorArray in TensorFlow

I want to implement a multidimensional LSTM in TensorFlow. I use a TensorArray to remember previous states, and a somewhat involved scheme to fetch the two neighbour states (the one above and the one to the left). tf.cond requires both branches to exist and to take the same number of inputs, which is why I also write a cell.zero_state into the states TensorArray at (last index + 1). A helper function then maps each step to the correct index into the states. When I try to use an optimizer to minimize the cost, I get this error:

InvalidArgumentError (see above for traceback): TensorArray MultiDimentionalLSTMCell-l1-multi-l1/[email protected]: Could not read from TensorArray index 809 because it has not yet been written to.

Can someone tell me how to fix it?

PS: it works without the optimizer!
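
To make the trick concrete, here is the sentinel pattern in isolation (a minimal sketch; h, w, batch and units are placeholder sizes, not the values from the real code below):

    import tensorflow as tf

    h, w, batch, units = 3, 4, 2, 5

    # slot h*w is a sentinel holding the zero state; clear_after_read=False
    # lets several time steps read the same slot
    states = tf.TensorArray(dtype=tf.float32, size=h*w + 1, clear_after_read=False)
    states = states.write(h*w, tf.zeros([batch, units]))

    def up_index(t):
        # the neighbour above exists only from the second row on; otherwise
        # fall back to the sentinel so both tf.cond branches return an index
        return tf.cond(tf.less_equal(tf.constant(w), t),
                       lambda: t - tf.constant(w),
                       lambda: tf.constant(h*w))

    state_above = states.read(up_index(tf.constant(0)))  # first row: reads the zero state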

import tensorflow as tf
import numpy as np

class MultiDimentionalLSTMCell(tf.nn.rnn_cell.RNNCell): 
    """ 
    A 2D LSTM cell taking two previous states (from the block above and the 
    block to the left). Note that state_is_tuple is always True. 
    """ 

    def __init__(self, num_units, forget_bias=1.0, activation=tf.nn.tanh): 
     self._num_units = num_units 
     self._forget_bias = forget_bias 
     self._activation = activation 

    @property 
    def state_size(self): 
     return tf.nn.rnn_cell.LSTMStateTuple(self._num_units, self._num_units) 

    @property 
    def output_size(self): 
     return self._num_units 

    def __call__(self, inputs, state, scope=None): 
     """Long short-term memory cell (LSTM). 
     @param: imputs (batch,n) 
     @param state: the states and hidden unit of the two cells 
     """ 
     with tf.variable_scope(scope or type(self).__name__): 
      c1,c2,h1,h2 = state 

      # one linear map produces all five gates (i, j, f1, f2, o); 
      # tf.nn.rnn_cell._linear is the private helper available in TF 0.12 
      concat = tf.nn.rnn_cell._linear([inputs, h1, h2], 5 * self._num_units, False) 

      i, j, f1, f2, o = tf.split(1, 5, concat) 

      new_c = (c1 * tf.nn.sigmoid(f1 + self._forget_bias) + 
        c2 * tf.nn.sigmoid(f2 + self._forget_bias) + tf.nn.sigmoid(i) * 
        self._activation(j)) 

      new_h = self._activation(new_c) * tf.nn.sigmoid(o) 
      new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h) 
      return new_h, new_state 


def multiDimentionalRNN_whileLoop(rnn_size,input_data,sh,dims=None,scopeN="layer1"): 
     """Implements naive multidimentional recurent neural networks 

     @param rnn_size: the hidden units 
     @param input_data: the data to process of shape [batch,h,w,chanels] 
     @param sh: [heigth,width] of the windows 
     @param dims: dimentions to reverse the input data,eg. 
      dims=[False,True,True,False] => true means reverse dimention 
     @param scopeN : the scope 

     returns [batch,h/sh[0],w/sh[1],chanels*sh[0]*sh[1]] the output of the lstm 
     """ 
     with tf.variable_scope("MultiDimentionalLSTMCell-"+scopeN): 
      cell = MultiDimentionalLSTMCell(rnn_size) 

      shape = input_data.get_shape().as_list() 

      if shape[1]%sh[0] != 0: 
       offset = tf.zeros([shape[0], sh[0]-(shape[1]%sh[0]), shape[2], shape[3]]) 
       input_data = tf.concat(1,[input_data,offset]) 
       shape = input_data.get_shape().as_list() 
      if shape[2]%sh[1] != 0: 
       offset = tf.zeros([shape[0], shape[1], sh[1]-(shape[2]%sh[1]), shape[3]]) 
       input_data = tf.concat(2,[input_data,offset]) 
       shape = input_data.get_shape().as_list() 

      h,w = int(shape[1]/sh[0]),int(shape[2]/sh[1]) 
      features = sh[1]*sh[0]*shape[3] 
      batch_size = shape[0] 

      x = tf.reshape(input_data, [batch_size,h,w, features]) 
      if dims is not None: 
       x = tf.reverse(x, dims) 
      x = tf.transpose(x, [1,2,0,3]) 
      x = tf.reshape(x, [-1, features]) 
      x = tf.split(0, h*w, x)  

      sequence_length = tf.ones(shape=(batch_size,), dtype=tf.int32)*shape[0] 
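      # note: sequence_length is defined here but never used below 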
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=h*w,name='input_ta') 
      inputs_ta = inputs_ta.unpack(x) 
      states_ta = tf.TensorArray(dtype=tf.float32, size=h*w+1,name='state_ta',clear_after_read=False) 
      outputs_ta = tf.TensorArray(dtype=tf.float32, size=h*w,name='output_ta') 

      # the sentinel zero state lives at index h*w; on write the two members 
      # of the LSTMStateTuple are stacked into one [2, batch, rnn_size] tensor 
      states_ta = states_ta.write(h*w, tf.nn.rnn_cell.LSTMStateTuple(tf.zeros([batch_size,rnn_size], tf.float32), 
                 tf.zeros([batch_size,rnn_size], tf.float32))) 
      def getindex1(t,w): 
       # index of the state above, or the sentinel h*w for the first row 
       return tf.cond(tf.less_equal(tf.constant(w),t), 
           lambda:t-tf.constant(w), 
           lambda:tf.constant(h*w)) 
      def getindex2(t,w): 
       # index of the state to the left, or the sentinel h*w for the first column 
       return tf.cond(tf.less(tf.constant(0),tf.mod(t,tf.constant(w))), 
           lambda:t-tf.constant(1), 
           lambda:tf.constant(h*w)) 

      time = tf.constant(0) 

      def body(time, outputs_ta, states_ta): 
       constant_val = tf.constant(0) 
       stateUp = tf.cond(tf.less_equal(tf.constant(w),time), 
            lambda: states_ta.read(getindex1(time,w)), 
            lambda: states_ta.read(h*w)) 
       stateLast = tf.cond(tf.less(constant_val,tf.mod(time,tf.constant(w))), 
            lambda: states_ta.read(getindex2(time,w)), 
            lambda: states_ta.read(h*w)) 

       # order must match the unpacking c1, c2, h1, h2 in the cell's __call__ 
       currentState = stateUp[0], stateLast[0], stateUp[1], stateLast[1] 
       out, state = cell(inputs_ta.read(time), currentState) 
       outputs_ta = outputs_ta.write(time,out) 
       states_ta = states_ta.write(time,state) 
       return time + 1, outputs_ta, states_ta 

      def condition(time,outputs_ta,states_ta): 
       return tf.less(time , tf.constant(h*w)) 

      result , outputs_ta, states_ta = tf.while_loop(condition, body, [time,outputs_ta,states_ta]) 


      outputs = outputs_ta.pack() 
      states = states_ta.pack() 

      y = tf.reshape(outputs, [h,w,batch_size,rnn_size]) 
      y = tf.transpose(y, [2,0,1,3]) 
      if dims is not None: 
       y = tf.reverse(y, dims) 

      return y 


def tanAndSum(rnn_size,input_data,scope): 
     """Run the 2D LSTM from all four corners, average the outputs, apply tanh.""" 
     outs = [] 
     for i in range(2): 
      for j in range(2): 
       dims = [False]*4 
       if i!=0: 
        dims[1] = True 
       if j!=0: 
        dims[2] = True     
       outputs = multiDimentionalRNN_whileLoop(rnn_size,input_data,[2,2], 
                 dims,scope+"-multi-l{0}".format(i*2+j)) 
       outs.append(outputs) 
     outs = tf.pack(outs, axis=0) 
     mean = tf.reduce_mean(outs, 0) 
     return tf.nn.tanh(mean) 

graph = tf.Graph() 
with graph.as_default(): 

    input_data = tf.placeholder(tf.float32, [20,36,90,1]) 
    #input_data = tf.ones([20,36,90,1],dtype=tf.float32) 
    sh = [2,2] 
    out1 = tanAndSum(20,input_data,'l1') 
    out = tanAndSum(25,out1,'l2') 
    cost = tf.reduce_mean(out) 
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost) 
    #out = multiDimentionalRNN_raw_rnn(2,input_data,sh,dims=[False,True,True,False],scopeN="layer1") 
    #cell = MultiDimentionalLSTMCell(10) 
    #out = cell.zero_state(2, tf.float32).c 
with tf.Session(graph=graph) as session: 
    tf.global_variables_initializer().run() 
    ou,k,_ = session.run([out,cost,optimizer],{input_data:np.ones([20,36,90,1],dtype=np.float32)}) 
    print(ou.shape) 
    print(k) 

Answer


You should add the parameter parallel_iterations=1 to your while loop call, like this:

result, outputs_ta, states_ta = tf.while_loop(
    condition, body, [time,outputs_ta,states_ta], parallel_iterations=1) 

This is required because inside the body you perform both read and write operations on the same TensorArray (states_ta). If loop iterations run in parallel (parallel_iterations > 1), one iteration may try to read an index of the TensorArray that another iteration has not yet written.
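
To make the dependency concrete, here is a stripped-down sketch (a toy example of mine with made-up sizes, not your code) in which every step reads the slot written by the previous step, so the iterations must run serially:

    import tensorflow as tf

    n = 5
    ta = tf.TensorArray(tf.float32, size=n + 1, clear_after_read=False)
    ta = ta.write(0, tf.constant(1.0))  # seed value, like your sentinel state

    def body(t, ta):
        prev = ta.read(t)               # depends on the write done at step t-1
        ta = ta.write(t + 1, prev * 2.0)
        return t + 1, ta

    # parallel_iterations=1 serializes the read-after-write dependency
    _, ta = tf.while_loop(lambda t, ta: t < n, body,
                          [tf.constant(0), ta],
                          parallel_iterations=1)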

I tested your code snippet with parallel_iterations=1 on tensorflow 0.12.1, and it works as expected.