2017-08-29 52 views
0

我想以這種方式來實現一個有狀態卷積LSTM，但遇到了錯誤：

# build CNN/LSTM and train it. 
# 
model = Sequential() 

# build CNN/LSTM and train it. 

model.add(TimeDistributed(Conv2D(16, (3, 3), padding='same'), input_shape=(210, 22, 26, 1))) 
model.add(Activation('elu')) 
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2)))) 
model.add(Dropout(0.2)) 

model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same'))) 
model.add(Activation('elu')) 
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2)))) 
model.add(Dropout(0.2)) 

model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same'))) 
model.add(Activation('elu')) 
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2)))) 

model.add(TimeDistributed(Flatten())) 

model.add(Conv1D(16, 3, padding='same')) 
model.add(Activation('elu')) 
model.add(MaxPooling1D(pool_size=8)) 

model.add(Bidirectional(LSTM(64, batch_input_shape=(32, 26, 16), return_sequences=True, stateful=True))) 
model.add(Activation('elu')) 
model.add(Bidirectional(LSTM(128, return_sequences=False, stateful=True))) 
model.add(Activation('elu')) 
model.add(Dense(1, activation='sigmoid')) 
adammgm = keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0001) 
model.compile(loss='mean_squared_error', optimizer=adammgm, metrics=['accuracy']) 
print(model.summary()) 


# saves the model weights after each epoch 
# if the validation loss decreased 
# 
checkpointer = ModelCheckpoint(filepath= odir_a + "/temp_lstm_model_weights-{epoch:02d}.h5") 

model.fit_generator(generate_reduced_dimension_arrays_from_file(ilist_a, edfroot_a, featroot_a, labroot_a, feat_ext_a, lab_ext_a, num_channels_a, feat_fdur_a, win_len_a, models_order, lstm_batch_size_a, NEDC_DEF_BATCH_FILE), steps_per_epoch=NEDC_DEF_STEPS_PER_EPOCH, epochs=lstm_epoch_size_a, callbacks=[checkpointer]) 

但是，當我嘗試構建這個模型時，我得到了這個錯誤：

ValueError：如果RNN是有狀態的，它需要知道它的批量大小。請為輸入張量指定批量大小：－ 如果使用Sequential模型，請通過向第一層傳遞batch_input_shape參數來指定批量大小。－ 如果使用函數式API，請通過向Input層傳遞batch_shape參數來指定批量大小。

我讀過所有相關的帖子，但仍然不知道如何解決這個問題。

+0

在你的第一層添加'batch_input_shape'參數... –

+0

我加入到CNN的第一層。我也添加到所有圖層。我總是收到相同的錯誤。 –

+0

你能向我們展示你添加它的代碼嗎?其中一件重要的事情是,與其他形狀不同,這需要包括批量大小。 –

回答

0

問題解決了。解決方案是在CNN的第一層使用batch_input_shape代替input_shape：

# Build the stateful CNN/LSTM model.
#
# Because the LSTM layers are stateful, the batch size must be fixed and
# declared on the FIRST layer of the Sequential model via
# batch_input_shape=(batch, time, height, width, channels).
model = Sequential()

# CNN front-end: three TimeDistributed conv/pool groups; the first two are
# followed by dropout, the last is not (same layer sequence as before).
conv_groups = [(16, 0.2), (32, 0.2), (64, None)]
for position, (n_filters, drop_rate) in enumerate(conv_groups):
    frame_conv = Conv2D(n_filters, (3, 3), padding='same')
    if position == 0:
        # batch=32, time=210, frame=(22, 26, 1)
        model.add(TimeDistributed(frame_conv, batch_input_shape=(32, 210, 22, 26, 1)))
    else:
        model.add(TimeDistributed(frame_conv))
    model.add(Activation('elu'))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    if drop_rate is not None:
        model.add(Dropout(drop_rate))

# Flatten each frame's feature maps into a per-timestep feature vector.
model.add(TimeDistributed(Flatten()))

# Temporal 1-D convolution and pooling over the sequence axis.
model.add(Conv1D(16, 3, padding='same'))
model.add(Activation('elu'))
model.add(MaxPooling1D(pool_size=8))

# Stateful bidirectional LSTM stack; only the first layer keeps sequences.
for lstm_units, keep_sequences in ((64, True), (128, False)):
    model.add(Bidirectional(LSTM(lstm_units, return_sequences=keep_sequences, stateful=True)))
    model.add(Activation('elu'))

# Single sigmoid output for binary prediction.
model.add(Dense(1, activation='sigmoid'))

adammgm = keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0001)
model.compile(loss='mean_squared_error', optimizer=adammgm, metrics=['accuracy'])
print(model.summary())