I am training a deep CNN for image enhancement and have run into a very strange problem: saving/importing TensorFlow checkpoints works without error, but all of the imported variables have the value 'None'.

My network architecture is fully convolutional and implements several small "u-shaped" components, where feature maps are down/up-sampled so that processing happens throughout the "top level". In the top level there are several nodes where the network "guesses" the output image, and the outputs of the lower levels are then added to features derived from the guess. I have losses on the error of the final prediction as well as on these guesses.

The network is defined exactly as follows:

def convNet(x, weights, biases): 
    #TOP LAYER 
    conv0_1  = conv3dWrap(x, weights['wConv0_1'], biases['bConv0_1'],[1,1,1,1,1]) 
    conv0_2  = conv3dWrap(conv0_1, weights['wConv0_2'], biases['bConv0_2'],[1,1,1,1,1]) 

    #MID LAYER DOWN SAMPLE 
    conv1_1  = conv3dWrap(conv0_2, weights['wConv1_1'], biases['bConv1_1'],[1,2,2,2,1]) 
    conv1_2  = conv3dWrap(conv1_1, weights['wConv1_2'], biases['bConv1_2'],[1,1,1,1,1]) 

    #BOTTOM LAYER DOWN SAMPLE 
    conv2_1  = conv3dWrap(conv1_2, weights['wConv2_1'], biases['bConv2_1'],[1,2,2,2,1]) 
    conv2_2  = conv3dWrap(conv2_1, weights['wConv2_2'], biases['bConv2_2'],[1,1,1,1,1]) 
    conv2_3  = conv3dWrap(conv2_2, weights['wConv2_3'], biases['bConv2_3'],[1,1,1,1,1]) 
    convTrans2_1 = conv3dTransWrap(conv2_3,weights['wTConv2_1'], biases['bTConv2_1'], [4,2,32,32,64],[1,2,2,2,1]) 

    #MID LAYER UPSAMPLE 
    conv1_3  = conv3dWrap(tf.add(convTrans2_1,conv1_2),weights['wConv1_3'], biases['bConv1_3'],[1,1,1,1,1]) 
    conv1_4  = conv3dWrap(conv1_3, weights['wConv1_4'], biases['bConv1_4'],[1,1,1,1,1]) 
    convTrans1_1 = conv3dTransWrap(conv1_4, weights['wTConv1_1'], biases['bTConv1_1'], [4,4,64,64,32],[1,2,2,2,1]) 

    #TOP LAYER AGAIN 
    conv0_3  = conv3dWrap(tf.add(conv0_2,convTrans1_1), weights['wConv0_3'], biases['bConv0_3'],[1,1,1,1,1]) 
    conv0_4  = conv3dWrap(conv0_3, weights['wConv0_4'], biases['bConv0_4'],[1,1,1,1,1]) 
    recon0_1  = reconWrap(conv0_3, weights['wReconDS0_1'], biases['bReconDS0_1'],[1,1,1,1,1]) 
    print(recon0_1.shape) 
    catRecon0_1 = tf.add(conv0_4,tf.contrib.keras.backend.repeat_elements(recon0_1,32,4)) 
    conv0_5  = conv3dWrap(catRecon0_1, weights['wConv0_5'], biases['bConv0_5'],[1,1,1,1,1]) 

    #MID LAYER AGAIN 
    conv1_5  = conv3dWrap(conv0_5, weights['wConv1_5'], biases['bConv1_5'],[1,2,2,2,1]) 
    conv1_6  = conv3dWrap(conv1_5, weights['wConv1_6'], biases['bConv1_6'],[1,1,1,1,1]) 

    #BOTTOM LAYER 
    conv2_4  = conv3dWrap(conv1_6, weights['wConv2_4'], biases['bConv2_4'],[1,2,2,2,1]) 
    conv2_5  = conv3dWrap(conv2_4, weights['wConv2_5'], biases['bConv2_5'],[1,1,1,1,1]) 
    conv2_6  = conv3dWrap(conv2_5, weights['wConv2_6'], biases['bConv2_6'],[1,1,1,1,1]) 
    convTrans2_2 = conv3dTransWrap(conv2_6,weights['wTConv2_2'], biases['bTConv2_2'], [4,2,32,32,64],[1,2,2,2,1]) 

    #MID LAYER UPSAMPLE 
    conv1_7  = conv3dWrap(tf.add(convTrans2_2,conv1_6),weights['wConv1_7'], biases['bConv1_7'],[1,1,1,1,1]) 
    conv1_8  = conv3dWrap(conv1_7, weights['wConv1_8'], biases['bConv1_8'],[1,1,1,1,1]) 
    convTrans1_2 = conv3dTransWrap(conv1_8,weights['wTConv1_2'], biases['bTConv1_2'], [4,4,64,64,32],[1,2,2,2,1]) 

    #TOP LAYER 
    conv0_6  = conv3dWrap(tf.add(conv0_5,convTrans1_2), weights['wConv0_6'], biases['bConv0_6'],[1,1,1,1,1]) 
    recon0_2  = reconWrap(conv0_6, weights['wReconDS0_2'], biases['bReconDS0_2'],[1,1,1,1,1]) 
    catRecon0_2 = tf.add(conv0_6,tf.contrib.keras.backend.repeat_elements(recon0_2,32,4)) 
    conv0_7  = conv3dWrap(catRecon0_2, weights['wConv0_7'], biases['bConv0_7'],[1,1,1,1,1]) 

    #MID LAYER 
    conv1_9  = conv3dWrap(conv0_7, weights['wConv1_9'], biases['bConv1_9'],[1,2,2,2,1]) 
    conv1_10  = conv3dWrap(conv1_9, weights['wConv1_10'], biases['bConv1_10'],[1,1,1,1,1]) 

    #BOTTOM LAYER 
    conv2_7  = conv3dWrap(conv1_10, weights['wConv2_7'], biases['bConv2_7'],[1,2,2,2,1]) 
    conv2_8  = conv3dWrap(conv2_7, weights['wConv2_8'], biases['bConv2_8'],[1,1,1,1,1]) 
    conv2_9  = conv3dWrap(conv2_8, weights['wConv2_9'], biases['bConv2_9'],[1,1,1,1,1]) 
    convTrans2_3 = conv3dTransWrap(conv2_9, weights['wTConv2_3'], biases['bTConv2_3'], [4,2,32,32,64],[1,2,2,2,1]) 

    #MID LAYER UPSAMPLE 
    conv1_11  = conv3dWrap(tf.add(convTrans2_3,conv1_10),weights['wConv1_11'], biases['bConv1_11'],[1,1,1,1,1]) 
    conv1_12  = conv3dWrap(conv1_11, weights['wConv1_12'], biases['bConv1_12'],[1,1,1,1,1]) 
    convTrans1_3 = conv3dTransWrap(conv1_12,weights['wTConv1_3'], biases['bTConv1_3'], [4,4,64,64,32],[1,2,2,2,1]) 

    #TOP LAYER 
    conv0_8  = conv3dWrap(tf.add(conv0_7,convTrans1_3), weights['wConv0_8'], biases['bConv0_8'],[1,1,1,1,1]) 
    recon0_3  = reconWrap(conv0_8, weights['wReconDS0_3'], biases['bReconDS0_3'],[1,1,1,1,1]) 
    catRecon0_3 = tf.add(conv0_8,tf.contrib.keras.backend.repeat_elements(recon0_3,32,4)) 
    conv0_9  = conv3dWrap(catRecon0_3, weights['wConv0_9'], biases['bConv0_9'],[1,1,1,1,1]) 
    print(recon0_3.shape) 

    #MID LAYER 
    conv1_13  = conv3dWrap(conv0_9, weights['wConv1_13'], biases['bConv1_13'],[1,2,2,2,1]) 
    conv1_14  = conv3dWrap(conv1_13, weights['wConv1_14'], biases['bConv1_14'],[1,1,1,1,1]) 

    #BOTTOM LAYER 
    conv2_10  = conv3dWrap(conv1_14, weights['wConv2_10'], biases['bConv2_10'],[1,2,2,2,1]) 
    conv2_11  = conv3dWrap(conv2_10, weights['wConv2_11'], biases['bConv2_11'],[1,1,1,1,1]) 
    conv2_12  = conv3dWrap(conv2_11, weights['wConv2_12'], biases['bConv2_12'],[1,1,1,1,1]) 
    convTrans2_4 = conv3dTransWrap(conv2_12, weights['wTConv2_4'], biases['bTConv2_4'], [4,2,32,32,64],[1,2,2,2,1]) 

    #MID LAYER UPSAMPLE 
    conv1_15  = conv3dWrap(tf.add(convTrans2_4,conv1_14),weights['wConv1_15'], biases['bConv1_15'],[1,1,1,1,1]) 
    conv1_16  = conv3dWrap(conv1_15, weights['wConv1_16'], biases['bConv1_16'],[1,1,1,1,1]) 
    convTrans1_4 = conv3dTransWrap(conv1_16,weights['wTConv1_4'], biases['bTConv1_4'], [4,4,64,64,32],[1,2,2,2,1]) 

    #TOP LAYER 
    conv0_10  = conv3dWrap(tf.add(conv0_9,convTrans1_4), weights['wConv0_10'], biases['bConv0_10'],[1,1,1,1,1]) 

    #OUTPUT 
    convOUT  = reconWrap(conv0_10, weights['wConvOUT'], biases['bConvOUT'],[1,1,1,1,1]) 
    print(convOUT.shape) 

    return recon0_1, recon0_2, recon0_3, convOUT 
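
The tf.contrib.keras.backend.repeat_elements calls above tile the single-channel "guess" along the channel axis so it can be added elementwise to the 32-channel feature maps. A tiny sketch of the semantics using numpy's equivalent np.repeat (my illustration, not from the original code):

import numpy as np 
guess = np.ones([4, 2, 2, 2, 1])      # a single-channel "guess" volume 
tiled = np.repeat(guess, 32, axis=4)  # what repeat_elements(guess, 32, 4) computes 
print(tiled.shape)                    # (4, 2, 2, 2, 32) 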

Where all of the "wrappers" are as follows:

def conv3dWrap(x, W, b, strides): 
    x = tf.nn.conv3d(x, W, strides, padding='SAME') 
    x = tf.nn.bias_add(x, b) 
    return tf.nn.relu(x) 

def reconWrap(x, W, b, strides): 
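    # note: no ReLU here -- the reconstruction output is left linear 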
    x = tf.nn.conv3d(x, W, strides, padding='SAME') 
    x = tf.nn.bias_add(x, b) 
    return x 

def conv3dTransWrap(x, W, b, shape, strides): 
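    # 'shape' is the static output_shape argument required by tf.nn.conv3d_transpose 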
    x = tf.nn.conv3d_transpose(x, W, shape, strides, padding='SAME') 
    x = tf.nn.bias_add(x,b) 
    return tf.nn.relu(x) 

My weights and biases are stored in dictionaries that are defined before training begins:

weights={ 
#TOP LAYER 
'wConv0_1':  tf.Variable(tf.random_normal([4, 3, 3, 1, 5]), name='wC0_1'), 
'wConv0_2':  tf.Variable(tf.random_normal([4, 3, 3, 5, 32]), name='wC0_2'), 
'wConv0_3':  tf.Variable(tf.random_normal([4, 3, 3, 32, 32]), name='wC0_3'), 
'wConv0_4':  tf.Variable(tf.random_normal([4, 3, 3, 32, 32]), name='wC0_4'), 
'wReconDS0_1': tf.Variable(tf.random_normal([1, 1, 1, 32, 1]) , name='wR0_1') ...... #THIS CONTINUES FOR QUITE AWHILE 
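
The biases dictionary is referenced throughout but never shown. A minimal sketch of what it presumably looks like, with bias shapes assumed to match each convolution's output-channel count (these entries are my illustrative guesses following the same naming pattern):

biases={ 
#TOP LAYER 
'bConv0_1':  tf.Variable(tf.zeros([5]), name='bC0_1'),   # 5 output channels of wConv0_1 
'bConv0_2':  tf.Variable(tf.zeros([32]), name='bC0_2'),  # 32 output channels of wConv0_2 
...... #AND SO ON, ONE BIAS VECTOR PER WEIGHT TENSOR 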

Then, I begin training like this:

def train_cnn(x): 
    epochLosses=[] 
    print('Beginning Training!') 
    print(NUM_EPOCHS) 
    r1,r2,r3,pred = convNet(x, weights, biases)   
    cost = (tf.losses.mean_squared_error(y,pred) 
    + 0.25* ((tf.losses.mean_squared_error(y,r1)) 
    + (tf.losses.mean_squared_error(y,r2)) 
    + (tf.losses.mean_squared_error(y,r3)))) 

    regularizer = (0.01*tf.nn.l2_loss(weights['wConv0_1'])+ 
            0.01*tf.nn.l2_loss(weights['wConv0_2'])+ 
            0.01*tf.nn.l2_loss(weights['wConv0_3'])+  
            0.01*tf.nn.l2_loss(weights['wConv0_4'])+  
            0.01*tf.nn.l2_loss(weights['wReconDS0_1'])+ 
            0.01*tf.nn.l2_loss(weights['wConv0_5'])+ 
            0.01*tf.nn.l2_loss(weights['wConv0_6'])+  
            0.01*tf.nn.l2_loss(weights['wReconDS0_2'])+ 
            0.01*tf.nn.l2_loss(weights['wReconDS0_3'])+ 
            0.01*tf.nn.l2_loss(weights['wConv0_7'])+  
            0.01*tf.nn.l2_loss(weights['wConv0_8'])+  
            0.01*tf.nn.l2_loss(weights['wConv0_9'])+  
            0.01*tf.nn.l2_loss(weights['wConv0_10'])+  
            0.01*tf.nn.l2_loss(weights['wConvOUT'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_1'])+ 
            0.01*tf.nn.l2_loss(weights['wConv1_2'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_3'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_4'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_5'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_6'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_7'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_8'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_9'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_10'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_11'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_12'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_13'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_14'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_15'])+  
            0.01*tf.nn.l2_loss(weights['wConv1_16'])+  
            0.01*tf.nn.l2_loss(weights['wTConv1_1'])+  
            0.01*tf.nn.l2_loss(weights['wTConv1_2'])+  
            0.01*tf.nn.l2_loss(weights['wTConv1_3'])+  
            0.01*tf.nn.l2_loss(weights['wTConv1_4'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_1'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_2'])+ 
            0.01*tf.nn.l2_loss(weights['wConv2_3'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_4'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_5'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_6'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_7'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_8'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_9'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_10'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_11'])+  
            0.01*tf.nn.l2_loss(weights['wConv2_12'])+ 
            0.01*tf.nn.l2_loss(weights['wTConv2_1'])+  
            0.01*tf.nn.l2_loss(weights['wTConv2_2'])+  
            0.01*tf.nn.l2_loss(weights['wTConv2_3'])+  
            0.01*tf.nn.l2_loss(weights['wTConv2_4'])) 
    cost=cost+regularizer 
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost) 
    saver = tf.train.Saver() 
    sess = tf.Session() 
    sess.run(tf.global_variables_initializer()) 
    valLosses=[] 
    epochLosses=[] 
    print('Beginning Session!') 
    writer = tf.summary.FileWriter ('./GRAPH' , sess.graph) 
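
As an aside, the long hand-written regularizer above can be collapsed into a single expression; a sketch of an equivalent, assuming every tensor in weights should receive the same 0.01 coefficient:

regularizer = 0.01 * tf.add_n([tf.nn.l2_loss(w) for w in weights.values()]) 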

Finally, I go on to do some batch-loading stuff and, once the batches are ready, I do the following for each pass (I won't be saving on every pass once I get the save/re-import working):

_, c = sess.run([optimizer, cost], feed_dict={x: inBatch, y: gsBatch}) 
epoch_loss += c 
save_path = saver.save(sess, "./CHKPT/model.cpkt") 
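
For context, a hypothetical sketch of how these lines presumably sit inside the epoch loop (the batch-loading code is not shown in the question; load_batches is a placeholder name):

for epoch in range(NUM_EPOCHS): 
    epoch_loss = 0 
    for inBatch, gsBatch in load_batches():  # placeholder for the batch loading 
        _, c = sess.run([optimizer, cost], feed_dict={x: inBatch, y: gsBatch}) 
        epoch_loss += c 
    epochLosses.append(epoch_loss) 
    save_path = saver.save(sess, "./CHKPT/model.cpkt")  # checkpoint once per epoch 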

So, when I then go on to import this model:

sess = tf.Session() 
x = tf.placeholder(dtype=tf.float32) 
new_saver = tf.train.import_meta_graph('./CHKPT/model.cpkt.meta') 
sess.run(tf.global_variables_initializer()) 
a,b,c,pred = convNet(x, weights, biases) 

I run into the following error:

ValueError: Tried to convert 'filter' to a tensor and failed. Error: None values not supported. 

When I look at the imported weights and biases, every one of them has the value 'None'. Not only is this strange, but the network also ran 'super fast' during training, far faster than I would have expected; I worry that no legitimate computation was taking place.

That surely can't be the case, and yet I'm almost certain I'm following the save/load procedure I've used for many other networks verbatim. Can anyone shed some light on what might be going on here?

Edit: I'm also quite new to TF, and there are likely non-ideal things in my code. If you see anything unclean outside of the save/import, please let me know.

Answer

Running sess.run(tf.global_variables_initializer()) re-initializes every tensor and throws away their loaded values. Skip calling tf.global_variables_initializer() when loading the model; the initialization is done by the saver.

You are also missing the restore call (import_meta_graph() only loads the Saver object):

new_saver = tf.train.import_meta_graph('./CHKPT/model.cpkt.meta') 
new_saver.restore(sess, './CHKPT/model.cpkt') 

After that, when you run:

a,b,c,pred = convNet(x, weights, biases) 

you create a completely new network and do not use the loaded one. Instead, you have to find the tensors you need inside tf.global_variables() after restoring the model, for example by searching for them by name.
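
For example, a minimal sketch of looking the restored variables up by name (run in a fresh graph; variable names such as 'wC0_1' come from the question's definitions, and ':0' is TensorFlow's output-index suffix):

sess = tf.Session() 
new_saver = tf.train.import_meta_graph('./CHKPT/model.cpkt.meta') 
new_saver.restore(sess, './CHKPT/model.cpkt') 
# build a name -> variable map from the restored graph 
restored = {v.name: v for v in tf.global_variables()} 
wConv0_1 = restored['wC0_1:0'] 
print(sess.run(wConv0_1).shape)  # (4, 3, 3, 1, 5) 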

I just removed the initializer call from my loading code and the problem remains. – Karl

@Karl你也跳過了模型的恢復。我編輯了答案。 – BlueSun

I just added that and I still have the same problem. I think the problem is with the training and/or the saving. When I look at the weights and biases I imported, they actually have the value 'None':

weights 
Out[40]: 
{'wConv0_1': None, 
 'wConv0_10': None, 
 'wConv0_2': None, 
 'wConv0_3': None, 
 'wConv0_4': None, 
 'wConv0_5': None, 
 'wConv0_6': None, 
 'wConv0_7': None, – Karl