0
考慮以下演示代碼,它展示了 tf.cond 在 is_variable_initialized 上的不確定性行爲:
# Silence TensorFlow's Python-side logger so the graph-side tf.Print output
# below is easier to read.
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
import sys
# TF1-style interactive session: installs itself as the default session so
# Tensor.eval() works without passing a session explicitly.
sess = tf.InteractiveSession()
def test_var_2():
    """Demonstrate non-deterministic tf.cond behavior around is_variable_initialized.

    Creates a variable without running its initializer, guards the initializer
    behind a tf.cond on tf.is_variable_initialized, then evaluates a dependent
    op twice so one can observe which cond branch (and which tf.Print message)
    actually fires on each run.
    """
    # Variable is deliberately NOT initialized here. The graph node name
    # ("test_var_3") differs from the Python identifier; that is cosmetic only.
    v = tf.Variable(initial_value=True, trainable=False, name="test_var_3")
    # sys.__stdout__ keeps Python prints on the real stdout, separate from the
    # C++-side logging that tf.Print emits.
    print("is initialized:", tf.is_variable_initialized(v).eval(), file=sys.__stdout__)
    def make_init():
        # Branch built for the "not yet initialized" case. tf.Print is a
        # graph op: it logs only when this branch is actually executed.
        with tf.control_dependencies([tf.Print(0, ["var is not initialized", tf.is_variable_initialized(v)])]):
            return tf.variables_initializer([v])
    def on_already_init():
        # Branch built for the "already initialized" case: no-op plus a log.
        return tf.group(tf.no_op(), tf.Print(0, ["var is initialized", tf.is_variable_initialized(v)]))
    # NOTE(review): both branches re-query tf.is_variable_initialized(v) inside
    # the branch, so the value printed races against the initializer op itself —
    # presumably the source of the non-determinism being demonstrated; confirm
    # against TF1's tf.cond execution semantics.
    maybe_init = tf.cond(
        tf.is_variable_initialized(v),
        on_already_init,
        make_init,
        name="init")
    # x carries a control dependency on maybe_init, so each x.eval() re-runs
    # the cond (and the "cd init" print).
    with tf.control_dependencies([maybe_init, tf.Print(0, ["cd init"])]):
        x = tf.where(v.read_value(), True, False)
    print("x:", x.eval())
    print("is initialized:", tf.is_variable_initialized(v).eval(), file=sys.__stdout__)
    print("x:", x.eval())
    print("is initialized:", tf.is_variable_initialized(v).eval(), file=sys.__stdout__)
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    test_var_2()
輸出具有不確定性。我要麼得到這個(1):
is initialized: False
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is not initialized][0]
x: True
is initialized: True
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is initialized][1]
x: True
is initialized: True
還是這個(2):
is initialized: False
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is not initialized][1]
x: True
is initialized: True
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is initialized][1]
x: True
is initialized: True
還是這個(3):
is initialized: False
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is initialized][1]
x: True
is initialized: True
.../logging_ops.cc:79] [cd init]
.../logging_ops.cc:79] [var is initialized][1]
x: True
is initialized: True
變體3(其中變量在第一個 x.eval() 時就已經被初始化)對我來說毫無意義。它怎麼會進入那個 cond 分支?爲什麼變量突然就已初始化了?
變體1和2之間的區別,會不會是因爲 tf.Print 沒有作爲 tf.control_dependencies 的一部分被完全求值?