I am working on implementing the new TFGAN module (tf.contrib.gan) in TensorFlow.
Has anyone actually been able to get it working? I'm running into a problem passing random noise (tf.random_normal) into a simple generator:
tfgan = tf.contrib.gan

noise = tf.random_normal([BATCH_SIZE, 28, 28])

def my_generator(z, out_dim=28*28, n_units=128, reuse=False, alpha=0.01):
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(z, n_units, activation=None)
        # Leaky ReLU
        h1 = tf.maximum(h1, alpha*h1)
        # Logits and tanh output
        logits = tf.layers.dense(h1, out_dim, activation=None)
        out = tf.nn.tanh(logits)
        return out, logits
Then the TFGAN call:
# Build the generator and discriminator.
gan_model = tfgan.gan_model(
    generator_fn=my_generator,
    discriminator_fn=my_discriminator,
    real_data=images,
    generator_inputs=noise)
Error: "tuple' object has no attribute 'dtype'"
,指着我generator_inputs線。
(作爲一個方面說明,我已經幾乎完成了我所有的神經網絡工作在keras層次的抽象,所以我知道這是一個簡單的問題)
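For reference, my reading of how gan_model uses the two callbacks (pieced together from the tf.contrib.gan source in TF 1.4, so this understanding may itself be where I'm going wrong) is roughly:

# Signatures I believe tfgan.gan_model expects -- my assumption, not
# something I've confirmed beyond reading the source.

def generator_fn(generator_inputs):
    # Receives only the generator_inputs tensor (the noise) and should
    # return the generated data.
    ...

def discriminator_fn(data, generator_inputs):
    # Called on generated data and on real data, with the generator
    # inputs passed as the second positional argument; should return
    # the discriminator output (logits).
    ...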
EDIT per the comment from kvorobiev (thank you very much):
The code, excluding the data generator (it is essentially the same as the example in the GitHub post):
tfgan = tf.contrib.gan

noise = tf.random_normal([28, 28])

def unconditional_generator(z, out_dim=28*28, n_units=128, reuse=False, alpha=0.01):
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(z, n_units, activation=None)
        # Leaky ReLU
        h1 = tf.maximum(h1, alpha*h1)
        # Logits and tanh output
        logits = tf.layers.dense(h1, out_dim, activation=None)
        out = tf.nn.tanh(logits)
        return out, logits

def unconditional_discriminator(x, n_units=128, reuse=False, alpha=0.01):
    with tf.variable_scope('discriminator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(x, n_units, activation=None)
        # Leaky ReLU
        h1 = tf.maximum(h1, alpha*h1)
        logits = tf.layers.dense(h1, 1, activation=None)
        out = tf.nn.sigmoid(logits)
        return out, logits

# Build the generator and discriminator.
gan_model = tfgan.gan_model(
    generator_fn=unconditional_generator,  # you define
    discriminator_fn=unconditional_discriminator,  # you define
    real_data=img_generator,
    generator_inputs=noise)

# Build the GAN loss.
gan_loss = tfgan.gan_loss(
    gan_model,
    generator_loss_fn=tfgan_losses.wasserstein_generator_loss,
    discriminator_loss_fn=tfgan_losses.wasserstein_discriminator_loss)

# Create the train ops, which calculate gradients and apply updates to weights.
train_ops = tfgan.gan_train_ops(
    gan_model,
    gan_loss,
    generator_optimizer=tf.train.AdamOptimizer(gen_lr, 0.5),
    discriminator_optimizer=tf.train.AdamOptimizer(dis_lr, 0.5))

# Run the train ops in the alternating training scheme.
tfgan.gan_train(
    train_ops,
    hooks=[tf.train.StopAtStepHook(num_steps=100)],
    logdir=FLAGS.train_log_dir)
Traceback:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-3-2c570c5257d0> in <module>()
37 discriminator_fn = unconditional_discriminator, # you define
38 real_data=img_generator,
---> 39 generator_inputs=noise)
40
41 # Build the GAN loss.
~/tf_1.4/lib/python3.5/site-packages/tensorflow/contrib/gan/python/train.py in gan_model(generator_fn, discriminator_fn, real_data, generator_inputs, generator_scope, discriminator_scope, check_shapes)
105 with variable_scope.variable_scope(discriminator_scope) as dis_scope:
106 discriminator_gen_outputs = discriminator_fn(generated_data,
--> 107 generator_inputs)
108 with variable_scope.variable_scope(dis_scope, reuse=True):
109 real_data = ops.convert_to_tensor(real_data)
<ipython-input-3-2c570c5257d0> in unconditional_discriminator(x, n_units, reuse, alpha)
19 with tf.variable_scope('discriminator', reuse=reuse):
20 # Hidden layer
---> 21 h1 = tf.layers.dense(x, n_units, activation=None)
22
23 # Leaky ReLU
~/tf_1.4/lib/python3.5/site-packages/tensorflow/python/layers/core.py in dense(inputs, units, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, trainable, name, reuse)
245 trainable=trainable,
246 name=name,
--> 247 dtype=inputs.dtype.base_dtype,
248 _scope=name,
249 _reuse=reuse)
AttributeError: 'tuple' object has no attribute 'dtype'
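Reading the traceback, generated_data at train.py line 106 is just whatever unconditional_generator returned, i.e. the (out, logits) tuple, and that tuple is what reaches tf.layers.dense inside the discriminator. My working assumption (untested) is that each callback should return a single tensor, and that the discriminator needs to accept the generator inputs as a second positional argument. A minimal sketch under those assumptions:

def unconditional_generator(z, out_dim=28*28, n_units=128, alpha=0.01):
    # gan_model appears to wrap this call in its own variable scope
    # (per the traceback), so I've dropped the explicit scope/reuse args.
    h1 = tf.layers.dense(z, n_units, activation=None)
    h1 = tf.maximum(h1, alpha * h1)  # leaky ReLU
    logits = tf.layers.dense(h1, out_dim, activation=None)
    return tf.nn.tanh(logits)  # return only the generated-data tensor

def unconditional_discriminator(x, unused_conditioning, n_units=128, alpha=0.01):
    # The second positional argument receives generator_inputs (per the
    # call site in the traceback); ignored for an unconditional GAN.
    h1 = tf.layers.dense(x, n_units, activation=None)
    h1 = tf.maximum(h1, alpha * h1)  # leaky ReLU
    return tf.layers.dense(h1, 1, activation=None)  # return only the logits

If that reading is right, the noise would presumably also need a batch dimension (e.g. tf.random_normal([BATCH_SIZE, 28*28])) so its shape can line up with real_data, but the tuple return value looks like the immediate cause of this particular error.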
Actually, the error occurs in the tfgan.gan_model call. Post the full error traceback and the code for all of the arguments to 'tfgan.gan_model'. – kvorobiev
Posted – thanks in advance. – jsl2