I am trying to write synchronous training code for distributed TensorFlow using SyncReplicasOptimizer and MonitoredTrainingSession. In my setup, the chief gets stuck during training and the workers never start.
The problem I am facing is that after a few steps the chief hangs, and none of the workers ever begin training. Has anyone encountered this?
Here is the code I wrote. The data is read from TFRecord files; I followed exactly the approach described on the TensorFlow website. A sketch of the input pipeline comes first, then my build and train methods.
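(The input pipeline that produces self.trainx and self.trainy_ is not part of the snippet below, so here is a minimal sketch of the kind of TFRecord pipeline the training loop assumes; the method name, feature keys, shapes, and the self.tfrecord_files attribute are placeholders for illustration, not my exact code.)

def inputs(self):
    # Hypothetical TFRecord pipeline; keys and shapes are illustrative only.
    filename_queue = tf.train.string_input_producer(
        self.tfrecord_files, num_epochs=self.num_epochs)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={'image': tf.FixedLenFeature([], tf.string),
                  'label': tf.FixedLenFeature([], tf.string)})
    image = tf.reshape(tf.decode_raw(features['image'], tf.float32),
                       [self.imagesize, self.imagesize, 3])
    label = tf.reshape(tf.decode_raw(features['label'], tf.float32),
                       [self.targetSize])
    # shuffle_batch adds a queue; MonitoredTrainingSession starts the queue
    # runners, and exhausting num_epochs raises the OutOfRangeError that
    # train() below catches.
    self.trainx, self.trainy_ = tf.train.shuffle_batch(
        [image, label], batch_size=self.batch_size,
        capacity=1000 + 3 * self.batch_size, min_after_dequeue=1000)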
def build(self):
    self.modelObj = Model(self.imagesize, self.targetSize)
    self.modelObj.model()
    self.global_step = tf.contrib.framework.get_or_create_global_step()
    self.opt = tf.train.AdamOptimizer(self.learningrate)
    if self.syncTraining:
        # Aggregate gradients from all workers before applying a single
        # update, so every step is synchronized across replicas.
        self.trainer = tf.train.SyncReplicasOptimizer(
            self.opt,
            replicas_to_aggregate=self.num_workers,
            total_num_replicas=self.num_workers)
    else:
        self.trainer = self.opt
    self.trainstep = self.trainer.minimize(self.modelObj.loss,
                                           global_step=self.global_step)
    self.saver = tf.train.Saver(max_to_keep=1)
    self.summary_op = tf.summary.merge_all()
    self.init_op = tf.global_variables_initializer()
    if self.syncTraining:
        # The hook manages the sync token queue; the chief additionally
        # runs its initialization ops.
        self.sync_replicas_hook = self.trainer.make_session_run_hook(
            is_chief=(self.task_index == 0))

def train(self):
    if self.syncTraining:
        with tf.train.MonitoredTrainingSession(
                master=self.server.target,
                is_chief=(self.task_index == 0),
                checkpoint_dir=self.logdir,
                hooks=[self.sync_replicas_hook]) as self.session:
            step = 0
            try:
                while not self.session.should_stop():
                    # Pull a batch from the input pipeline, then feed it
                    # back in for the actual training step.
                    [trainx, trainy_] = self.session.run(
                        [self.trainx, self.trainy_])
                    feed = {self.modelObj.x: trainx,
                            self.modelObj.y_: trainy_,
                            self.modelObj.batch: self.batch_size,
                            self.modelObj.keep_prob: 0.7}
                    _, trainloss = self.session.run(
                        [self.trainstep, self.modelObj.loss],
                        feed_dict=feed)
                    print("step: %d, training loss %f" % (step, trainloss))
                    step += 1
            except tf.errors.OutOfRangeError:
                print('training finished, number of epochs reached')
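For reference, self.server and self.task_index come from the usual between-graph replication setup; roughly like the sketch below (the method name, job names, and host addresses are placeholders, not my actual cluster spec).

def setup_cluster(self, job_name):
    # Placeholder addresses; the real spec lists my actual hosts.
    cluster = tf.train.ClusterSpec({
        'ps': ['ps0:2222'],
        'worker': ['worker0:2222', 'worker1:2222']})
    self.server = tf.train.Server(cluster, job_name=job_name,
                                  task_index=self.task_index)
    if job_name == 'ps':
        # Parameter servers only serve variables and block here.
        self.server.join()
    else:
        # Between-graph replication: each worker builds its own copy of
        # the graph, with variables placed on the parameter servers.
        with tf.device(tf.train.replica_device_setter(
                worker_device='/job:worker/task:%d' % self.task_index,
                cluster=cluster)):
            self.build()
        self.train()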