I am developing a TensorFlow Serving client/server application based on the chatbot-retrieval project, and I am getting the error "You must feed a value for placeholder tensor 'input_example_tensor' with dtype string and shape [1]".
My code has two parts: a serving part and a client part.
Below is the code snippet for the serving part.
def get_features(context, utterance):
    context_len = 50
    utterance_len = 50
    features = {
        "context": context,
        "context_len": tf.constant(context_len, shape=[1,1], dtype=tf.int64),
        "utterance": utterance,
        "utterance_len": tf.constant(utterance_len, shape=[1,1], dtype=tf.int64),
    }
    return features

def my_input_fn(estimator, input_example_tensor):
    feature_configs = {
        'context': tf.FixedLenFeature(shape=[50], dtype=tf.int64),
        'utterance': tf.FixedLenFeature(shape=[50], dtype=tf.int64)
    }
    tf_example = tf.parse_example(input_example_tensor, feature_configs)
    context = tf.identity(tf_example['context'], name='context')
    utterance = tf.identity(tf_example['utterance'], name='utterance')
    features = get_features(context, utterance)
    return features
def my_signature_fn(input_example_tensor, features, predictions):
    feature_configs = {
        'context': tf.FixedLenFeature(shape=[50], dtype=tf.int64),
        'utterance': tf.FixedLenFeature(shape=[50], dtype=tf.int64)
    }
    tf_example = tf.parse_example(input_example_tensor, feature_configs)
    tf_context = tf.identity(tf_example['context'], name='tf_context_utterance')
    tf_utterance = tf.identity(tf_example['utterance'], name='tf_utterance')
    default_graph_signature = exporter.regression_signature(
        input_tensor=input_example_tensor,
        output_tensor=tf.identity(predictions)
    )
    named_graph_signatures = {
        'inputs': exporter.generic_signature({
            'context': tf_context,
            'utterance': tf_utterance
        }),
        'outputs': exporter.generic_signature({
            'scores': predictions
        })
    }
    return default_graph_signature, named_graph_signatures
def main():
    ##preliminary codes here##
    estimator.fit(input_fn=input_fn_train, steps=100, monitors=[eval_monitor])
    estimator.export(
        export_dir=FLAGS.export_dir,
        input_fn=my_input_fn,
        use_deprecated_input_fn=True,
        signature_fn=my_signature_fn,
        exports_to_keep=1
    )
Below is the code snippet for the client part.
def tokenizer_fn(iterator):
    return (x.split(" ") for x in iterator)

vp = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(FLAGS.vocab_processor_file)

input_context = "biz banka kart farkli bir banka atmsinde para"
input_utterance = "farkli banka kart biz banka atmsinde para"

context_feature = np.array(list(vp.transform([input_context])))
utterance_feature = np.array(list(vp.transform([input_utterance])))

context_tensor = tf.contrib.util.make_tensor_proto(context_feature, shape=[1, context_feature.size])
utterance_tensor = tf.contrib.util.make_tensor_proto(utterance_feature, shape=[1, utterance_feature.size])

request.inputs['context'].CopyFrom(context_tensor)
request.inputs['utterance'].CopyFrom(utterance_tensor)

result_counter.throttle()
result_future = stub.Predict.future(request, 5.0)  # 5 seconds
result_future.add_done_callback(
    _create_rpc_callback(label[0], result_counter))
return result_counter.get_error_rate()
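The client snippet assumes that a gRPC stub and a request object already exist. A minimal sketch of how they are typically created with the beta gRPC API used by the TF Serving examples of that era; the host, port, and model name are assumptions, not taken from the question:

# Hedged sketch: boilerplate assumed by the client snippet above.
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2

host, port = 'localhost', 9000  # assumed serving address
channel = implementations.insecure_channel(host, port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'chatbot_retrieval'  # assumed model name
# request.inputs['context'] / request.inputs['utterance'] are then filled in
# exactly as in the snippet above before calling stub.Predict.future(...).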
Both the serving part and the client part build without errors. After starting the serving application and then running the client application, the RPC call completes and the following strange error is returned to the client:
AbortionError(code=StatusCode.INVALID_ARGUMENT, details="You must feed a value for placeholder tensor 'input_example_tensor' with dtype string and shape [1]
[[Node: input_example_tensor = Placeholder[_output_shapes=[[1]], dtype=DT_STRING, shape=[1], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]")
The error is strange because there seems to be no way to feed that placeholder from the client application in the first place.
How can I feed data to the placeholder 'input_example_tensor' when I am accessing the model through TensorFlow Serving?
Answer: (I am posting my answer here inside the question because, lacking the necessary StackOverflow reputation, I cannot post it as a regular answer; anyone who volunteers is welcome to submit it as an answer.)
I was able to solve the problem by using the option use_deprecated_input_fn=False in the estimator.export call and changing the input signatures accordingly.
Below is the final code, which runs without any problems.
def get_features(input_example_tensor, context, utterance):
    context_len = 50
    utterance_len = 50
    features = {
        "my_input_example_tensor": input_example_tensor,
        "context": context,
        "context_len": tf.constant(context_len, shape=[1,1], dtype=tf.int64),
        "utterance": utterance,
        "utterance_len": tf.constant(utterance_len, shape=[1,1], dtype=tf.int64),
    }
    return features

def my_input_fn():
    input_example_tensor = tf.placeholder(tf.string, name='tf_example_placeholder')
    feature_configs = {
        'context': tf.FixedLenFeature(shape=[50], dtype=tf.int64),
        'utterance': tf.FixedLenFeature(shape=[50], dtype=tf.int64)
    }
    tf_example = tf.parse_example(input_example_tensor, feature_configs)
    context = tf.identity(tf_example['context'], name='context')
    utterance = tf.identity(tf_example['utterance'], name='utterance')
    features = get_features(input_example_tensor, context, utterance)
    return features, None
def my_signature_fn(input_example_tensor, features, predictions):
    default_graph_signature = exporter.regression_signature(
        input_tensor=input_example_tensor,
        output_tensor=predictions
    )
    named_graph_signatures = {
        'inputs': exporter.generic_signature({
            'context': features['context'],
            'utterance': features['utterance']
        }),
        'outputs': exporter.generic_signature({
            'scores': predictions
        })
    }
    return default_graph_signature, named_graph_signatures
def main():
    ##preliminary codes here##
    estimator.fit(input_fn=input_fn_train, steps=100, monitors=[eval_monitor])
    # Works around the export-time targets-signature check (note: sets a private attribute).
    estimator._targets_info = tf.contrib.learn.estimators.tensor_signature.TensorSignature(
        tf.constant(0, shape=[1,1]))
    estimator.export(
        export_dir=FLAGS.export_dir,
        input_fn=my_input_fn,
        input_feature_key="my_input_example_tensor",
        use_deprecated_input_fn=False,
        signature_fn=my_signature_fn,
        exports_to_keep=1
    )
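With this export, the default regression signature takes the serialized tf.train.Example string (input_example_tensor) as its input. If a client wanted to exercise that path instead of feeding 'context' and 'utterance' directly, a serialized Example matching the feature_configs in my_input_fn could be built roughly as follows; it reuses vp, input_context, input_utterance, and request from the client snippet above, and the 'examples' input key is an assumption that depends on the exported signature:

# Hedged sketch: building a serialized tf.train.Example that matches
# feature_configs in my_input_fn (two int64 features of length 50 each).
import numpy as np
import tensorflow as tf

context_ids = np.array(list(vp.transform([input_context])))[0]      # shape (50,)
utterance_ids = np.array(list(vp.transform([input_utterance])))[0]  # shape (50,)

example = tf.train.Example(features=tf.train.Features(feature={
    'context': tf.train.Feature(int64_list=tf.train.Int64List(value=context_ids.tolist())),
    'utterance': tf.train.Feature(int64_list=tf.train.Int64List(value=utterance_ids.tolist())),
}))
serialized = example.SerializeToString()

# The placeholder expects dtype string and shape [1], i.e. a batch of one
# serialized Example. 'examples' is an assumed input key, not taken from the post.
request.inputs['examples'].CopyFrom(
    tf.contrib.util.make_tensor_proto([serialized], shape=[1]))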
Are you able to get the full stack trace of the error? It is not clear what the type and shape of the value actually being fed are, which is what triggers the error. –
I was able to solve the problem by using the option use_deprecated_input_fn=False in estimator.export and changing the input signature accordingly. Since I am new to the site, StackOverflow does not accept an answer from my account, and this question has been downvoted. –
Could you please upvote the question so that I can post the working code for it? –