This article collects typical usage examples of the Python function tensorflow.contrib.session_bundle.exporter.classification_signature. If you are wondering what classification_signature does and how to call it, the curated examples below should help.
Sixteen code examples of classification_signature are shown below, sorted by popularity by default. You can upvote the examples you find useful; that feedback helps surface better Python code samples.
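Before turning to the individual examples, here is a minimal, self-contained sketch of the typical call pattern. It assumes the legacy TensorFlow 1.x tensorflow.contrib.session_bundle API (since removed in favor of SavedModel); the tensor shapes, names, and export path are illustrative only and are not taken from any of the examples below.

import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter

# A toy softmax model; any graph with an input tensor and a scores (or classes) tensor works.
x = tf.placeholder(tf.float32, shape=[None, 784], name='input')
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
scores = tf.nn.softmax(tf.matmul(x, w) + b, name='scores')

# Build the default classification signature from the input and score tensors.
signature = exporter.classification_signature(input_tensor=x, scores_tensor=scores)

# Hand the signature to an Exporter along with a Saver, then export one model version.
saver = tf.train.Saver(sharded=True)
model_exporter = exporter.Exporter(saver)
model_exporter.init(tf.get_default_graph().as_graph_def(),
                    default_graph_signature=signature)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  model_exporter.export('/tmp/example_model', tf.constant(1), sess)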
Example 1: classification_signature_fn_with_prob
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities', `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions['probabilities'])
  else:
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions)
  return default_signature, {}
Developer: AlbertXiebnu; Project: tensorflow; Lines of code: 26; Source file: export.py
Example 2: _classification_signature_fn
def _classification_signature_fn(examples, unused_features, predictions):
  """Servo signature function."""
  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        input_tensor=examples,
        classes_tensor=predictions[PredictionKey.CLASSES],
        scores_tensor=predictions[PredictionKey.PROBABILITIES])
  else:
    default_signature = exporter.classification_signature(
        input_tensor=examples,
        scores_tensor=predictions)

  # TODO(zakaria): add validation
  return default_signature, {}
Developer: caikehe; Project: tensorflow; Lines of code: 14; Source file: head.py
Example 3: classification_signature_fn
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, classes_tensor=predictions['classes'])
  else:
    default_signature = exporter.classification_signature(
        examples, classes_tensor=predictions)
  return default_signature, {}
Developer: Nishant23; Project: tensorflow; Lines of code: 19; Source file: export.py
Example 4: classification_signature_fn_with_prob
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities', `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions['probabilities'])
  else:
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions)
  return default_signature, {}
Developer: Nishant23; Project: tensorflow; Lines of code: 20; Source file: export.py
Example 5: classification_signature_fn
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor`.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  default_signature = exporter.classification_signature(
      examples, classes_tensor=predictions)
  return default_signature, {}
Developer: barantes; Project: tensorflow; Lines of code: 14; Source file: export.py
Example 6: classification_signature_fn
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError("examples cannot be None when using this signature fn.")

  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(examples, classes_tensor=predictions["classes"])
  else:
    default_signature = exporter.classification_signature(examples, classes_tensor=predictions)
  return default_signature, {}
Developer: paolodedios; Project: tensorflow; Lines of code: 23; Source file: export.py
Example 7: classification_signature_fn_with_prob
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  default_signature = exporter.classification_signature(
      examples, scores_tensor=predictions)
  return default_signature, {}
Developer: barantes; Project: tensorflow; Lines of code: 15; Source file: export.py
Example 8: classification_signature_fn
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `dict` of `Tensor`s.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  signature = exporter.classification_signature(
      examples,
      classes_tensor=predictions[Classifier.CLASS_OUTPUT],
      scores_tensor=predictions[Classifier.PROBABILITY_OUTPUT])
  return signature, {}
Developer: ComeOnGetMe; Project: tensorflow; Lines of code: 16; Source file: classifier.py
Example 9: main
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--export_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print 'Please specify a positive value for training iteration.'
    sys.exit(-1)
  if FLAGS.export_version <= 0:
    print 'Please specify a positive value for version number.'
    sys.exit(-1)

  # Train model
  print 'Training model...'
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  x = tf.placeholder('float', shape=[None, 784])
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b)
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print 'training accuracy %g' % sess.run(accuracy,
                                          feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels})
  print 'Done training!'

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path = sys.argv[-1]
  print 'Exporting trained model to', export_path
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  signature = exporter.classification_signature(input_tensor=x, scores_tensor=y)
  model_exporter.init(sess.graph.as_graph_def(),
                      default_graph_signature=signature)
  model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
  print 'Done exporting!'
Developer: charlesDADI; Project: serving; Lines of code: 47; Source file: mnist_export.py
Example 10: doBasicsOneExportPath
def doBasicsOneExportPath(self,
                          export_path,
                          clear_devices=False,
                          global_step=GLOBAL_STEP,
                          sharded=True):
  # Build a graph with 2 parameter nodes on different devices.
  tf.reset_default_graph()
  with tf.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    # v2 is an unsaved variable derived from v0 and v1. It is used to
    # exercise the ability to run an init op when restoring a graph.
    with sess.graph.device("/cpu:0"):
      v0 = tf.Variable(10, name="v0")
    with sess.graph.device("/cpu:1"):
      v1 = tf.Variable(20, name="v1")
    v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
    assign_v2 = tf.assign(v2, tf.add(v0, v1))
    init_op = tf.group(assign_v2, name="init_op")

    tf.add_to_collection("v", v0)
    tf.add_to_collection("v", v1)
    tf.add_to_collection("v", v2)

    global_step_tensor = tf.Variable(global_step, name="global_step")
    named_tensor_bindings = {"logical_input_A": v0, "logical_input_B": v1}
    signatures = {
        "foo": exporter.regression_signature(input_tensor=v0,
                                             output_tensor=v1),
        "generic": exporter.generic_signature(named_tensor_bindings)
    }

    asset_filepath_orig = os.path.join(tf.test.get_temp_dir(), "hello42.txt")
    asset_file = tf.constant(asset_filepath_orig, name="filename42")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file)

    with gfile.FastGFile(asset_filepath_orig, "w") as f:
      f.write("your data here")
    assets_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

    ignored_asset = os.path.join(tf.test.get_temp_dir(), "ignored.txt")
    with gfile.FastGFile(ignored_asset, "w") as f:
      f.write("additional data here")

    tf.initialize_all_variables().run()

    # Run an export.
    save = tf.train.Saver({"v0": v0,
                           "v1": v1},
                          restore_sequentially=True,
                          sharded=sharded)
    export = exporter.Exporter(save)
    export.init(sess.graph.as_graph_def(),
                init_op=init_op,
                clear_devices=clear_devices,
                default_graph_signature=exporter.classification_signature(
                    input_tensor=v0),
                named_graph_signatures=signatures,
                assets_collection=assets_collection)
    export.export(export_path,
                  global_step_tensor,
                  sess,
                  exports_to_keep=gc.largest_export_versions(2))

  # Restore graph.
  compare_def = tf.get_default_graph().as_graph_def()
  tf.reset_default_graph()
  with tf.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    save = tf.train.import_meta_graph(
        os.path.join(export_path, constants.VERSION_FORMAT_SPECIFIER %
                     global_step, constants.META_GRAPH_DEF_FILENAME))
    self.assertIsNotNone(save)
    meta_graph_def = save.export_meta_graph()
    collection_def = meta_graph_def.collection_def

    # Validate custom graph_def.
    graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
    self.assertEquals(len(graph_def_any), 1)
    graph_def = tf.GraphDef()
    graph_def_any[0].Unpack(graph_def)
    if clear_devices:
      for node in compare_def.node:
        node.device = ""
    self.assertProtoEquals(compare_def, graph_def)

    # Validate init_op.
    init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
    self.assertEquals(len(init_ops), 1)
    self.assertEquals(init_ops[0], "init_op")

    # Validate signatures.
    signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
    self.assertEquals(len(signatures_any), 1)
    signatures = manifest_pb2.Signatures()
    signatures_any[0].Unpack(signatures)
    default_signature = signatures.default_signature
    self.assertEqual(
        default_signature.classification_signature.input.tensor_name, "v0:0")
#......... remaining code omitted .........
Developer: 2020zyc; Project: tensorflow; Lines of code: 101; Source file: exporter_test.py
Example 11: export
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()

  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      classification_signature = exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=classes,
          scores_tensor=values)
      named_graph_signature = {
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({
              'classes': classes,
              'scores': values
          })}
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(
          init_op=init_op,
          default_graph_signature=classification_signature,
          named_graph_signatures=named_graph_signature)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Developer: nilbody; Project: serving; Lines of code: 83; Source file: inception_export.py
Example 12: cnn
#......... earlier part of this function omitted .........
    #featurewise_std_normalization=False,
    #rotation_range=0,
    #width_shift_range=0.3,
    #height_shift_range=0.3,
    #zoom_range=[0,1.3],
    #shear_range=0.2,
    #horizontal_flip=True,
    #vertical_flip=True)
    """
    datagen.fit(X_train)
    model.fit_generator(
        datagen.flow(
            X_train,
            Y_train,
            batch_size=1024,
            shuffle=True),
        samples_per_epoch=len(X_train),
        nb_epoch=nb_epoch,
        verbose=1,
        validation_data=(
            test_i[0],
            test_o[0]))
    """

    # Set up some params
    nb_epoch = 25  # 100  # number of epochs to train on
    batch_size = 1024  # training batch size

    tb = TensorBoard(log_dir='./logs')

    # perform training ...
    #   - call the main training loop in keras for our network+dataset
    filepath = 'convmodrecnets_CNN2_0.5.wts.h5'
    history = model.fit(X_train,
                        Y_train,
                        batch_size=batch_size,
                        nb_epoch=nb_epoch,
                        show_accuracy=False,
                        verbose=2,
                        validation_data=(X_test, Y_test),
                        callbacks=[
                            keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='auto'),
                            keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
                        ])

    # we re-load the best weights once training is finished
    model.load_weights(filepath)

    K.set_learning_phase(0)

    acc = {}
    for snr in snrs:
        # extract classes @ SNR
        test_SNRs = map(lambda x: lbl[x][1], test_idx)
        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        #print("PREDICT ", test_Y_i_hat)

        conf = np.zeros([len(classes), len(classes)])
        confnorm = np.zeros([len(classes), len(classes)])
        for i in range(0, test_X_i.shape[0]):
            j = list(test_Y_i[i, :]).index(1)
            k = int(np.argmax(test_Y_i_hat[i, :]))
            conf[j, k] = conf[j, k] + 1
        for i in range(0, len(classes)):
            confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])
        plt.figure()
        plot_confusion_matrix(confnorm, labels=classes, title="ConvNet Confusion Matrix (SNR=%d)" % (snr))

        cor = np.sum(np.diag(conf))
        ncor = np.sum(conf) - cor
        print("Overall Accuracy: ", cor / (cor + ncor))
        acc[snr] = 1.0 * cor / (cor + ncor)

    config = model.get_config()
    weights = model.get_weights()
    new_model = models.Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/cnn"
    export_version = 1

    labels_tensor = tf.constant(mods)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input, classes_tensor=labels_tensor, scores_tensor=new_model.output)
    model_exporter.init(
        sess.graph.as_graph_def(),
        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Developer: chrisruk; Project: models; Lines of code: 101; Source file: cnn_generate.py
Example 13: fam
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i), np.array(train_o))

    model = Sequential()
    model.add(Convolution2D(64, 11, 11, subsample=(2, 2),
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)
    datagen.fit(X_train)
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
                        samples_per_epoch=len(X_train), nb_epoch=5, verbose=1,
                        validation_data=(test_i[0], test_o[0]))
    """

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=True, validation_data=(test_i[0], test_o[0]))

    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)

    config = model.get_config()
    weights = model.get_weights()
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input, classes_tensor=labels_tensor, scores_tensor=new_model.output)
    model_exporter.init(
        sess.graph.as_graph_def(),
        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Developer: chrisruk; Project: models; Lines of code: 89; Source file: fam_generate.py
Example 14: main
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--export_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print('Please specify a positive value for training iteration.')
    sys.exit(-1)
  if FLAGS.export_version <= 0:
    print('Please specify a positive value for version number.')
    sys.exit(-1)

  # Train model
  print('Training model...')
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {
      'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),
  }
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices),
      mapping=tf.constant([str(i) for i in range(10)]))
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print('training accuracy %g' % sess.run(accuracy,
                                          feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels}))
  print('Done training!')

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path = sys.argv[-1]
  print('Exporting trained model to %s' % export_path)
  init_op = tf.group(tf.initialize_all_tables(), name='init_op')
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  model_exporter.init(
      sess.graph.as_graph_def(),
      init_op=init_op,
      default_graph_signature=exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=prediction_classes,
          scores_tensor=values),
      named_graph_signatures={
          'inputs': exporter.generic_signature({'images': x}),
          'outputs': exporter.generic_signature({'scores': y})})
  model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
  print('Done exporting!')
Developer: nilbody; Project: serving; Lines of code: 64; Source file: mnist_export.py
Example 15: export
def export(model_path, export_path, export_version, export_for_serving, cfg):
  graph = tf.get_default_graph()

  sess_config = tf.ConfigProto(
      log_device_placement=False,
      allow_soft_placement=True,
      gpu_options=tf.GPUOptions(
          per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.PER_PROCESS_GPU_MEMORY_FRACTION
      )
  )

  # GVH: This is a little tricky.
  #      tf.image.decode_jpeg does not have a batch implementation, creating a bottleneck
  #      for batching. We can request the user to send in a raveled image, but this will
  #      increase our transport size over the network. Also, should we assume that the images
  #      have been completely preprocessed by the user? (mean subtracted, scaled by std, etc?)
  # GVH: We could just make this a switch and let the user decide what to do.

  # JPEG bytes:
  # jpegs = tf.placeholder(tf.string, shape=(1))
  # image_buffer = tf.squeeze(jpegs, [0])
  # image = tf.image.decode_jpeg(image_buffer, channels=3)
  # image = tf.cast(image, tf.float32)
  # images = tf.expand_dims(image, 0)
  # images = tf.image.resize_images(images, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
  # images -= cfg.IMAGE_MEAN
  # images /= cfg.IMAGE_STD

  # For now we'll assume that the user is sending us a raveled array, totally preprocessed.
  image_data = tf.placeholder(tf.float32, [None, cfg.INPUT_SIZE * cfg.INPUT_SIZE * 3], name="images")
  images = tf.reshape(image_data, [-1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])

  features = model.build(graph, images, cfg)
  logits = add_logits(graph, features, cfg.NUM_CLASSES)
  class_scores, predicted_classes = tf.nn.top_k(logits, k=cfg.NUM_CLASSES)

  ema = tf.train.ExponentialMovingAverage(decay=cfg.MOVING_AVERAGE_DECAY)
  shadow_vars = {
      ema.average_name(var): var
      for var in graph.get_collection('conv_params')
  }
  shadow_vars.update({
      ema.average_name(var): var
      for var in graph.get_collection('batchnorm_params')
  })
  shadow_vars.update({
      ema.average_name(var): var
      for var in graph.get_collection('softmax_params')
  })
  shadow_vars.update({
      ema.average_name(var): var
      for var in graph.get_collection('batchnorm_mean_var')
  })

  # Restore the variables
  saver = tf.train.Saver(shadow_vars, reshape=True)

  with tf.Session(graph=graph, config=sess_config) as sess:
    tf.global_variables_initializer()
    saver.restore(sess, model_path)

    # TODO: Change to options flag
    if export_for_serving:
      export_saver = tf.train.Saver(sharded=True)
      model_exporter = exporter.Exporter(export_saver)
      signature = exporter.classification_signature(
          input_tensor=image_data, scores_tensor=class_scores, classes_tensor=predicted_classes)
      model_exporter.init(sess.graph.as_graph_def(),
                          default_graph_signature=signature)
      model_exporter.export(export_path, tf.constant(export_version), sess)
    else:
      v2c = graph_util.convert_variables_to_constants
      deploy_graph_def = v2c(sess, graph.as_graph_def(), [logits.name[:-2]])

      if not os.path.exists(export_path):
        os.makedirs(export_path)
      save_path = os.path.join(export_path, 'constant_model-%d.pb' % (export_version,))
      with open(save_path, 'wb') as f:
        f.write(deploy_graph_def.SerializeToString())
Developer: gvanhorn38; Project: inception; Lines of code: 84; Source file: export.py
Example 16: export
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()

  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    # TODO(b/27776734): Add batching support.
    jpegs = tf.placeholder(tf.string, shape=(1))
    image_buffer = tf.squeeze(jpegs, [0])
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1). The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image,
                                     [FLAGS.image_size, FLAGS.image_size],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    images = tf.expand_dims(image, 0)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      signature = exporter.classification_signature(
          input_tensor=jpegs, classes_tensor=classes, scores_tensor=values)
      model_exporter.init(default_graph_signature=signature, init_op=init_op)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Developer: charlesDADI; Project: serving; Lines of code: 90; Source file: inception_export.py
Note: The tensorflow.contrib.session_bundle.exporter.classification_signature examples in this article were collected from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.