This article collects typical usage examples of the tensorflow.name_scope function in Python. If you have been wondering what exactly name_scope does, how to call it, or what it looks like in real code, the curated function examples here should help.
Below are 20 code examples of name_scope, sorted by popularity by default.
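Before the examples, a minimal sketch of what tf.name_scope does, assuming the TensorFlow 1.x graph API used throughout this page: it prefixes the names of ops created inside it, which groups them visually in TensorBoard. Unlike tf.variable_scope, it does not affect variables obtained through tf.get_variable (Examples 12, 15, and 17 below exercise exactly this distinction).

import tensorflow as tf  # TensorFlow 1.x, as in the examples below

with tf.name_scope('block'):
    a = tf.constant(1.0, name='a')      # op named 'block/a'
    b = tf.constant(2.0, name='b')      # op named 'block/b'
    total = tf.add(a, b, name='total')  # op named 'block/total'

print(total.name)  # -> block/total:0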
Example 1: bboxes_clip
def bboxes_clip(bbox_ref, bboxes, scope=None):
    """Clip bounding boxes to a reference box.

    Batch-compatible if the first dimension of `bbox_ref` and `bboxes`
    can be broadcast.

    Args:
      bbox_ref: Reference bounding box. Nx4 or 4 shaped-Tensor.
      bboxes: Bounding boxes to clip. Nx4 or 4 shaped-Tensor or dictionary.
    Returns:
      Clipped bboxes.
    """
    # Bboxes is a dictionary.
    if isinstance(bboxes, dict):
        with tf.name_scope(scope, 'bboxes_clip_dict'):
            d_bboxes = {}
            for c in bboxes.keys():
                d_bboxes[c] = bboxes_clip(bbox_ref, bboxes[c])
            return d_bboxes

    # Tensor inputs.
    with tf.name_scope(scope, 'bboxes_clip'):
        # Easier with transposed bboxes, especially for broadcasting.
        bbox_ref = tf.transpose(bbox_ref)
        bboxes = tf.transpose(bboxes)
        # Intersection of bboxes and the reference bbox.
        ymin = tf.maximum(bboxes[0], bbox_ref[0])
        xmin = tf.maximum(bboxes[1], bbox_ref[1])
        ymax = tf.minimum(bboxes[2], bbox_ref[2])
        xmax = tf.minimum(bboxes[3], bbox_ref[3])
        # Double check! Empty boxes when there is no intersection.
        ymin = tf.minimum(ymin, ymax)
        xmin = tf.minimum(xmin, xmax)
        bboxes = tf.transpose(tf.stack([ymin, xmin, ymax, xmax], axis=0))
        return bboxes
Author: bowrian | Project: SSD-Tensorflow | Lines: 34 | Source: bboxes.py
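A quick usage sketch for the function above, assuming TF 1.x and the [ymin, xmin, ymax, xmax] coordinate convention normalized to [0, 1] that SSD-Tensorflow uses; the input values here are illustrative:

bbox_ref = tf.constant([0.0, 0.0, 1.0, 1.0])   # reference box covering the whole image
bboxes = tf.constant([[-0.1, 0.2, 0.5, 1.3],   # partially out of bounds
                      [0.3, 0.3, 0.9, 0.8]])   # already inside the reference
clipped = bboxes_clip(bbox_ref, bboxes)
with tf.Session() as sess:
    print(sess.run(clipped))  # first row becomes [0.0, 0.2, 0.5, 1.0]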
Example 2: fc_layers
def fc_layers(self):
    # fc1
    with tf.name_scope('fc1') as scope:
        shape = int(np.prod(self.pool5.get_shape()[1:]))
        fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                               dtype=tf.float32,
                                               stddev=1e-1), name='weights')
        fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                           trainable=True, name='biases')
        pool5_flat = tf.reshape(self.pool5, [-1, shape])
        fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
        self.fc1 = tf.nn.relu(fc1l)
        self.parameters += [fc1w, fc1b]

    # fc2
    with tf.name_scope('fc2') as scope:
        fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                               dtype=tf.float32,
                                               stddev=1e-1), name='weights')
        fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                           trainable=True, name='biases')
        fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
        self.fc2 = tf.nn.relu(fc2l)
        self.parameters += [fc2w, fc2b]

    # fc3
    with tf.name_scope('fc3') as scope:
        fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                               dtype=tf.float32,
                                               stddev=1e-1), name='weights')
        fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                           trainable=True, name='biases')
        self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
        self.parameters += [fc3w, fc3b]
Author: muhammadzak | Project: TensorFlow-Book | Lines: 34 | Source: vgg16.py
Example 3: make_HBF2_model
def make_HBF2_model(x, W1, S1, C1, W2, S2, C2, phase_train):
    with tf.name_scope("layer1") as scope:
        layer1 = ml.get_Gaussian_layer(x, W1, S1, C1, phase_train)
    with tf.name_scope("layer2") as scope:
        layer2 = ml.get_Gaussian_layer(layer1, W2, S2, C2, phase_train)
    y = layer2
    return y
Author: brando90 | Project: tensor_flow_experiments | Lines: 7 | Source: test_hbf2_tensorboard.py
Example 4: ce
def ce(model, config, scope, connect, threshold=1e-5):
    with tf.variable_scope(scope), tf.name_scope(scope):
        with tf.variable_scope('inputs'), tf.name_scope('inputs'):
            model['%s_in0length' % scope] = model['%s_out0length' % connect]
            model['%s_in1length' % scope] = model['%s_out1length' % connect]
            model['%s_in2length' % scope] = model['%s_out2length' % connect]
            model['%s_maxin2length' % scope] = model['%s_maxout2length' % connect]
            model['%s_inputs' % scope] = tf.clip_by_value(
                tf.nn.softmax(model['%s_outputs' % connect]),
                threshold, 1. - threshold, name='%s_inputs' % scope)
            model['%s_out0length' % scope] = model['%s_in0length' % scope]
            model['%s_out1length' % scope] = model['%s_in1length' % scope]
            model['%s_out2length' % scope] = tf.placeholder(
                tf.int32, [model['%s_in0length' % scope]], '%s_out2length' % scope)
            model['%s_maxout2length' % scope] = model['%s_maxin2length' % scope]
        with tf.variable_scope('labels'), tf.name_scope('labels'):
            model['%s_labels_len' % scope] = tf.placeholder(
                tf.int32, [model['%s_in0length' % scope]], '%s_labels_len' % scope)
            model['%s_labels_ind' % scope] = tf.placeholder(
                tf.int64, [None, 2], '%s_labels_ind' % scope)
            model['%s_labels_val' % scope] = tf.placeholder(
                tf.int32, [None], '%s_labels_val' % scope)
            model['%s_labels_collapsed' % scope] = tf.sparse_to_dense(
                model['%s_labels_ind' % scope],
                [model['%s_maxin2length' % scope], model['%s_in0length' % scope]],
                model['%s_labels_val' % scope], -1,
                name='%s_labels_collapsed' % scope)
            model['%s_labels' % scope] = tf.one_hot(
                model['%s_labels_collapsed' % scope], model['%s_out1length' % scope],
                name='%s_labels' % scope)
        with tf.variable_scope('loss'), tf.name_scope('loss'):
            model['%s_loss' % scope] = tf.reduce_sum(
                -tf.multiply(model['%s_labels' % scope],
                             tf.log(model['%s_inputs' % scope])),
                name='%s_loss' % scope)
        with tf.variable_scope('outputs'), tf.name_scope('outputs'):
            model['%s_output' % scope] = model['%s_inputs' % scope]
    return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 27 | Source: ce.py
Example 5: nn_conv_layer
def nn_conv_layer(input_tensor, patch_size, num_channels, output_depth,
                  layer_name, biases=False, act=None, pool=None):
    """Reusable code for making a simple neural net layer."""
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        # This Variable will hold the state of the weights for the layer.
        with tf.name_scope('weights'):
            weights = weight_variable([patch_size, patch_size, num_channels, output_depth])
            # print("weights:%s" % (weights.get_shape()))
            variable_summaries(weights, layer_name + '/weights')
        if biases:
            with tf.name_scope('biases'):
                biases = bias_variable([output_depth])
                # print("biases:%s" % (biases.get_shape()))
                variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('conv2d'):
            # print("input:%s" % (input_tensor.get_shape()))
            preactivate = tf.nn.conv2d(input_tensor, weights, [1, 1, 1, 1], padding='SAME')
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
        print("preactivate:%s" % (preactivate.get_shape()))
        if pool is not None:
            max_pool = pool(preactivate, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                            padding='SAME', name='max_pool')
            # The activation is only defined when pooling is applied, since it
            # consumes max_pool.
            if act is not None:
                activations = act(max_pool + biases, 'activation')
                # tf.histogram_summary(layer_name + '/activations', activations)
        return preactivate
Author: KannShi | Project: Udacity_DL | Lines: 29 | Source: CNN.py
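The layer above depends on weight_variable, bias_variable, and variable_summaries, helpers defined elsewhere in that project rather than in TensorFlow itself. A minimal sketch of what such helpers typically look like (the names are taken from the snippet, but the initializer values and summary details are assumptions), using the same TF 0.x summary API as the tf.histogram_summary call above:

def weight_variable(shape):
    # Truncated-normal initialization, as in the classic TensorBoard tutorials.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def variable_summaries(var, name):
    # Record the mean and a histogram of `var` for TensorBoard.
    tf.scalar_summary(name + '/mean', tf.reduce_mean(var))
    tf.histogram_summary(name, var)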
Example 6: inference
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "biases", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # Layer 2: pooling
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')
    # Layer 3: convolution
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "biases", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    # Layer 4: pooling
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
    # Layer 5: fully connected
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights need regularization.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Layer 6: fully connected
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights need regularization.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
Author: yyzahuopu | Project: Deep-learning | Lines: 60 | Source: mnist_inferenceCNN.py
Example 7: build
def build(self):
    """Build the model graph."""
    with tf.name_scope('G_'):
        self.predict_g = self.__G__()
        self.predict_g2 = self.__G2__()
    with tf.name_scope('D_'):
        # Create reference examples.
        # input_d holds real & imaginary values; the discriminative decision is
        # based on the reconstructed image.
        self.reconstructed_image_reference = self.get_reconstructed_image(
            real=self.input_d['real'], imag=self.input_d['imag'], name='Both_gt')
        predict_g2_stacked = tf.stack(
            [self.predict_g2['real'][:, 0, :, :], self.predict_g2['imag'][:, 0, :, :]], axis=1)
        self.predict, self.predict_logits = self.__D__(
            [self.reconstructed_image_reference, predict_g2_stacked])
        self.predict_d, self.predict_d_for_g = tf.split(
            value=self.predict, num_or_size_splits=2, axis=0)
        self.predict_d_logits, self.predict_d_logits_for_g = tf.split(
            value=self.predict_logits, num_or_size_splits=2, axis=0)
        self.clip_weights = self.__clip_weights__()
    with tf.name_scope('loss'):
        # self.loss_g = self.__loss_g__(predict=self.predict_g, self.labels, reg=self.regularization_sum)
        self.__loss__()
    with tf.name_scope('training'):
        self.train_op_d, self.train_op_g = self.__training__(learning_rate=self.FLAGS.learning_rate)
    with tf.name_scope('evaluation'):
        # Calculate accuracy (L2 norm).
        self.evaluation = self.__evaluation__(predict=self.predict_g, labels=self.labels)
Author: shohad25 | Project: thesis | Lines: 35 | Source: k_space_wgan_gl_g2_unet_Gloss.py
Example 8: loss
def loss(self, logits, labels, regularization):
    """Adds to the inference model the layers required to generate loss."""
    with tf.name_scope('loss'):
        with tf.name_scope('var_loss'):
            labels = tf.cast(labels, tf.float32)
            shape = labels.get_shape()
            same_class = tf.boolean_mask(logits, tf.equal(labels, tf.ones(shape)))
            diff_class = tf.boolean_mask(logits, tf.not_equal(labels, tf.ones(shape)))
            same_mean, same_var = tf.nn.moments(same_class, [0])
            diff_mean, diff_var = tf.nn.moments(diff_class, [0])
            var_loss = same_var + diff_var
        with tf.name_scope('mean_loss'):
            mean_loss = self.lamda * tf.where(
                tf.greater(self.mu - (same_mean - diff_mean), 0),
                self.mu - (same_mean - diff_mean), 0)
        with tf.name_scope('regularization'):
            regularization *= tf.add_n(self.regularizers)
        loss = var_loss + mean_loss + regularization
        # Summaries for TensorBoard.
        tf.summary.scalar('loss/total', loss)
        with tf.name_scope('averages'):
            averages = tf.train.ExponentialMovingAverage(0.9)
            op_averages = averages.apply([var_loss, mean_loss, regularization, loss])
            tf.summary.scalar('loss/avg/var_loss', averages.average(var_loss))
            tf.summary.scalar('loss/avg/mean_loss', averages.average(mean_loss))
            tf.summary.scalar('loss/avg/regularization', averages.average(regularization))
            tf.summary.scalar('loss/avg/total', averages.average(loss))
            with tf.control_dependencies([op_averages]):
                loss_average = tf.identity(averages.average(loss), name='control')
        return loss, loss_average
Author: parisots | Project: gcn_metric_learning | Lines: 34 | Source: models_siamese.py
Example 9: inference
def inference(images, hidden1_units, hidden2_units):
    """Build the feed-forward neural network model.

    Args:
      images: input image data.
      hidden1_units: number of neurons in the first hidden layer.
      hidden2_units: number of neurons in the second hidden layer.
    Returns:
      softmax_linear: output tensor with the computed results.
    """
    # Hidden layer 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Hidden layer 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Linear output layer
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(tf.truncated_normal([hidden2_units, NUM_CLASSES]),
                              name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
Author: rickyall | Project: tensorflow | Lines: 26 | Source: MNIST_FFNN.py
Example 10: generate_model
def generate_model(self, model, name=''):
    if not model:
        return self
    with tf.name_scope('state'):
        self.keep_prob = tf.placeholder(tf.float32)  # 1 for testing! else 1 - dropout
        self.train_phase = tf.placeholder(tf.bool, name='train_phase')
        with tf.device(_cpu):
            # Don't set, feed, or increment global_step; TensorFlow does it automatically.
            self.global_step = tf.Variable(0)
    with tf.name_scope('data'):
        if len(self.input_shape) == 1:
            self.input_width = self.input_shape[0]
        elif self.input_shape:
            self.x = x = self.input = tf.placeholder(
                tf.float32, [None, self.input_shape[0], self.input_shape[1]])
            # todo [None, self.input_shape]
            self.last_layer = x
            self.last_shape = x
        elif self.input_width:
            self.x = x = self.target = tf.placeholder(tf.float32, [None, self.input_width])
            self.last_layer = x
        else:
            raise Exception("need input_shape or input_width by now")
        self.y = y = self.target = tf.placeholder(tf.float32, [None, self.output_width])
    with tf.name_scope('model'):
        model(self)
        if self.last_width != self.output_width:
            self.classifier()  # 10 classes auto
Author: duydb2 | Project: tensorflow-speech-recognition | Lines: 25 | Source: net.py
Example 11: build
def build(self):
    """Build the model graph."""
    with tf.name_scope('G_'):
        self.predict_g = self.__G__()
    with tf.name_scope('D_'):
        self.predict, self.predict_logits = self.__D__(
            [self.input_d, self.predict_g], input_type="Real")
        self.predict_d, self.predict_d_for_g = tf.split(
            value=self.predict, num_or_size_splits=2, axis=0)
        self.predict_d_logits, self.predict_d_logits_for_g = tf.split(
            value=self.predict_logits, num_or_size_splits=2, axis=0)
        # self.predict_d, self.predict_d_logits
        # with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        #     self.predict_d_for_g, self.predict_d_logits_for_g = self.__D__(self.predict_g, input_type="Gen")
        if len(self.regularization_values_d) > 0:
            self.regularization_sum_d = sum(self.regularization_values_d)
    with tf.name_scope('loss'):
        # self.loss_g = self.__loss_g__(predict=self.predict_g, self.labels, reg=self.regularization_sum)
        self.__loss__()
    with tf.name_scope('training'):
        self.train_op_d, self.train_op_g = self.__training__(learning_rate=self.FLAGS.learning_rate)
    with tf.name_scope('evaluation'):
        # Calculate accuracy (L2 norm).
        self.evaluation = self.__evaluation__(predict=self.predict_g, labels=self.labels)
Author: shohad25 | Project: thesis | Lines: 32 | Source: k_space_gan_unet2.py
Example 12: testSharingWeightsWithDifferentNamescope
def testSharingWeightsWithDifferentNamescope(self):
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    with self.test_session(graph=tf.Graph()) as sess:
        initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
        inputs = 10 * [
            tf.placeholder(tf.float32, shape=(None, input_size))]
        cell = rnn_cell.LSTMCell(
            num_units, input_size, use_peepholes=True,
            num_proj=num_proj, initializer=initializer)
        with tf.name_scope("scope0"):
            with tf.variable_scope("share_scope"):
                outputs0, _ = rnn.rnn(cell, inputs, dtype=tf.float32)
        with tf.name_scope("scope1"):
            with tf.variable_scope("share_scope", reuse=True):
                outputs1, _ = rnn.rnn(cell, inputs, dtype=tf.float32)
        tf.initialize_all_variables().run()
        input_value = np.random.randn(batch_size, input_size)
        output_values = sess.run(
            outputs0 + outputs1, feed_dict={inputs[0]: input_value})
        outputs0_values = output_values[:10]
        outputs1_values = output_values[10:]
        self.assertEqual(len(outputs0_values), len(outputs1_values))
        for out0, out1 in zip(outputs0_values, outputs1_values):
            self.assertAllEqual(out0, out1)
Author: adam-erickson | Project: tensorflow | Lines: 29 | Source: rnn_test.py
Example 13: bboxes_resize
def bboxes_resize(bbox_ref, bboxes, name=None):
    """Resize bounding boxes based on a reference bounding box,
    assuming that the latter is [0, 0, 1, 1] after transform. Useful for
    updating a collection of boxes after cropping an image.
    """
    # Bboxes is a dictionary.
    if isinstance(bboxes, dict):
        with tf.name_scope(name, 'bboxes_resize_dict'):
            d_bboxes = {}
            for c in bboxes.keys():
                d_bboxes[c] = bboxes_resize(bbox_ref, bboxes[c])
            return d_bboxes

    # Tensor inputs.
    with tf.name_scope(name, 'bboxes_resize'):
        # Translate.
        v = tf.stack([bbox_ref[0], bbox_ref[1], bbox_ref[0], bbox_ref[1]])
        bboxes = bboxes - v
        # Scale.
        s = tf.stack([bbox_ref[2] - bbox_ref[0],
                      bbox_ref[3] - bbox_ref[1],
                      bbox_ref[2] - bbox_ref[0],
                      bbox_ref[3] - bbox_ref[1]])
        bboxes = bboxes / s
        return bboxes
Author: bowrian | Project: SSD-Tensorflow | Lines: 25 | Source: bboxes.py
Example 14: feature_extraction
def feature_extraction(x, Nx, Ny, Ch, conv_w_1, conv_b_1, conv_w_2, conv_b_2,
                       ff_w_1, ff_b_1, m1s=4, m2s=2, No=512):
    """Creates a convolutional neural network to analyze some world tensor
    and return features from it."""
    # Check that all the sizes are consistent.
    assert Nx / m1s == int(Nx / m1s) and Nx / m1s / m2s == int(Nx / m1s / m2s)
    assert Ny / m1s == int(Ny / m1s) and Ny / m1s / m2s == int(Ny / m1s / m2s)
    # First convolutional layer
    with tf.name_scope("Convolution1"):
        conv1_act = tf.nn.conv2d(x, conv_w_1, strides=[1, 1, 1, 1], padding='SAME') + conv_b_1
        conv1 = tf.nn.relu(conv1_act, 'relu')
    # First max pooling layer
    with tf.name_scope("Max1"):
        max1 = tf.nn.max_pool(conv1, ksize=[1, m1s, m1s, 1], strides=[1, m1s, m1s, 1], padding='SAME')
    # Second convolutional layer
    with tf.name_scope("Convolution2"):
        conv2_act = tf.nn.conv2d(max1, conv_w_2, strides=[1, 1, 1, 1], padding='SAME') + conv_b_2
        conv2 = tf.nn.relu(conv2_act, 'relu')
    # Second max pooling layer
    with tf.name_scope("Max2"):
        max2 = tf.nn.max_pool(conv2, ksize=[1, m2s, m2s, 1], strides=[1, m2s, m2s, 1], padding='SAME')
    # Reshaping max2 for FF1
    max2_rshp = tf.reshape(max2, [-1, 288])  # Layer shape [None, 5, 5, 64] 1600 Total
    # First feed-forward layer
    with tf.name_scope('FF1'):
        ff1_act = tf.matmul(max2_rshp, ff_w_1) + ff_b_1
        ff1 = tf.nn.relu(ff1_act, 'relu')
    return ff1
Author: ryanpeach | Project: SurvivalAI | Lines: 33 | Source: AnalogyBuilder.py
Example 15: testVarOpScopeReuseParam
def testVarOpScopeReuseParam(self):
    with self.test_session():
        with tf.variable_scope("outer") as outer:
            with tf.variable_op_scope([], "tower", "default"):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/tower/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer/tower/scope2/")
            with tf.variable_op_scope([], None, "default"):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/default/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer/default/scope2/")

        with tf.variable_scope(outer) as outer:
            with tf.variable_op_scope([], "tower", "default", reuse=True):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/tower/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer_1/tower/scope2/")
            outer.reuse_variables()
            with tf.variable_op_scope([], None, "default"):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/default/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer_1/default/scope2/")
Author: 285219011 | Project: hello-world | Lines: 26 | Source: variable_scope_test.py
Example 16: encode
def encode(self, inputs, attention_bias):
    """Generate continuous representation for inputs.

    Args:
      inputs: int tensor with shape [batch_size, input_length].
      attention_bias: float tensor with shape [batch_size, 1, 1, input_length].
    Returns:
      float tensor with shape [batch_size, input_length, hidden_size]
    """
    with tf.name_scope("encode"):
        # Prepare inputs to the layer stack by adding positional encodings and
        # applying dropout.
        embedded_inputs = self.embedding_softmax_layer(inputs)
        inputs_padding = model_utils.get_padding(inputs)

        with tf.name_scope("add_pos_encoding"):
            length = tf.shape(embedded_inputs)[1]
            pos_encoding = model_utils.get_position_encoding(
                length, self.params.hidden_size)
            encoder_inputs = embedded_inputs + pos_encoding

        if self.train:
            encoder_inputs = tf.nn.dropout(
                encoder_inputs, 1 - self.params.layer_postprocess_dropout)

        return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
Author: cybermaster | Project: reference | Lines: 27 | Source: transformer.py
Example 17: testVarOpScopeOuterScope
def testVarOpScopeOuterScope(self):
    with self.test_session():
        with tf.variable_scope("outer") as outer:
            pass
        with tf.variable_op_scope([], outer, "default"):
            self.assertEqual(tf.get_variable("w", []).name,
                             "outer/w:0")
            with tf.name_scope("scope2") as sc2:
                self.assertEqual(sc2, "outer_1/scope2/")
            with tf.variable_op_scope([], None, "default"):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/default/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer_1/default/scope2/")

        with tf.variable_op_scope([], outer, "default", reuse=True):
            self.assertEqual(tf.get_variable("w", []).name,
                             "outer/w:0")
            with tf.name_scope("scope2") as sc2:
                self.assertEqual(sc2, "outer_2/scope2/")
            outer.reuse_variables()
            with tf.variable_op_scope([], None, "default"):
                self.assertEqual(tf.get_variable("w", []).name,
                                 "outer/default/w:0")
                with tf.name_scope("scope2") as sc2:
                    self.assertEqual(sc2, "outer_2/default/scope2/")
Author: 285219011 | Project: hello-world | Lines: 26 | Source: variable_scope_test.py
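The assertions in Examples 15 and 17 rest on one detail worth making explicit: entering a name scope with a string that has already been used makes tf.name_scope uniquify it (outer, outer_1, outer_2, ...), while variable names from tf.get_variable stay tied to the variable scope and are unaffected. A minimal standalone sketch, assuming TF 1.x:

with tf.name_scope('outer'):
    pass
with tf.name_scope('outer'):        # same string, new (uniquified) name scope
    c = tf.constant(0.0, name='c')
print(c.name)  # -> outer_1/c:0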
Example 18: add_evaluation_step
def add_evaluation_step(result_tensor, ground_truth_tensor):
    """Inserts the operations we need to evaluate the accuracy of our results.

    Args:
      result_tensor: The new final node that produces results.
      ground_truth_tensor: The node we feed ground truth data into.
    Returns:
      The evaluation step tensor.
    """
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # tf.argmax(result_tensor, 1) returns the index of the maximal value
            # (= 1 in a 1-of-N encoding vector) in each row (axis = 1).
            # But we have multiple ones (indicating multiple labels) in each row
            # of result_tensor due to the multi-label classification:
            # correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
            #                               tf.argmax(ground_truth_tensor, 1))
            # ground_truth is not a binary tensor; it contains the probabilities
            # of each label, so we need to tf.round() it to acquire a binary
            # tensor allowing comparison by tf.equal().
            # See: http://stackoverflow.com/questions/39219414/in-tensorflow-how-can-i-get-nonzero-values-and-their-indices-from-a-tensor-with
            correct_prediction = tf.equal(tf.round(result_tensor), ground_truth_tensor)
        with tf.name_scope('accuracy'):
            # Mean accuracy over all labels:
            # http://stackoverflow.com/questions/37746670/tensorflow-multi-label-accuracy-calculation
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', evaluation_step)
    return evaluation_step
Author: samhains | Project: Multi-label-Inception-net | Lines: 29 | Source: retrain.py
Example 19: ced
def ced(model, config, scope, connect, threshold=1e-5):
    with tf.variable_scope(scope), tf.name_scope(scope):
        with tf.variable_scope('inputs'), tf.name_scope('inputs'):
            model['%s_in0length' % scope] = model['%s_out0length' % connect]
            model['%s_in1length' % scope] = model['%s_out1length' % connect]
            model['%s_in2length' % scope] = model['%s_out2length' % connect]
            model['%s_maxin2length' % scope] = model['%s_maxout2length' % connect]
            model['%s_inputs' % scope] = tf.clip_by_value(
                model['%s_outputs' % connect], threshold, 1. - threshold,
                name='%s_inputs' % scope)
            model['%s_out0length' % scope] = model['%s_in0length' % scope]
            model['%s_out1length' % scope] = model['%s_in1length' % scope]
            model['%s_out2length' % scope] = tf.placeholder(
                tf.int32, [model['%s_in0length' % scope]], '%s_out2length' % scope)
            model['%s_maxout2length' % scope] = model['%s_maxin2length' % scope]
        with tf.variable_scope('labels'), tf.name_scope('labels'):
            model['%s_labels_len' % scope] = tf.placeholder(
                tf.int32, [model['%s_in0length' % scope]], '%s_labels_len' % scope)
            model['%s_labels_ind' % scope] = tf.placeholder(
                tf.int64, [None, 3], '%s_labels_ind' % scope)
            model['%s_labels_val' % scope] = tf.placeholder(
                tf.float32, [None], '%s_labels_val' % scope)
            model['%s_labels' % scope] = tf.sparse_to_dense(
                model['%s_labels_ind' % scope],
                [model['%s_in0length' % scope], model['%s_maxin2length' % scope],
                 model['%s_maxin2length' % scope]],
                model['%s_labels_val' % scope], -1, name='%s_labels' % scope)
        with tf.variable_scope('loss'), tf.name_scope('loss'):
            # Entries filled with -1 above mark padding; they are masked to zero.
            zeros = tf.zeros([model['%s_in0length' % scope],
                              model['%s_maxin2length' % scope],
                              model['%s_maxin2length' % scope]], tf.float32)
            cross_entropy = -tf.add(
                tf.multiply(model['%s_labels' % scope],
                            tf.log(model['%s_inputs' % scope])),
                tf.multiply(tf.subtract(1., model['%s_labels' % scope]),
                            tf.log(tf.subtract(1., model['%s_inputs' % scope]))))
            model['%s_loss' % scope] = tf.reduce_sum(
                tf.where(tf.less(model['%s_labels' % scope], zeros), zeros, cross_entropy),
                name='%s_loss' % scope)
        with tf.variable_scope('outputs'), tf.name_scope('outputs'):
            model['%s_output' % scope] = model['%s_inputs' % scope]
    return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 26 | Source: ced.py
Example 20: conv_net
def conv_net(x, weights, biases, dropout):
    with tf.name_scope('input_czm'):
        # Reshape input picture
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with tf.name_scope('first_layer'):
        # Convolution layer
        conv1 = conv2d(x, weights['wc1'], biases['bc1'])
        # Max pooling (down-sampling)
        conv1 = maxpool2d(conv1, k=2)
    with tf.name_scope('sec_layer'):
        # Convolution layer
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
        # Max pooling (down-sampling)
        conv2 = maxpool2d(conv2, k=2)
    with tf.name_scope('full_conn'):
        # Fully connected layer.
        # Reshape conv2 output to fit the fully connected layer's input.
        fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
        fc1 = tf.nn.relu(fc1)
    with tf.name_scope('dropout_ops'):
        # Apply dropout
        fc1 = tf.nn.dropout(fc1, dropout)
    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
Author: nanqiangyipo | Project: PyCodeFragment | Lines: 32 | Source: 04_tensorboard_cnn.py
Note: The tensorflow.name_scope examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.