This article collects typical usage examples of the Python function tensorflow.identity. If you have been wondering what exactly tf.identity does, how to call it, or where it shows up in practice, the curated examples below should help.
Listed below are 20 code examples of the identity function, drawn from open-source projects and sorted by popularity.
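Before the examples, a quick orientation. tf.identity(x) returns a new tensor with the same value as x; it is commonly used to give a tensor an explicit name in the graph, or to create an op that a control dependency can attach to. A minimal TF1-style sketch (the names here are illustrative, not taken from any example below):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# Returns a tensor with the same contents, added to the graph as a new node
# named "x_copy".
y = tf.identity(x, name="x_copy")

with tf.Session() as sess:
    print(sess.run(y))  # [1. 2. 3.]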
Example 1: main

def main():
    tf.set_random_seed(10)
    with tf.Session() as sess:
        rnn_cell = tf.nn.rnn_cell.LSTMCell(10)
        # defining initial state
        initial_state = rnn_cell.zero_state(4, dtype=tf.float32)
        inputs = tf.Variable(tf.random_uniform(shape=(4, 30, 100)), name='input')
        inputs = tf.identity(inputs, "input_node")
        # 'state' is a tensor of shape [batch_size, cell_state_size]
        outputs, state = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=initial_state, dtype=tf.float32)
        y1 = tf.identity(outputs, 'outputs')
        y2 = tf.identity(state, 'state')
        t1 = tf.ones([4, 30, 10])
        t2 = tf.ones([4, 10])
        loss = tf.reduce_sum((y1 - t1) * (y1 - t1)) + tf.reduce_sum((y2 - t2) * (y2 - t2))
        tf.identity(loss, name="lstm_loss")
        # tf.summary.FileWriter('/tmp/log', tf.get_default_graph())
        # Note: `argv` and `run_model` are defined elsewhere in the source module.
        net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
        run_model(net_outputs, argv[1], None, argv[3] == 'True')

Author: ru003ar | Project: BigDL | Lines: 26 | Source: dynamic_lstm.py
Example 2: testConstructionAndValue

def testConstructionAndValue(self):
    with self.test_session() as sess:
        mu = [0.0, 0.1, 0.2]
        sigma = tf.constant([1.1, 1.2, 1.3])
        sigma2 = tf.constant([0.1, 0.2, 0.3])

        with self.assertRaisesRegexp(ValueError, 'No value type currently set'):
            prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)

        prior_0 = sg.DistributionTensor(
            distributions.Normal, mu=mu, sigma=sigma,
            dist_value_type=sg.SampleAndReshapeValue())

        with sg.value_type(sg.SampleAndReshapeValue()):
            prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
            likelihood = sg.DistributionTensor(
                distributions.Normal, mu=prior, sigma=sigma2)

        coll = tf.get_collection(sg.STOCHASTIC_TENSOR_COLLECTION)
        self.assertEqual(coll, [prior_0, prior, likelihood])

        prior_0 = tf.identity(prior_0)
        prior = tf.identity(prior)  # Also works: tf.convert_to_tensor(prior)
        likelihood = tf.identity(likelihood)

        # Mostly a smoke test for now...
        prior_0_val, prior_val, _ = sess.run(
            [prior_0, prior, likelihood])
        self.assertEqual(prior_0_val.shape, prior_val.shape)

        # These are different random samples from the same distribution,
        # so the values should differ.
        self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)

Author: 285219011 | Project: hello-world | Lines: 32 | Source: stochastic_graph_test.py
Example 3: testBatchedBijectorWithMLPTransform

def testBatchedBijectorWithMLPTransform(self):
    x_ = np.random.normal(0., 1., (3, 8)).astype(np.float32)
    nvp = tfb.RealNVP(
        num_masked=4, validate_args=True, **self._real_nvp_kwargs)
    x = tf.constant(x_)
    forward_x = nvp.forward(x)
    # Use identity to invalidate cache.
    inverse_y = nvp.inverse(tf.identity(forward_x))
    forward_inverse_y = nvp.forward(inverse_y)
    fldj = nvp.forward_log_det_jacobian(x, event_ndims=1)
    # Use identity to invalidate cache.
    ildj = nvp.inverse_log_det_jacobian(tf.identity(forward_x), event_ndims=1)
    self.evaluate(tf.global_variables_initializer())
    [
        forward_x_,
        inverse_y_,
        forward_inverse_y_,
        ildj_,
        fldj_,
    ] = self.evaluate([
        forward_x,
        inverse_y,
        forward_inverse_y,
        ildj,
        fldj,
    ])
    self.assertEqual("real_nvp", nvp.name)
    self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-4, atol=0.)
    self.assertAllClose(x_, inverse_y_, rtol=1e-4, atol=0.)
    self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)

Author: asudomoeva | Project: probability | Lines: 30 | Source: real_nvp_test.py
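A note on the "Use identity to invalidate cache" comments above (the same idiom appears in Example 12): TensorFlow Probability bijectors cache forward/inverse results keyed on the tensor object itself, so nvp.inverse(forward_x) would simply return the cached input without exercising the inverse math. Wrapping the tensor in tf.identity yields a distinct tensor object and forces the computation to actually run. A minimal sketch, assuming tensorflow_probability is installed:

import tensorflow as tf
import tensorflow_probability as tfp

bijector = tfp.bijectors.Exp()
x = tf.constant([0.0, 1.0])
y = bijector.forward(x)

# bijector.inverse(y) would be answered from the forward/inverse cache and
# return `x` directly; tf.identity(y) is a different tensor object, so the
# inverse computation is really executed (and thus really tested).
x_recovered = bijector.inverse(tf.identity(y))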
Example 4: _fn

def _fn(*args):
    p = tf.identity(proposal_log_prob_fn(*args), name="proposal_log_prob")
    t = tf.identity(target_log_prob_fn(*args), name="target_log_prob")
    dtype = p.dtype.base_dtype
    beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
    return tf.identity(beta * t + (1. - beta) * p,
                       name="convex_combined_log_prob")

Author: asudomoeva | Project: probability | Lines: 7 | Source: sample_annealed_importance.py
Example 5: _kl_entropy

def _kl_entropy(self):
    """
    Add to Graph:
        1. KL divergence between old and new distributions
        2. Entropy of present policy given states and actions
    """
    log_det_cov_old = tf.reduce_sum(self.old_log_vars_ph)
    log_det_cov_new = tf.reduce_sum(self.log_vars)
    tr_old_new = tf.reduce_sum(tf.exp(self.old_log_vars_ph - self.log_vars))
    # KL divergence for multivariate normal distributions:
    # https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Multivariate_normal_distributions
    # log(sigma1/sigma0) = log(sigma1) - log(sigma0)
    # tr: matrix trace
    self.kl = 0.5 * tf.reduce_mean(log_det_cov_new - log_det_cov_old + tr_old_new +
                                   # (mu1-mu0)^T * SIGMA^-1 * (mu1-mu0):
                                   tf.reduce_sum(tf.square(self.means - self.old_means_ph) /
                                                 tf.exp(self.log_vars), axis=1) -
                                   self.act_dim)  # k = act_dim
    self.kl = tf.identity(self.kl, name="kl")
    # simply the entropy formula of a multivariate normal distribution
    # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Entropy
    self.entropy = 0.5 * (self.act_dim * (np.log(2 * np.pi) + 1) +
                          tf.reduce_sum(self.log_vars))
    self.entropy = tf.identity(self.entropy, name="entropy")

Author: projectchrono | Project: chrono | Lines: 26 | Source: policy.py
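For reference, the quantities this snippet builds are the closed-form KL divergence and entropy of multivariate normal distributions; with diagonal covariances Sigma = diag(exp(log_vars)), the log-determinants and trace reduce to the simple sums over log_vars used above (here k = act_dim):

\[
D_{\mathrm{KL}}\!\left(p_{\mathrm{old}} \,\|\, p_{\mathrm{new}}\right)
= \tfrac{1}{2}\left[
    \log\frac{|\Sigma_{\mathrm{new}}|}{|\Sigma_{\mathrm{old}}|}
    + \operatorname{tr}\!\left(\Sigma_{\mathrm{new}}^{-1}\Sigma_{\mathrm{old}}\right)
    + (\mu_{\mathrm{new}}-\mu_{\mathrm{old}})^{\top}\Sigma_{\mathrm{new}}^{-1}(\mu_{\mathrm{new}}-\mu_{\mathrm{old}})
    - k
  \right],
\qquad
H(p) = \tfrac{1}{2}\left[k\,(\log 2\pi + 1) + \log|\Sigma|\right]
\]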
Example 6: mean_var_with_update

def mean_var_with_update():
    ema_apply_op = ema.apply([batch_mean, batch_var])
    pop_mean_op = tf.assign(pop_mean, ema.average(batch_mean))
    pop_var_op = tf.assign(pop_var, ema.average(batch_var))
    with tf.control_dependencies([ema_apply_op, pop_mean_op, pop_var_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)

Author: deworrall92 | Project: groupConvolutions | Lines: 7 | Source: BSD_model.py
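Examples 6, 7, 10, 18, and 20 all rely on the same idiom: tf.control_dependencies only applies to ops created inside its block, so returning an existing tensor there would silently skip the dependency. tf.identity creates a new op inside the block, guaranteeing that the listed ops (here, the moving-average updates) run before the returned value can be used. A minimal sketch with illustrative names:

import tensorflow as tf

counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)

with tf.control_dependencies([increment]):
    # tf.identity creates a new op inside the block, so evaluating `counted`
    # forces `increment` to run first. Returning `counter` directly would not.
    counted = tf.identity(counter)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(counted))  # 1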
Example 7: _update_policy_step

def _update_policy_step(self, observ, action, old_mean, old_logstd, advantage, length):
    """Compute the current policy loss and perform a gradient update step.

    Args:
        observ: Sequences of observations.
        action: Sequences of actions.
        old_mean: Sequences of action means of the behavioral policy.
        old_logstd: Sequences of action log stddevs of the behavioral policy.
        advantage: Sequences of advantages.
        length: Batch of sequence lengths.

    Returns:
        Tuple of loss tensor and summary tensor.
    """
    network = self._network(observ, length)
    loss, summary = self._policy_loss(network.mean, network.logstd, old_mean, old_logstd, action,
                                      advantage, length)
    gradients, variables = (zip(*self._policy_optimizer.compute_gradients(loss)))
    optimize = self._policy_optimizer.apply_gradients(zip(gradients, variables))
    summary = tf.summary.merge([
        summary,
        tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
        utility.gradient_summaries(zip(gradients, variables), dict(policy=r'.*'))
    ])
    with tf.control_dependencies([optimize]):
        return [tf.identity(loss), tf.identity(summary)]

Author: bulletphysics | Project: bullet3 | Lines: 26 | Source: algorithm.py
Example 8: _test_no_active_dims

def _test_no_active_dims(Kern, sess):
    S, N, M, D = 5, 4, 3, 2
    # tf.identity also converts the NumPy arrays into TensorFlow tensors here.
    X1 = tf.identity(np.random.randn(S, N, D))
    X2 = tf.identity(np.random.randn(S, M, D))
    kern = Kern(D) + gpflow.kernels.White(2)
    compare_vs_map(X1, X2, kern, sess)

Author: sanket-kamthe | Project: GPflow | Lines: 7 | Source: test_broadcasting.py
Example 9: setup

def setup(self):
    """Sets up all components of the computation graph."""
    self.x, self.y = self.get_xy_placeholders()

    with tf.variable_scope('core', reuse=None):
        self.loss, self.gradient_ops = self.train(self.x, self.y)
    with tf.variable_scope('core', reuse=True):
        self.y_preds = self.eval(self.x, self.y)

    # setup memory "reset" ops
    (self.mem_keys, self.mem_vals,
     self.mem_age, self.recent_idx) = self.memory.get()
    self.mem_keys_reset = tf.placeholder(self.mem_keys.dtype,
                                         tf.identity(self.mem_keys).shape)
    self.mem_vals_reset = tf.placeholder(self.mem_vals.dtype,
                                         tf.identity(self.mem_vals).shape)
    self.mem_age_reset = tf.placeholder(self.mem_age.dtype,
                                        tf.identity(self.mem_age).shape)
    self.recent_idx_reset = tf.placeholder(self.recent_idx.dtype,
                                           tf.identity(self.recent_idx).shape)
    self.mem_reset_op = self.memory.set(self.mem_keys_reset,
                                        self.mem_vals_reset,
                                        self.mem_age_reset,
                                        None)

Author: JiweiHe | Project: models | Lines: 25 | Source: model.py
Example 10: mean_var_with_update

def mean_var_with_update():
    if self.ema_apply_op is None:
        self.ema_apply_op = self.ema.apply(
            [self.batch_mean, self.batch_var])
    with tf.control_dependencies([self.ema_apply_op]):
        return tf.identity(self.batch_mean), \
            tf.identity(self.batch_var)

Author: renmengye | Project: tfplus | Lines: 7 | Source: batch_norm.py
Example 11: __build_game_state_as_update_graph

def __build_game_state_as_update_graph(self, training, global_step):
    print('game_state_as_update')
    with tf.variable_scope('game_state_as_update') as variable_scope:
        seed = tf.placeholder(
            tf.int64,
            [self.seed_size],
            'seed')
        update_statistic = tf.placeholder(
            tf.float32,
            [None, self.update_statistic_size],
            'update_statistic')

        with tf.variable_scope('transformation') as \
                transformation_variable_scope:
            signal = self._game_state_as_update(
                training, global_step,
                seed, update_statistic)

        output = tf.identity(signal, 'output')
        output_gradient = tf.placeholder(
            tf.float32,
            [None, self.update_size],
            'output_gradient')
        update_statistic_gradient, = tf.gradients(
            output, [update_statistic], output_gradient)
        tf.identity(
            update_statistic_gradient, 'update_statistic_gradient')
        self.__model_gradients(
            variable_scope, transformation_variable_scope, output,
            output_gradient)

Author: thomasste | Project: ugtsa | Lines: 33 | Source: model_builder.py
Example 12: testBijector

def testBijector(self):
    x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4, 2)
    with self.test_session() as sess:
        ma = tfb.MaskedAutoregressiveFlow(
            validate_args=True, **self._autoregressive_flow_kwargs)
        x = tf.constant(x_)
        forward_x = ma.forward(x)
        # Use identity to invalidate cache.
        inverse_y = ma.inverse(tf.identity(forward_x))
        fldj = ma.forward_log_det_jacobian(x, event_ndims=1)
        # Use identity to invalidate cache.
        ildj = ma.inverse_log_det_jacobian(tf.identity(forward_x), event_ndims=1)
        tf.global_variables_initializer().run()
        [
            forward_x_,
            inverse_y_,
            ildj_,
            fldj_,
        ] = sess.run([
            forward_x,
            inverse_y,
            ildj,
            fldj,
        ])
        self.assertEqual("masked_autoregressive_flow", ma.name)
        self.assertAllClose(forward_x_, forward_x_, rtol=1e-6, atol=0.)
        self.assertAllClose(x_, inverse_y_, rtol=1e-5, atol=0.)
        self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)

Author: lewisKit | Project: probability | Lines: 28 | Source: masked_autoregressive_test.py
Example 13: __init__

def __init__(self, net, scope, classes, boxes_per_cell, training=False):
    _, self.cell_height, self.cell_width, _ = tf.get_default_graph().get_tensor_by_name(scope + '/conv:0').get_shape().as_list()
    cells = self.cell_height * self.cell_width
    with tf.name_scope('regress'):
        with tf.name_scope('inputs'):
            end = cells * classes
            self.prob = tf.reshape(net[:, :end], [-1, cells, 1, classes], name='prob')
            inputs_remaining = tf.reshape(net[:, end:], [-1, cells, boxes_per_cell, 5], name='inputs_remaining')
            self.iou = tf.identity(inputs_remaining[:, :, :, 0], name='iou')
            self.offset_xy = tf.identity(inputs_remaining[:, :, :, 1:3], name='offset_xy')
            wh01_sqrt_base = tf.identity(inputs_remaining[:, :, :, 3:], name='wh01_sqrt_base')
        wh01 = tf.square(wh01_sqrt_base, name='wh01')
        wh01_sqrt = tf.abs(wh01_sqrt_base, name='wh01_sqrt')
        self.coords = tf.concat([self.offset_xy, wh01_sqrt], -1, name='coords')
        self.wh = tf.identity(wh01 * [self.cell_width, self.cell_height], name='wh')
        _wh = self.wh / 2
        self.offset_xy_min = tf.identity(self.offset_xy - _wh, name='offset_xy_min')
        self.offset_xy_max = tf.identity(self.offset_xy + _wh, name='offset_xy_max')
        self.areas = tf.reduce_prod(self.wh, -1, name='areas')
    if not training:
        with tf.name_scope('detection'):
            cell_xy = calc_cell_xy(self.cell_height, self.cell_width).reshape([1, cells, 1, 2])
            self.xy = tf.identity(cell_xy + self.offset_xy, name='xy')
            self.xy_min = tf.identity(cell_xy + self.offset_xy_min, name='xy_min')
            self.xy_max = tf.identity(cell_xy + self.offset_xy_max, name='xy_max')
            self.conf = tf.identity(tf.expand_dims(self.iou, -1) * self.prob, name='conf')
    self.inputs = net
    self.classes = classes
    self.boxes_per_cell = boxes_per_cell

Author: happog | Project: yolo-tf | Lines: 29 | Source: __init__.py
Example 14: test_raises_if_rank_is_not_integer_dynamic

def test_raises_if_rank_is_not_integer_dynamic(self):
    with self.test_session():
        tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
        rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
        with self.assertRaisesRegexp(TypeError, "must be of type <dtype: 'int32'>"):
            with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
                tf.identity(tensor).eval(feed_dict={rank_tensor: 0.5})

Author: BloodD | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
Example 15: test_rank_zero_tensor_raises_if_rank_too_small_static_rank

def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
    with self.test_session():
        tensor = tf.constant(1, name="my_tensor")
        desired_rank = 1
        with self.assertRaisesRegexp(ValueError, "fail.*my_tensor.*must have rank 1"):
            with tf.control_dependencies([tf.assert_rank(tensor, desired_rank, message="fail")]):
                tf.identity(tensor).eval()

Author: BloodD | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
Example 16: test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank

def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
    with self.test_session():
        tensor = tf.placeholder(tf.float32, name="my_tensor")
        desired_rank = 1
        with tf.control_dependencies([tf.assert_rank_at_least(tensor,
                                                              desired_rank)]):
            tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})

Author: 3kwa | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
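Examples 14-16 use the same trick for assertions: the op created by tf.assert_rank / tf.assert_rank_at_least does nothing unless something depends on it, so the tests evaluate tf.identity(tensor) under a control dependency to force the check to run. A minimal sketch with illustrative names:

import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")
with tf.control_dependencies([tf.assert_rank_at_least(x, 1)]):
    checked_x = tf.identity(x)  # evaluating this runs the rank assertion first

with tf.Session() as sess:
    print(sess.run(checked_x, feed_dict={x: [1.0, 2.0]}))  # passes: rank 1
    # feeding a scalar, e.g. feed_dict={x: 5.0}, would raise an error instead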
Example 17: inference

def inference(inputs, is_training=True, scope=''):
    batch_norm_params = {'decay': 0.99, 'epsilon': 0.001}
    with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=0.0005,
                          is_training=is_training, batch_norm_params=batch_norm_params):
        # get features from coarse layers
        coarse_features = coarse_layers(inputs)
        coarse_features_dim = coarse_features.get_shape()[1]  # width

        # calculate saliency scores and extract top k
        coarse_output = top_layers(coarse_features)
        coarse_h = entropy(tf.nn.softmax(coarse_output))
        coarse_grads = tf.gradients(coarse_h, coarse_features, name='gradient_entropy')
        top_k_values, top_k_idxs, M = identify_saliency(coarse_grads[0])

        with tf.control_dependencies([top_k_idxs]):
            top_k_idxs = tf.identity(top_k_idxs)
            coarse_features = tf.identity(coarse_features)

            # get features from fine layers
            fine_features, src_idxs, k_patches = extract_features(inputs, top_k_idxs, coarse_features_dim)

            # merge two feature maps
            merged, flat_coarse, flat_fine = replace_features(coarse_features, fine_features, src_idxs)

            raw_hint_loss = tf.reduce_sum(tf.square(flat_coarse - flat_fine), name='raw_hint_loss')
            # scale hint loss per example in batch
            # still does not match the range of 5-25 shown in figure 2 of the paper
            hint_loss = tf.div(raw_hint_loss, inputs.get_shape()[0].value * N_PATCHES, name='objective_hint')

            tf.get_variable_scope().reuse_variables()
            final_logits = top_layers(merged)

    return final_logits, hint_loss

Author: jazzsaxmafia | Project: dcn.tf | Lines: 34 | Source: dcn.py
Example 18: __call__

def __call__(self, x, train=True):
    shape = x.get_shape().as_list()

    if train:
        with tf.variable_scope(self.name) as scope:
            self.beta = tf.get_variable("beta", [shape[-1]],
                                        initializer=tf.constant_initializer(0.))
            self.gamma = tf.get_variable("gamma", [shape[-1]],
                                         initializer=tf.random_normal_initializer(1., 0.02))
            try:
                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
            except Exception:
                # fall back for inputs with fewer than four dimensions
                batch_mean, batch_var = tf.nn.moments(x, [0, 1], name='moments')

            ema_apply_op = self.ema.apply([batch_mean, batch_var])
            self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

            with tf.control_dependencies([ema_apply_op]):
                mean, var = tf.identity(batch_mean), tf.identity(batch_var)
    else:
        mean, var = self.ema_mean, self.ema_var

    normed = tf.nn.batch_norm_with_global_normalization(
        x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)
    return normed

Author: dvolkhonskiy | Project: Adversarial-Model-For-Steganography | Lines: 27 | Source: sgan.py
Example 19: testConstructionAndValue

def testConstructionAndValue(self):
    with self.test_session() as sess:
        mu = [0.0, 0.1, 0.2]
        sigma = tf.constant([1.1, 1.2, 1.3])
        sigma2 = tf.constant([0.1, 0.2, 0.3])

        prior_default = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
        self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
        prior_0 = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma), dist_value_type=st.SampleValue())
        self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))

        with st.value_type(st.SampleValue()):
            prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
            self.assertTrue(isinstance(prior.value_type, st.SampleValue))
            likelihood = st.StochasticTensor(distributions.Normal(mu=prior, sigma=sigma2))
            self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))

        coll = tf.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
        self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])

        # Also works: tf.convert_to_tensor(prior)
        prior_default = tf.identity(prior_default)
        prior_0 = tf.identity(prior_0)
        prior = tf.identity(prior)
        likelihood = tf.identity(likelihood)

        # Mostly a smoke test for now...
        prior_0_val, prior_val, prior_default_val, _ = sess.run([prior_0, prior, prior_default, likelihood])
        self.assertEqual(prior_0_val.shape, prior_val.shape)
        self.assertEqual(prior_default_val.shape, prior_val.shape)

        # These are different random samples from the same distribution,
        # so the values should differ.
        self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
        self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)

Author: kdavis-mozilla | Project: tensorflow | Lines: 35 | Source: stochastic_tensor_test.py
Example 20: _batch_norm

def _batch_norm(self, output,
                lr_mult=1.0, scope='bn', restore=True):
    with tf.variable_scope(scope):
        shape = Network.shape(output)
        # normalize over every dimension except the last one (the feature maps)
        squeeze_dims = range(len(shape) - 1)
        input_maps = shape[-1]
        batch_mean, batch_var = tf.nn.moments(output, squeeze_dims, name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=self.decay)
        ema_apply_op = ema.apply([batch_mean, batch_var])
        # Needed for partial restoration from an existing model
        self._set_restoring(ema.average(batch_mean),
                            Network._append(restore, 'moving_mean'))
        self._set_restoring(ema.average(batch_var),
                            Network._append(restore, 'moving_variance'))
        if self.is_train:  # and lr_mult > 0
            with tf.control_dependencies([ema_apply_op]):
                mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            # mean, var = batch_mean, batch_var
            mean, var = ema.average(batch_mean), ema.average(batch_var)
        beta = self._constant_variable('beta', [input_maps], 0.0, 0.0,
                                       lr_mult, Network._append(restore, 'beta'))
        gamma = self._constant_variable('gamma', [input_maps], 1.0, 0.0,
                                        lr_mult, Network._append(restore, 'gamma'))
        output = tf.nn.batch_normalization(output, mean, var, beta, gamma, Network.BN_EPS)
    return output

Author: sdemyanov | Project: tensorflow-worklab | Lines: 28 | Source: network.py
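Examples 18 and 20 both implement the classic moving-average batch-norm switch. The sketch below (illustrative names, not taken from either project) condenses the pattern: during training, the batch statistics are used and the moving averages are updated via the control dependency; at inference, the stored averages are read instead:

import tensorflow as tf

is_training = tf.placeholder(tf.bool, name="is_training")
x = tf.placeholder(tf.float32, [None, 8], name="x")

batch_mean, batch_var = tf.nn.moments(x, axes=[0], name="moments")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
ema_apply_op = ema.apply([batch_mean, batch_var])

def train_stats():
    # Updating the moving averages is a side effect of reading the identities.
    with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)

def inference_stats():
    # Read the moving averages accumulated during training.
    return ema.average(batch_mean), ema.average(batch_var)

mean, var = tf.cond(is_training, train_stats, inference_stats)
normed = tf.nn.batch_normalization(x, mean, var, None, None, 1e-5)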
Note: The tensorflow.identity examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or reuse is subject to each project's license. Do not repost without permission.