This page collects typical usage examples of the Python function tensorflow.concat_v2. If you have been wondering what exactly concat_v2 does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty code examples of concat_v2 are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python examples.
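Before diving into the examples, here is a minimal sketch of the call itself (our own illustration, not taken from any of the projects below). tf.concat_v2 was the transitional API introduced around TensorFlow 0.12: unlike the older tf.concat(concat_dim, values), it takes the list of tensors first and the axis second, matching the tf.concat(values, axis) signature that TensorFlow 1.0 later standardized. It joins tensors along one existing axis; all other dimensions must match.

import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])  # shape (2, 2)
b = tf.constant([[5, 6], [7, 8]])  # shape (2, 2)

rows = tf.concat_v2([a, b], 0)  # shape (4, 2): stacked along axis 0
cols = tf.concat_v2([a, b], 1)  # shape (2, 4): joined along axis 1

with tf.Session() as sess:
  print(sess.run(rows))  # [[1 2] [3 4] [5 6] [7 8]]
  print(sess.run(cols))  # [[1 2 5 6] [3 4 7 8]]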
Example 1: symmetric_feedforward_weights
def symmetric_feedforward_weights(weights):
  sizes = [weights[0].get_shape()[0].value]
  sizes += [w.get_shape()[0].value for w in weights[1:]]
  sizes += [weights[-1].get_shape()[1].value]

  res = []
  for pre_id, pre_size in enumerate(sizes):
    stack = []
    for post_id, post_size in enumerate(sizes):
      if pre_id == post_id - 1 and pre_id < len(weights):
        stack.append(weights[pre_id])
      elif post_id == pre_id - 1 and post_id < len(weights):
        stack.append(tf.transpose(weights[post_id]))
      else:
        pre_zeros = tf.zeros((pre_size, post_size))
        stack.append(pre_zeros)
    res.append(tf.concat_v2(stack, 1))
  res = tf.concat_v2(res, 0)
  # for i in xrange(res_v.shape[0]):
  #   for j in xrange(res_v.shape[1]):
  #     assert res_v[i, j] == res_v[j, i]
  return res
Author: alexeyche | Project: alexeyche-junk | Lines: 27 | Source: hopfield_utils.py
Example 2: testAttentionCellWrapperCorrectResult
def testAttentionCellWrapperCorrectResult(self):
  num_units = 4
  attn_length = 6
  batch_size = 2
  expected_output = np.array(
      [[0.955392, 0.408507, -0.60122, 0.270718],
       [0.903681, 0.331165, -0.500238, 0.224052]],
      dtype=np.float32)
  expected_state = np.array(
      [[
          0.81331915, 0.32036272, 0.28079176, 1.08888793, 0.41264394,
          0.1062041, 0.10444493, 0.32050529, 0.64655536, 0.70794445,
          0.51896095, 0.31809306, 0.58086717, 0.49446869, 0.7641536,
          0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
          0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
          0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
          0.99211812, 0.12295902, 1.01412082, 0.33123279, -0.71114945,
          0.40583119
      ], [
          0.59962207, 0.42597458, -0.22491696, 0.98063421, 0.32548007,
          0.11623692, -0.10100613, 0.27708149, 0.76956916, 0.6360054,
          0.51719815, 0.50458527, 0.73000264, 0.66986895, 0.73576689,
          0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
          0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
          0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
          0.36127412, 0.12125921, 0.99780077, 0.31886846, -0.67595094,
          0.56531656
      ]],
      dtype=np.float32)
  seed = 12345
  tf.set_random_seed(seed)
  for state_is_tuple in [False, True]:
    with tf.Session() as sess:
      with tf.variable_scope("state_is_tuple", reuse=state_is_tuple):
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(
            num_units, state_is_tuple=state_is_tuple)
        cell = tf.contrib.rnn.AttentionCellWrapper(
            lstm_cell, attn_length, state_is_tuple=state_is_tuple)
        zeros1 = tf.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
        zeros2 = tf.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
        zeros3 = tf.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
        attn_state_zeros = tf.random_uniform(
            (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
        zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
        if not state_is_tuple:
          zero_state = tf.concat_v2([
              zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
          ], 1)
        inputs = tf.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
        output, state = cell(inputs, zero_state)
        if state_is_tuple:
          state = tf.concat_v2(
              [state[0][0], state[0][1], state[1], state[2]], 1)
        sess.run(tf.global_variables_initializer())
        self.assertAllClose(sess.run(output), expected_output)
        self.assertAllClose(sess.run(state), expected_state)
Author: curtiszimmerman | Project: tensorflow | Lines: 60 | Source: rnn_cell_test.py
Example 3: _testConfMatrixOnTensors
def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
  with self.test_session() as sess:
    m_neg = tf.placeholder(dtype=tf.float32)
    m_pos = tf.placeholder(dtype=tf.float32)
    s = tf.placeholder(dtype=tf.float32)

    neg = tf.random_normal([20], mean=m_neg, stddev=s, dtype=tf.float32)
    pos = tf.random_normal([20], mean=m_pos, stddev=s, dtype=tf.float32)

    data = tf.concat_v2([neg, pos], 0)
    data = tf.cast(tf.round(data), tf_dtype)
    data = tf.minimum(tf.maximum(data, 0), 1)
    lab = tf.concat_v2(
        [tf.zeros([20], dtype=tf_dtype), tf.ones([20], dtype=tf_dtype)], 0)

    cm = tf.confusion_matrix(lab, data, dtype=tf_dtype, num_classes=2)

    d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})

    truth = np.zeros([2, 2], dtype=np_dtype)
    try:
      range_builder = xrange
    except NameError:  # In Python 3.
      range_builder = range
    for i in range_builder(len(d)):
      truth[d[i], l[i]] += 1

    self.assertEqual(cm_out.dtype, np_dtype)
    self.assertAllClose(cm_out, truth, atol=1e-10)
Author: BloodD | Project: tensorflow | Lines: 34 | Source: confusion_matrix_test.py
Example 4: _define_distance_to_clusters
def _define_distance_to_clusters(self, data):
  """Defines the Mahalanobis distance to the assigned Gaussian."""
  # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
  # mean) from log probability function.
  self._all_scores = []
  for shard in data:
    all_scores = []
    shard = tf.expand_dims(shard, 0)
    for c in xrange(self._num_classes):
      if self._covariance_type == FULL_COVARIANCE:
        cov = self._covs[c, :, :]
      elif self._covariance_type == DIAG_COVARIANCE:
        cov = tf.diag(self._covs[c, :])
      inverse = tf.matrix_inverse(cov + self._min_var)
      inv_cov = tf.tile(
          tf.expand_dims(inverse, 0), tf.stack([self._num_examples, 1, 1]))
      diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
      m_left = tf.matmul(diff, inv_cov)
      all_scores.append(
          tf.sqrt(tf.matmul(m_left, tf.transpose(diff, perm=[0, 2, 1]))))
    self._all_scores.append(
        tf.reshape(
            tf.concat_v2(all_scores, 1),
            tf.stack([self._num_examples, self._num_classes])))

  # Distance to the associated class.
  self._all_scores = tf.concat_v2(self._all_scores, 0)
  assignments = tf.concat_v2(self.assignments(), 0)
  rows = tf.to_int64(tf.range(0, self._num_examples))
  indices = tf.concat_v2(
      [tf.expand_dims(rows, 1), tf.expand_dims(assignments, 1)], 1)
  self._scores = tf.gather_nd(self._all_scores, indices)
Author: curtiszimmerman | Project: tensorflow | Lines: 34 | Source: gmm_ops.py
Example 5: __call__
def __call__(self, input_tuple, state, scope=None):
  with vs.variable_scope(scope or type(self).__name__):
    self._init_parameters()

    _input, target, dInput_dF = input_tuple
    u, s, r, inner_spikes, dW, dF, reward, reward_mean = state

    s = (1.0 - tau_syn) * s + tf.concat_v2([_input, inner_spikes], 1)
    u = (1.0 - tau_mem) * u + mo.matmul(s, self.W)
    r = (1.0 - tau_refr) * r

    act_raw = self._activation(u)
    act = act_raw * tf.exp(-r)

    spikes = tf.where(
        act > tf.random_uniform([batch_size, self._num_units]),
        tf.ones([batch_size, self._num_units]),
        tf.zeros([batch_size, self._num_units]))

    hidden_spikes, _ = self.slice(spikes)
    _, act_visible = self.slice(act)

    reward_mean = (1.0 - tau_long) * reward_mean + tau_long * reward
    reward = (1.0 - tau_learn) * reward + tau_learn * tf.reduce_sum(
        target * safe_log(act_visible * dt) +
        (1.0 - target) * safe_log(1.0 - act_visible * dt), 1)

    r += spikes * amp_refr

    act_grad = tf.gradients([act], [u])[0]

    learn_target = tf.concat_v2([hidden_spikes, target], 1)
    factor = tf.concat_v2([
        (reward - reward_mean) * tf.ones((batch_size, hidden_size,)),
        tf.ones((batch_size, visible_size,))
    ], 1)

    neuron_derivative = tf.reduce_sum(
        (act_grad / act_raw) * (learn_target - act) * factor, 0)

    Wsliced = tf.slice(self.W, [0, 0], [self._filters_num, self._num_units])
    dF_deriv_part = tf.squeeze(
        mo.matmul(Wsliced, tf.expand_dims(neuron_derivative, 1)))

    dW += lrate * outer(tf.reduce_sum(s, 0), neuron_derivative)

    dInput_dF = tf.reduce_mean(dInput_dF, 0)
    dF += lrate * dF_deriv_part * dInput_dF

    return (GLMOutputTuple(spikes, act, factor, reward, reward_mean),
            GLMStateTuple(u, s, r, spikes, dW, dF, reward, reward_mean))
Author: alexeyche | Project: alexeyche-junk | Lines: 60 | Source: tf_002.py
Example 6: boston_eval_fn
def boston_eval_fn():
  boston = tf.contrib.learn.datasets.load_boston()
  n_examples = len(boston.target)
  features = tf.reshape(
      tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = tf.reshape(tf.constant(boston.target), [n_examples, 1])
  return (tf.concat_v2([features, features], 0),
          tf.concat_v2([labels, labels], 0))
Author: tensorflow | Project: tensorflow | Lines: 8 | Source: estimator_test.py
Example 7: refresh_shortlist
def refresh_shortlist():
  """Update the shortlist with the highest scores in id_to_score."""
  new_scores, new_ids = tf.nn.top_k(self.id_to_score, self.shortlist_size)
  smallest_new_score = tf.reduce_min(new_scores)
  new_length = tf.reduce_sum(
      tf.to_int32(tf.greater(new_scores, tf.float32.min)))
  u1 = self.sl_ids.assign(
      tf.to_int64(tf.concat_v2([[new_length], new_ids], 0)))
  u2 = self.sl_scores.assign(
      tf.concat_v2([[smallest_new_score], new_scores], 0))
  self.last_ops = [u1, u2]
  return tf.group(u1, u2)
Author: kdavis-mozilla | Project: tensorflow | Lines: 9 | Source: topn.py
Example 8: build_model
def build_model(self):
  sc = predictron_arg_scope()

  with tf.variable_scope('state'):
    with slim.arg_scope(sc):
      state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
      state = layers.batch_norm(state, activation_fn=tf.nn.relu,
                                scope='conv1/preact')
      state = slim.conv2d(state, 32, [3, 3], scope='conv2')
      state = layers.batch_norm(state, activation_fn=tf.nn.relu,
                                scope='conv2/preact')

  iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')

  rewards_arr = []
  gammas_arr = []
  lambdas_arr = []
  values_arr = []

  for k in range(self.max_depth):
    state, reward, gamma, lambda_, value = iter_template(state)
    rewards_arr.append(reward)
    gammas_arr.append(gamma)
    lambdas_arr.append(lambda_)
    values_arr.append(value)

  _, _, _, _, value = iter_template(state)
  # K + 1 elements
  values_arr.append(value)

  bs = tf.shape(self.inputs)[0]
  # [batch_size, K * maze_size]
  self.rewards = tf.pack(rewards_arr, axis=1)
  # [batch_size, K, maze_size]
  self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
  # [batch_size, K + 1, maze_size]
  self.rewards = tf.concat_v2(
      values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32),
              self.rewards],
      axis=1, name='rewards')

  # [batch_size, K * maze_size]
  self.gammas = tf.pack(gammas_arr, axis=1)
  # [batch_size, K, maze_size]
  self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
  # [batch_size, K + 1, maze_size]
  self.gammas = tf.concat_v2(
      values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32),
              self.gammas],
      axis=1, name='gammas')

  # [batch_size, K * maze_size]
  self.lambdas = tf.pack(lambdas_arr, axis=1)
  # [batch_size, K, maze_size]
  self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])

  # [batch_size, (K + 1) * maze_size]
  self.values = tf.pack(values_arr, axis=1)
  # [batch_size, K + 1, maze_size]
  self.values = tf.reshape(
      self.values, [-1, (self.max_depth + 1), self.maze_size])

  self.build_preturns()
  self.build_lambda_preturns()
Author: b-kartal | Project: predictron | Lines: 56 | Source: predictron.py
Example 9: testConcat
def testConcat(self):
  tf_val = tf.concat_v2([[16, 37], tf.placeholder(tf.int32, shape=(2,))], 0)
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, None], c_val.as_list())

  tf_val = tf.concat_v2(
      [[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]], 0)
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, 48], c_val.as_list())
Author: curtiszimmerman | Project: tensorflow | Lines: 10 | Source: tensor_util_test.py
Example 10: update_tensor
def update_tensor(V, dim2, val):  # Update tensor V at index (:, dim2[:]) with val[:]
  print 'Shapes received in update: V, dim, val are ==> ', V.get_shape().as_list(), dim2.get_shape().as_list(), val.get_shape().as_list()
  val = tf.cast(val, V.dtype)

  def body(_, (v, d2, chg)):  # Python 2 tuple-parameter syntax, as in the original project
    print 'Shapes received in body of update: v, d2, chg are ==> ', v.get_shape().as_list(), d2.get_shape().as_list(), chg.get_shape().as_list()
    d2_int = tf.cast(d2, tf.int32)
    if len(chg.get_shape().as_list()) == 0:
      chg = [chg]
    else:
      chg = tf.reshape(chg, shape=[1] + chg.get_shape().as_list())
    oob = lambda: tf.slice(
        tf.concat_v2([v[:d2_int], chg], axis=0),
        tf.range(0, len(v.get_shape().as_list())),
        v.get_shape().as_list())
    inb = lambda: tf.slice(
        tf.concat_v2([v[:d2_int], chg, v[d2_int + 1:]], axis=0),
        tf.constant(0, shape=[len(v.get_shape().as_list())]),
        v.get_shape().as_list())
    return tf.cond(tf.less(d2_int + 1, v.get_shape().as_list()[0]), inb, oob)
Author: zhaoxin111 | Project: How-to-Learn-from-Little-Data | Lines: 14 | Source: tf_utils.py
Example 11: unzip
def unzip(x, split_dim, current_length, num_splits=2, name=None):
  """Splits a tensor by unzipping along the split_dim.

  For example the following array split into 2 would be:
      [1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
  and by 3:
      [1, 2, 3, 4] -> [1, 4], [2], [3]

  Args:
    x: The tensor to split.
    split_dim: The dimension to split along.
    current_length: Current length along the split_dim.
    num_splits: The number of splits.
    name: Optional name for this op.

  Returns:
    A length num_splits sequence.
  """
  with tf.name_scope(name, "unzip", [x]) as scope:
    x = tf.convert_to_tensor(x, name="x")
    # There is probably a more efficient way to do this.
    all_splits = tf.split(
        value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)
    splits = [[] for _ in xrange(num_splits)]
    for i in xrange(current_length):
      splits[i % num_splits].append(all_splits[i])
    return [tf.concat_v2(s, split_dim) for s in splits]
Author: google | Project: prettytensor | Lines: 25 | Source: functions.py
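To make the docstring's first example concrete, here is a hypothetical call (our own sketch, assuming the function above is in scope):

x = tf.constant([1, 2, 3, 4, 5, 6])
evens, odds = unzip(x, split_dim=0, current_length=6, num_splits=2)
with tf.Session() as sess:
  print(sess.run(evens))  # [1 3 5]
  print(sess.run(odds))   # [2 4 6]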
Example 12: testDynamicAttentionDecoderStateIsTuple
def testDynamicAttentionDecoderStateIsTuple(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      cell = tf.contrib.rnn.BasicLSTMCell(2, state_is_tuple=True)
      cell = tf.contrib.rnn.MultiRNNCell(cells=[cell] * 2, state_is_tuple=True)
      inp = tf.constant(0.5, shape=[2, 2, 2])
      enc_outputs, enc_state = tf.contrib.rnn.static_rnn(
          cell, inp, dtype=tf.float32)
      attn_states = tf.concat_v2(
          [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs], 1)
      dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
      dec, mem = tf.contrib.legacy_seq2seq.attention_decoder(
          dec_inp, enc_state, attn_states, cell, output_size=4)
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 4), res[0].shape)

      res = sess.run([mem])
      self.assertEqual(2, len(res[0]))
      self.assertEqual((2, 2), res[0][0].c.shape)
      self.assertEqual((2, 2), res[0][0].h.shape)
      self.assertEqual((2, 2), res[0][1].c.shape)
      self.assertEqual((2, 2), res[0][1].h.shape)
Author: kdavis-mozilla | Project: tensorflow | Lines: 28 | Source: seq2seq_test.py
Example 13: average_gradients
def average_gradients(tower_grads):
  """Calculate the average gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.

  Returns:
    List of pairs of (gradient, variable) where the gradient has been averaged
    across all towers.
  """
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    for g, _ in grad_and_vars:
      # Add a 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)
      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)

    # Average over the 'tower' dimension.
    grad = tf.concat_v2(grads, 0)
    grad = tf.reduce_mean(grad, 0)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads.append(grad_and_var)
  return average_grads
Author: b-kartal | Project: predictron | Lines: 34 | Source: train_multigpu.py
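To make the expected input layout concrete, here is a small hypothetical call with two towers sharing one variable (values chosen only for illustration, not from the project above):

v = tf.Variable(tf.zeros([2]))
g0 = tf.constant([1.0, 2.0])  # gradient for v computed on tower 0
g1 = tf.constant([3.0, 4.0])  # gradient for v computed on tower 1

# One inner list per tower, each holding that tower's (gradient, variable) pairs.
tower_grads = [[(g0, v)], [(g1, v)]]
(avg_grad, var), = average_gradients(tower_grads)
# avg_grad evaluates to [2.0, 3.0]; var is the shared Variable v.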
Example 14: testRandomInitUnevenPartitions
def testRandomInitUnevenPartitions(self):
  with self.test_session():
    rnd = tf.Variable(tf.random_uniform([20, 43], dtype=tf.float64))
    var_lists = [
        tf.create_partitioned_variables(
            rnd.get_shape(), [1, i], rnd.initialized_value())
        for i in xrange(1, 10)
    ]
    tf.global_variables_initializer().run()
    rnd_val = rnd.eval()
    # Only check the slice save specs for the first 5 variable lists.
    save_specs = [
        # One slice
        ["20 43 0,20:0,43"],
        # Two slices
        ["20 43 0,20:0,22", "20 43 0,20:22,21"],
        # Three slices
        ["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
        # Four slices
        ["20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
         "20 43 0,20:33,10"],
        # Five slices
        ["20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
         "20 43 0,20:27,8", "20 43 0,20:35,8"],
    ]
    for i, vs in enumerate(var_lists):
      var_val = tf.concat_v2(vs, 1).eval()
      self.assertAllClose(rnd_val, var_val)
      self.assertEqual([tf.float64] * len(vs),
                       [v.dtype.base_dtype for v in vs])
      if i < len(save_specs):
        self._TestSaveSpec(vs, save_specs[i])
Author: BloodD | Project: tensorflow | Lines: 27 | Source: partitioned_variables_test.py
Example 15: concat
def concat(input_layer, concat_dim, other_tensors=None):
  """Concatenates input PrettyTensor with other_tensors along the specified dim.

  This adds the Pretty Tensor passed via input_layer to the front of the list
  of tensors to concat.

  Args:
    input_layer: The input layer.
    concat_dim: The dimension along which to concat.
    other_tensors: The tensors to concatenate with as an iterable or None if
      this is called on a sequence.

  Returns:
    A new PrettyTensor.

  Raises:
    ValueError: If other_tensors is None and this is not a sequence.
  """
  if input_layer.is_sequence():
    all_tensors = input_layer.sequence
    all_tensors.extend(other_tensors or [])
  else:
    all_tensors = [input_layer]
    if other_tensors is None:
      raise ValueError("Other Tensors must be supplied.")
    all_tensors.extend(other_tensors)
  # Edge cases really only apply when this is a sequence with 0 or 1 element.
  if not all_tensors:
    return prettytensor.wrap_sequence([])
  else:
    return tf.concat_v2(all_tensors, concat_dim)
Author: google | Project: prettytensor | Lines: 29 | Source: pretty_tensor_methods.py
Example 16: one_hot_mask
def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at
  each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with tf.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
    concated = tf.concat_v2([indices, sparse_labels], 1)
    dense_result = tf.sparse_to_dense(
        concated, [sparse_size, num_classes], 1.0, 0.0)
    result = tf.reshape(dense_result, [height, width, num_classes])
    return result
Author: curtiszimmerman | Project: tensorflow | Lines: 26 | Source: misc.py
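A hypothetical call (our own sketch, assuming the undefined _shape helper returns the static shape as a Python list): for a 2x2 label image with three classes,

labels = tf.constant([[[0], [1]], [[2], [1]]], dtype=tf.int32)  # shape (2, 2, 1)
mask = one_hot_mask(labels, num_classes=3)  # shape (2, 2, 3)
# mask[0, 1] evaluates to [0., 1., 0.] -- a 1.0 at class index 1.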
Example 17: test_broadcast_apply_and_solve
def test_broadcast_apply_and_solve(self):
  # These cannot be done in the automated (base test class) tests since they
  # test shapes that tf.matmul cannot handle.
  # In particular, tf.matmul does not broadcast.
  with self.test_session() as sess:
    x = tf.random_normal(shape=(2, 2, 3, 4))

    # This LinearOperatorDiag will be broadcast to (2, 2, 3, 3) during solve
    # and apply with 'x' as the argument.
    diag = tf.random_uniform(shape=(2, 1, 3))
    operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True)
    self.assertAllEqual((2, 1, 3, 3), operator.shape)

    # Create a batch matrix with the broadcast shape of operator.
    diag_broadcast = tf.concat_v2((diag, diag), 1)
    mat = tf.matrix_diag(diag_broadcast)
    self.assertAllEqual((2, 2, 3, 3), mat.get_shape())  # being pedantic.

    operator_apply = operator.apply(x)
    mat_apply = tf.matmul(mat, x)
    self.assertAllEqual(operator_apply.get_shape(), mat_apply.get_shape())
    self.assertAllClose(*sess.run([operator_apply, mat_apply]))

    operator_solve = operator.solve(x)
    mat_solve = tf.matrix_solve(mat, x)
    self.assertAllEqual(operator_solve.get_shape(), mat_solve.get_shape())
    self.assertAllClose(*sess.run([operator_solve, mat_solve]))
Author: curtiszimmerman | Project: tensorflow | Lines: 27 | Source: linear_operator_diag_test.py
Example 18: horizontal_lstm
def horizontal_lstm(images, num_filters_out, scope=None):
  """Run an LSTM bidirectionally over all the rows of each image.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor, where
    num_steps is width and new num_batches is num_image_batches * height
  """
  with tf.variable_scope(scope, "HorizontalLstm", [images]):
    batch_size, _, _, _ = _shape(images)
    sequence = images_to_sequence(images)
    with tf.variable_scope("lr"):
      hidden_sequence_lr = lstm1d.ndlstm_base(sequence, num_filters_out // 2)
    with tf.variable_scope("rl"):
      hidden_sequence_rl = lstm1d.ndlstm_base(
          sequence, num_filters_out - num_filters_out // 2, reverse=1)
    output_sequence = tf.concat_v2([hidden_sequence_lr, hidden_sequence_rl], 2)
    output = sequence_to_images(output_sequence, batch_size)
    return output
Author: curtiszimmerman | Project: tensorflow | Lines: 25 | Source: lstm2d.py
Example 19: LSTMCell
def LSTMCell(cls, x, mprev, cprev, weights):
  xm = tf.concat_v2([x, mprev], 1)
  i_i, i_g, f_g, o_g = tf.split(
      value=tf.matmul(xm, weights), num_or_size_splits=4, axis=1)
  new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
  new_c = tf.clip_by_value(new_c, -50.0, 50.0)
  new_m = tf.sigmoid(o_g) * tf.tanh(new_c)
  return new_m, new_c
Author: BloodD | Project: tensorflow | Lines: 8 | Source: function_test.py
Example 20: _inference
def _inference(self, docs, queries):
  """Computes document attentions given a document batch and query batch."""
  with tf.name_scope("inference"):
    # Compute document lengths / query lengths for batch
    doc_lens = length(docs)
    query_lens = length(queries)
    batch_size = tf.shape(docs)[0]

    with tf.variable_scope('encode'):
      # Encode Document / Query
      with tf.variable_scope('docs'), tf.device('/gpu:0'):
        encoded_docs = tf.nn.dropout(self._embed(docs), self._keep_prob)
        encoded_docs = self._bidirectional_encode(
            encoded_docs, doc_lens, self._encode_size)
      with tf.variable_scope('queries'), tf.device('/gpu:1'):
        encoded_queries = tf.nn.dropout(self._embed(queries), self._keep_prob)
        encoded_queries = self._bidirectional_encode(
            encoded_queries, query_lens, self._encode_size)

    with tf.variable_scope('attend') as scope:
      infer_gru = tf.nn.rnn_cell.GRUCell(self._infer_size)
      infer_state = infer_gru.zero_state(batch_size, tf.float32)
      for iter_step in range(self._num_glimpses):
        if iter_step > 0:
          scope.reuse_variables()

        # Glimpse query and document
        with tf.device('/gpu:0'):
          q_attention, q_glimpse = self._glimpse(
              self._A_q, self._a_q, encoded_queries, infer_state)
          tf.add_to_collection('query_attentions', q_attention)
        with tf.device('/gpu:1'):
          d_attention, d_glimpse = self._glimpse(
              self._A_d, self._a_d, encoded_docs,
              tf.concat_v2([infer_state, q_glimpse], 1))
          tf.add_to_collection('doc_attentions', d_attention)

        # Search Gates
        gate_concat = tf.concat_v2(
            [infer_state, q_glimpse, d_glimpse, q_glimpse * d_glimpse], 1)

        r_d = tf.sigmoid(tf.matmul(gate_concat, self._g_d))
        r_d = tf.nn.dropout(r_d, self._keep_prob)
        r_q = tf.sigmoid(tf.matmul(gate_concat, self._g_q))
        r_q = tf.nn.dropout(r_q, self._keep_prob)

        combined_gated_glimpse = tf.concat_v2(
            [r_q * q_glimpse, r_d * d_glimpse], 1)
        _, infer_state = infer_gru(combined_gated_glimpse, infer_state)

    return tf.to_float(tf.sign(tf.abs(docs))) * d_attention
Author: MrCrumpets | Project: alternating-reader-tf | Lines: 46 | Source: AlternatingAttention.py
Note: The tensorflow.concat_v2 examples on this page were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution is subject to the corresponding project's license. Do not reproduce without permission.