This article collects typical usage examples of the tensorflow.sequence_mask function in Python. If you have been wondering what sequence_mask does, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 20 code examples of sequence_mask, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
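As a quick primer before the examples: tf.sequence_mask turns a 1-D tensor of lengths into a boolean (or numeric) mask over a time dimension. A minimal sketch in TF 1.x graph mode; the lengths and maxlen here are illustrative, not taken from any of the projects below:

import tensorflow as tf

lengths = tf.constant([1, 3, 2])
mask = tf.sequence_mask(lengths, maxlen=5)             # tf.bool, shape [3, 5]
float_mask = tf.sequence_mask(lengths, 5, tf.float32)  # same positions as 1.0 / 0.0

with tf.Session() as sess:
    print(sess.run(mask))
    # [[ True False False False False]
    #  [ True  True  True False False]
    #  [ True  True False False False]]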
Example 1: sovle_problem_1
def sovle_problem_1():
    input = tf.constant([range(5),
                         np.array(range(5)) + 1,
                         np.array(range(5)) + 2])
    '''
    input = [ [0, 1, 2, 3, 4],
              [1, 2, 3, 4, 5],
              [2, 3, 4, 5, 6]
            ]
    '''
    input = tf.constant([
        [0.99, 0.8, 0.7, 0.5, 0.5],
        [0.2, 0.3, 0.6, 0.7, 0.8],
        [0.1, 0.1, 0.1, 0.5, 1]
    ])
    sess = tf.Session()
    mask = tf.cast(tf.cast(tf.greater(input, 3), tf.int32), tf.float32)
    start_label = tf.constant(np.array([0, 2, 3]))
    start_label = tf.sequence_mask(start_label, 5, dtype=tf.int32)  # does not include the start index itself
    end_label = tf.constant(np.array([2, 4, 3]))
    end_label = tf.sequence_mask(end_label + 1, 5, dtype=tf.int32)  # includes the end index
    res = end_label - start_label
    log_loss = tf.losses.log_loss(res, input)
    print(sess.run([mask, start_label, end_label, res, log_loss]))
Author: Accagain2014 | Project: ML | Lines: 30 | Source: tensorflow_examples.py
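The trick in Example 1 is worth spelling out: subtracting the mask of the start indices from the mask of the (inclusive) end indices leaves ones exactly on the span [start, end]. A minimal sketch of just that step, with the same illustrative indices:

import numpy as np
import tensorflow as tf

start = tf.constant(np.array([0, 2, 3]))
end = tf.constant(np.array([2, 4, 3]))
span = (tf.sequence_mask(end + 1, 5, dtype=tf.int32)
        - tf.sequence_mask(start, 5, dtype=tf.int32))

with tf.Session() as sess:
    print(sess.run(span))
    # [[1 1 1 0 0]   span [0, 2]
    #  [0 0 1 1 1]   span [2, 4]
    #  [0 0 0 1 0]]  span [3, 3]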
Example 2: get_start_end_seq_mask
def get_start_end_seq_mask(seq_len=5, length=3):
    '''
    :param seq_len: n_ctx
    :param length: ans_avg_len
    :return:
    '''
    s = tf.constant(np.array(range(seq_len)))  # [0, 1, ..., seq_len-1]
    s = tf.expand_dims(s, axis=-1)  # [[0], [1], ..., [seq_len-1]]
    s = tf.tile(s, [1, length])  # [[0, 0, 0], [1, 1, 1], ..., [seq_len-1, seq_len-1, seq_len-1]]
    s = tf.concat(tf.unstack(s, axis=0), axis=0)  # [0, 0, 0, 1, 1, 1, 2, 2, 2, ..., 4, 4, 4]
    gap = tf.constant(np.array(range(length)))  # [0, 1, 2]
    gap = tf.tile(gap, [seq_len])  # [0, 1, 2, 0, 1, 2, ..., 0, 1, 2]
    e = s + gap  # [0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 5, ...]
    s_mask = tf.cast(tf.sequence_mask(s + 1, seq_len, dtype=tf.int32), tf.float32)
    s_mask_ = tf.cast(tf.sequence_mask(s, seq_len, dtype=tf.int32), tf.float32)
    s_mask = s_mask - s_mask_  # one-hot at each start position
    e_mask = tf.cast(tf.sequence_mask(e + 1, seq_len, dtype=tf.int32), tf.float32)
    e_mask_ = tf.cast(tf.sequence_mask(e, seq_len, dtype=tf.int32), tf.float32)
    e_mask = e_mask - e_mask_  # one-hot at each end position (all zeros if e >= seq_len)
    # res = e_mask - s_mask
    res = e_mask + s_mask
    res = res / tf.reduce_sum(res, axis=-1, keepdims=True)
    res = 2.0 * res
    return res
Author: Accagain2014 | Project: ML | Lines: 29 | Source: tensorflow_examples.py
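Reading between the lines, this enumerates every candidate span of up to `length` tokens starting at each context position and builds one target row per span, with equal mass on the span's start and end positions; that reading is our interpretation, not documented in the source. Calling it is straightforward:

res = get_start_end_seq_mask(seq_len=5, length=3)
with tf.Session() as sess:
    print(sess.run(res))  # shape [seq_len * length, seq_len]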
Example 3: pad_with_identity
def pad_with_identity(x, sequence_length, max_sequence_length, identity_values=0):
    """Pads a tensor with identity values up to :obj:`max_sequence_length`.

    Args:
      x: A ``tf.Tensor`` of shape ``[batch_size, max(sequence_length), depth]``.
      sequence_length: The true sequence length of :obj:`x`.
      max_sequence_length: The sequence length up to which the tensor must contain
        :obj:`identity_values`.
      identity_values: The identity value.

    Returns:
      A ``tf.Tensor`` of shape ``[batch_size, max(max_sequence_length), depth]``.
    """
    maxlen = tf.reduce_max(max_sequence_length)
    mask = tf.sequence_mask(sequence_length, maxlen=maxlen, dtype=x.dtype)
    mask = tf.expand_dims(mask, axis=-1)
    mask_combined = tf.sequence_mask(max_sequence_length, dtype=x.dtype)
    mask_combined = tf.expand_dims(mask_combined, axis=-1)
    identity_mask = mask_combined * (1.0 - mask)
    x = pad_in_time(x, maxlen - tf.shape(x)[1])
    x = x * mask + (identity_mask * identity_values)
    return x
Author: yhgon | Project: OpenNMT-tf | Lines: 26 | Source: reducer.py
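pad_in_time is a helper defined elsewhere in the same reducer.py; a plausible one-line sketch consistent with how it is called above (zero-padding only the time dimension) would be:

def pad_in_time(x, padding_length):
    """Pads a [batch, time, depth] tensor along the time dimension."""
    return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])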
Example 4: sequence_mask
def sequence_mask(input_lengths, max_len=None, expand=True):
    if max_len is None:
        max_len = tf.reduce_max(input_lengths)
    if expand:
        return tf.expand_dims(tf.sequence_mask(input_lengths, max_len, dtype=tf.float32), axis=-1)
    return tf.sequence_mask(input_lengths, max_len, dtype=tf.float32)
Author: duvtedudug | Project: Tacotron-2 | Lines: 7 | Source: util.py
Example 5: sequence_mask
def sequence_mask(lengths, r, expand=True):
    '''Returns a 2-D or 3-D tensorflow sequence mask depending on the argument 'expand'
    '''
    max_len = tf.reduce_max(lengths)
    max_len = _round_up_tf(max_len, tf.convert_to_tensor(r))
    if expand:
        return tf.expand_dims(tf.sequence_mask(lengths, maxlen=max_len, dtype=tf.float32), axis=-1)
    return tf.sequence_mask(lengths, maxlen=max_len, dtype=tf.float32)
Author: duvtedudug | Project: Tacotron-2 | Lines: 8 | Source: modules.py
Example 6: testNormal
def testNormal(self):
    with self.test_session():
        res = tf.sequence_mask(tf.constant([1, 3, 2]), 5)
        self.assertAllEqual(res.get_shape(), [3, 5])
        self.assertAllEqual(res.eval(), [[True, False, False, False, False],
                                         [True, True, True, False, False],
                                         [True, True, False, False, False]])
        # test dtype and default maxlen:
        res = tf.sequence_mask(tf.constant([0, 1, 4]), dtype=tf.float32)
        self.assertAllEqual(res.get_shape().as_list(), [3, None])
        self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0],
                                         [1.0, 0.0, 0.0, 0.0],
                                         [1.0, 1.0, 1.0, 1.0]])
Author: Qstar | Project: tensorflow | Lines: 14 | Source: array_ops_test.py
Example 7: attend
def attend(x, sequence_length=None, method="ave", context=None, feature_dim=None, mask_zero=False, maxlen=None,
           epsilon=1e-8, bn=True, training=False, seed=0, reuse=True, name="attend"):
    if method == "ave":
        if mask_zero:
            # None * step_dim
            mask = tf.sequence_mask(sequence_length, maxlen)
            mask = tf.reshape(mask, (-1, tf.shape(x)[1], 1))
            mask = tf.cast(mask, tf.float32)
            z = tf.reduce_sum(x * mask, axis=1)
            l = tf.reduce_sum(mask, axis=1)
            # in some cases, especially in the early stages of training, the sum may be almost zero
            z /= tf.cast(l + epsilon, tf.float32)
        else:
            z = tf.reduce_mean(x, axis=1)
    elif method == "sum":
        if mask_zero:
            # None * step_dim
            mask = tf.sequence_mask(sequence_length, maxlen)
            mask = tf.reshape(mask, (-1, tf.shape(x)[1], 1))
            mask = tf.cast(mask, tf.float32)
            z = tf.reduce_sum(x * mask, axis=1)
        else:
            z = tf.reduce_sum(x, axis=1)
    elif method == "max":
        if mask_zero:
            # None * step_dim
            mask = tf.sequence_mask(sequence_length, maxlen)
            mask = tf.expand_dims(mask, axis=-1)
            mask = tf.tile(mask, (1, 1, tf.shape(x)[2]))
            masked_data = tf.where(tf.equal(mask, tf.zeros_like(mask)),
                                   tf.ones_like(x) * -np.inf, x)  # if masked, assume the value is -inf
            z = tf.reduce_max(masked_data, axis=1)
        else:
            z = tf.reduce_max(x, axis=1)
    elif method == "attention":
        if context is not None:
            step_dim = tf.shape(x)[1]
            context = tf.expand_dims(context, axis=1)
            context = tf.tile(context, [1, step_dim, 1])
            y = tf.concat([x, context], axis=-1)
        else:
            y = x
        a = attention(y, feature_dim, sequence_length, mask_zero, maxlen, seed=seed)
        z = tf.reduce_sum(x * a, axis=1)
    if bn:
        # training=False has slightly better performance
        z = tf.layers.BatchNormalization()(z, training=False)
        # z = batch_normalization(z, training=training, name=name)
    return z
Author: jkhlot | Project: tensorflow-XNN | Lines: 49 | Source: nn_module.py
Example 8: attention
def attention(queries, keys, keys_length):
    '''
    queries: [B, H]
    keys: [B, T, H]
    keys_length: [B]
    '''
    queries_hidden_units = queries.get_shape().as_list()[-1]
    queries = tf.tile(queries, [1, tf.shape(keys)[1]])
    queries = tf.reshape(queries, [-1, tf.shape(keys)[1], queries_hidden_units])
    din_all = tf.concat([queries, keys, queries - keys, queries * keys], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att')
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att')
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att')
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(keys)[1]])
    outputs = d_layer_3_all
    # Mask
    key_masks = tf.sequence_mask(keys_length, tf.shape(keys)[1])  # [B, T]
    key_masks = tf.expand_dims(key_masks, 1)  # [B, 1, T]
    paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
    outputs = tf.where(key_masks, outputs, paddings)  # [B, 1, T]
    # Scale
    outputs = outputs / (keys.get_shape().as_list()[-1] ** 0.5)
    # Activation
    outputs = tf.nn.softmax(outputs)  # [B, 1, T]
    # Weighted sum
    outputs = tf.matmul(outputs, keys)  # [B, 1, H]
    return outputs
Author: 13162201530 | Project: DeepInterestNetwork | Lines: 31 | Source: model.py
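A hedged usage sketch for this DIN-style attention; the shapes follow the docstring and the batch/time/hidden sizes are arbitrary:

B, T, H = 2, 3, 4
queries = tf.random_normal([B, H])
keys = tf.random_normal([B, T, H])
keys_length = tf.constant([2, 3])  # padded positions beyond each length are masked out
weighted = attention(queries, keys, keys_length)  # [B, 1, H]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weighted).shape)  # (2, 1, 4)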
Example 9: calculate_outputs
def calculate_outputs(self, x):
    h = lstm_layer(x, self.history_length, self.lstm_size, scope='lstm-1')
    h = tf.concat([h, x], axis=2)
    h_final = time_distributed_dense_layer(h, 50, activation=tf.nn.relu, scope='dense-1')
    n_components = 1
    params = time_distributed_dense_layer(h_final, n_components*2, scope='dense-2', activation=None)
    ps, mixing_coefs = tf.split(params, 2, axis=2)
    # this is implemented incorrectly, but it still helped...
    mixing_coefs = tf.nn.softmax(mixing_coefs - tf.reduce_min(mixing_coefs, 2, keep_dims=True))
    ps = tf.nn.sigmoid(ps)
    labels = tf.tile(tf.expand_dims(self.next_is_ordered, 2), (1, 1, n_components))
    losses = tf.reduce_sum(mixing_coefs*log_loss(labels, ps), axis=2)
    sequence_mask = tf.cast(tf.sequence_mask(self.history_length, maxlen=100), tf.float32)
    avg_loss = tf.reduce_sum(losses*sequence_mask) / tf.cast(tf.reduce_sum(self.history_length), tf.float32)
    final_temporal_idx = tf.stack([tf.range(tf.shape(self.history_length)[0]), self.history_length - 1], axis=1)
    self.final_states = tf.gather_nd(h_final, final_temporal_idx)
    self.prediction_tensors = {
        'user_ids': self.user_id,
        'product_ids': self.product_id,
        'final_states': self.final_states
    }
    return avg_loss
Author: dengminna | Project: instacart-basket-prediction | Lines: 28 | Source: rnn_product_bmm.py
Example 10: reduce_sequence
def reduce_sequence(self, inputs, sequence_lengths):
    axis = self.axis % inputs[0].shape.ndims
    if axis == 2:
        padded, combined_length = pad_n_with_identity(inputs, sequence_lengths)
        return self.reduce(padded), combined_length
    elif axis == 1:
        # Pad all input tensors up to the maximum combined length.
        combined_length = tf.add_n(sequence_lengths)
        maxlen = tf.reduce_max(combined_length)
        padded = [pad_in_time(x, maxlen - tf.shape(x)[1]) for x in inputs]
        current_length = None
        accumulator = None
        for elem, length in zip(padded, sequence_lengths):
            # Make sure paddings are 0 vectors, as required by the next step.
            mask = tf.sequence_mask(length, maxlen=maxlen, dtype=elem.dtype)
            elem = elem * tf.expand_dims(mask, -1)
            if accumulator is None:
                accumulator = elem
                current_length = length
            else:
                accumulator += roll_sequence(elem, current_length)
                current_length += length
        return accumulator, combined_length
    else:
        raise ValueError("Unsupported concatenation on axis {}".format(axis))
Author: yhgon | Project: OpenNMT-tf | Lines: 30 | Source: reducer.py
Example 11: call
def call(self, inputs, **kwargs):
    query_key_keylen_list = inputs
    queries, keys, keys_length = query_key_keylen_list
    hist_len = keys.get_shape()[1]
    attention_score = LocalActivationUnit(
        self.hidden_size, self.activation, 0, 1, False, 1024,)([queries, keys])
    outputs = tf.transpose(attention_score, (0, 2, 1))
    key_masks = tf.sequence_mask(keys_length, hist_len)
    if self.weight_normalization:
        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
    else:
        paddings = tf.zeros_like(outputs)
    outputs = tf.where(key_masks, outputs, paddings)
    if self.weight_normalization:
        outputs = tf.nn.softmax(outputs)
    outputs = tf.matmul(outputs, keys)
    return outputs
Author: SundeepMehta | Project: DeepCTR | Lines: 25 | Source: sequence.py
Example 12: smoothing_crossentropy_avgall
def smoothing_crossentropy_avgall(logits, targets, sequence_length):
    """ Computes cross entropy loss of a batch of data with label smoothing.

    The final loss is averaged by the length of each
    sequence and then averaged by the batch size.

    Args:
        logits: The logits Tensor with shape [timesteps, batch_size, vocab_size].
        targets: The gold labels Tensor with shape [timesteps, batch_size].
        sequence_length: The length of `targets`, [batch_size, ]

    Returns: Loss sum and weight sum.
    """
    soft_targets, normalizing = label_smoothing(targets, logits.get_shape().as_list()[-1])
    losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=soft_targets) - normalizing
    # [timesteps, batch_size]
    loss_mask = tf.transpose(
        tf.sequence_mask(
            lengths=tf.to_int32(sequence_length),
            maxlen=tf.to_int32(tf.shape(targets)[0]),
            dtype=tf.float32), [1, 0])
    losses = losses * loss_mask
    # average loss
    avg_length = tf.to_float(sequence_length)
    loss_by_time = tf.reduce_sum(losses, axis=0) / avg_length
    loss_sum = tf.reduce_sum(loss_by_time)
    return loss_sum, tf.to_float(tf.shape(sequence_length)[0])
Author: KIngpon | Project: NJUNMT-tf | Lines: 27 | Source: loss_fns.py
Example 13: create_variables_for_optimization
def create_variables_for_optimization(self):
    with tf.name_scope("optimization"):
        with tf.name_scope("masker"):
            self.mask = tf.sequence_mask(self.seq_len, self.num_step)
            self.mask = tf.reshape(tf.cast(self.mask, tf.float32), (-1,))
        if self.loss_function == "cross_entropy":
            self.pl_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.logit,
                labels=self.actions_flatten)
        elif self.loss_function == "l2":
            self.one_hot_actions = tf.one_hot(self.actions_flatten, self.num_actions)
            self.pl_loss = tf.reduce_mean((self.probs - self.one_hot_actions) ** 2,
                                          axis=1)
        else:
            raise ValueError("loss function type is not defined")
        self.pl_loss = tf.multiply(self.pl_loss, self.mask)
        self.pl_loss = tf.reduce_mean(tf.multiply(self.pl_loss, self.returns_flatten))
        self.entropy = tf.multiply(self.entropy, self.mask)
        self.entropy = tf.reduce_mean(self.entropy)
        self.loss = self.pl_loss - self.entropy_bonus * self.entropy
        self.trainable_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="policy_network")
        self.gradients = self.optimizer.compute_gradients(self.loss, var_list=self.trainable_variables)
        self.clipped_gradients = [(tf.clip_by_norm(grad, self.max_gradient), var)
                                  for grad, var in self.gradients]
        self.train_op = self.optimizer.apply_gradients(self.clipped_gradients,
                                                       self.global_step)
        self.grad_norm = tf.global_norm([grad for grad, var in self.gradients])
        self.var_norm = tf.global_norm(self.trainable_variables)
Author: csawtelle | Project: pg_rnn | Lines: 32 | Source: pg_rnn.py
Example 14: check_dtypes
def check_dtypes(lengths_dtype, maxlen_dtype):
    res = tf.sequence_mask(tf.constant([1, 3, 2], dtype=lengths_dtype),
                           tf.constant(5, dtype=maxlen_dtype))
    self.assertAllEqual(res.get_shape(), [3, 5])
    self.assertAllEqual(res.eval(), [[True, False, False, False, False],
                                     [True, True, True, False, False],
                                     [True, True, False, False, False]])
Author: BloodD | Project: tensorflow | Lines: 7 | Source: array_ops_test.py
Example 15: _compute_metrics
def _compute_metrics(self, features, labels, predictions):
    length = self._get_features_length(features)
    weights = tf.sequence_mask(
        length, maxlen=tf.shape(labels["tags"])[1], dtype=tf.float32)

    eval_metric_ops = {}
    eval_metric_ops["accuracy"] = tf.metrics.accuracy(
        labels["tags"], predictions["tags"], weights=weights)

    if self.tagging_scheme in ("bioes",):
        flag_fn = None
        if self.tagging_scheme == "bioes":
            flag_fn = flag_bioes_tags

        gold_flags, predicted_flags = tf.py_func(
            flag_fn,
            [labels["tags"], predictions["tags"], length],
            [tf.bool, tf.bool],
            stateful=False)

        precision_metric = tf.metrics.precision(gold_flags, predicted_flags)
        recall_metric = tf.metrics.recall(gold_flags, predicted_flags)
        precision = precision_metric[0]
        recall = recall_metric[0]
        f1 = (2 * precision * recall) / (recall + precision)

        eval_metric_ops["precision"] = precision_metric
        eval_metric_ops["recall"] = recall_metric
        eval_metric_ops["f1"] = (f1, tf.no_op())

    return eval_metric_ops
Author: yhgon | Project: OpenNMT-tf | Lines: 32 | Source: sequence_tagger.py
Example 16: mkMask
def mkMask(input_tensor, maxLen):
    shape_of_input = tf.shape(input_tensor)
    shape_of_output = tf.concat(axis=0, values=[shape_of_input, [maxLen]])
    oneDtensor = tf.reshape(input_tensor, shape=(-1,))
    flat_mask = tf.sequence_mask(oneDtensor, maxlen=maxLen)
    return tf.reshape(flat_mask, shape_of_output)
Author: et0803 | Project: nlpcc2017_news_headline_categorization | Lines: 7 | Source: TfUtils.py
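mkMask generalizes tf.sequence_mask to length tensors of arbitrary rank by flattening, masking, and reshaping back. For example (the values here are chosen purely for illustration):

lengths = tf.constant([[1, 3], [2, 0]])   # lengths can have any shape
mask = mkMask(lengths, maxLen=4)          # tf.bool, shape [2, 2, 4]

with tf.Session() as sess:
    print(sess.run(mask))
    # [[[ True False False False]
    #   [ True  True  True False]]
    #  [[ True  True False False]
    #   [False False False False]]]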
Example 17: cross_entropy_sequence_loss
def cross_entropy_sequence_loss(logits,
                                labels,
                                sequence_length,
                                label_smoothing=0.0,
                                average_in_time=False,
                                mode=tf.estimator.ModeKeys.TRAIN):
    """Computes the cross entropy loss of sequences.

    Args:
      logits: The unscaled probabilities.
      labels: The true labels.
      sequence_length: The length of each sequence.
      label_smoothing: The label smoothing value.
      average_in_time: If ``True``, also average the loss in the time dimension.
      mode: A ``tf.estimator.ModeKeys`` mode.

    Returns:
      A tuple (cumulated loss, loss normalizer, token-level normalizer).
    """
    batch_size = tf.shape(logits)[0]
    max_time = tf.shape(logits)[1]

    cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, mode)
    weights = tf.sequence_mask(
        sequence_length, maxlen=max_time, dtype=cross_entropy.dtype)
    loss = tf.reduce_sum(cross_entropy * weights)
    loss_token_normalizer = tf.reduce_sum(weights)

    if average_in_time or mode != tf.estimator.ModeKeys.TRAIN:
        loss_normalizer = loss_token_normalizer
    else:
        loss_normalizer = tf.cast(batch_size, loss.dtype)

    return loss, loss_normalizer, loss_token_normalizer
Author: yhgon | Project: OpenNMT-tf | Lines: 34 | Source: losses.py
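_softmax_cross_entropy is defined elsewhere in losses.py; with label_smoothing=0.0 the overall pattern reduces to the common masked sequence loss below. This is a self-contained sketch with made-up shapes, not the library's exact code:

import tensorflow as tf

logits = tf.random_normal([2, 5, 7])    # [batch, time, vocab]
labels = tf.random_uniform([2, 5], maxval=7, dtype=tf.int32)
sequence_length = tf.constant([3, 5])

cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels)       # [batch, time]
weights = tf.sequence_mask(sequence_length, maxlen=5, dtype=tf.float32)
loss = tf.reduce_sum(cross_entropy * weights) / tf.reduce_sum(weights)  # per-token average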
Example 18: attention
def attention(x, feature_dim, sequence_length, mask_zero=False, maxlen=None, epsilon=1e-8, seed=0):
    input_shape = tf.shape(x)
    step_dim = input_shape[1]
    # feature_dim = input_shape[2]
    x = tf.reshape(x, [-1, feature_dim])
    """
    The last dimension of the inputs to `Dense` should be defined. Found `None`.
    We can't use `tf.layers.Dense` with a dynamic last dimension here:
    eij = tf.layers.Dense(1)(x)
    see: https://github.com/tensorflow/tensorflow/issues/13348
    workaround: specify feature_dim as an input
    """
    eij = tf.layers.Dense(1, activation=tf.nn.tanh, kernel_initializer=tf.glorot_uniform_initializer(seed=seed),
                          dtype=tf.float32, bias_initializer=tf.zeros_initializer())(x)
    eij = tf.reshape(eij, [-1, step_dim])
    a = tf.exp(eij)
    # apply the mask after the exp; weights will be re-normalized next
    if mask_zero:
        # None * step_dim
        mask = tf.sequence_mask(sequence_length, maxlen)
        mask = tf.cast(mask, tf.float32)
        a = a * mask
    # in some cases, especially in the early stages of training, the sum may be almost zero
    a /= tf.cast(tf.reduce_sum(a, axis=1, keep_dims=True) + epsilon, tf.float32)
    a = tf.expand_dims(a, axis=-1)
    return a
Author: jkhlot | Project: tensorflow-XNN | Lines: 32 | Source: nn_module.py
Example 19: make_positions
def make_positions(sequence_length, maximum_length=None):
    """Builds a sequence of positions.

    The first position is 1 as the 0 index is reserved for padding positions.

    Args:
      sequence_length: The length of each sequence as a ``tf.Tensor`` of shape
        :math:`[B]`.
      maximum_length: Optional size of the returned time dimension. Otherwise
        it is the maximum of :obj:`sequence_length`.

    Returns:
      The sequence of positions as a ``tf.Tensor`` of shape :math:`[B, T]`.
    """
    if maximum_length is None:
        maximum_length = tf.reduce_max(sequence_length)

    batch_size = tf.shape(sequence_length)[0]

    # Make 0 the position of padding.
    position = tf.range(maximum_length) + 1
    position = tf.tile(position, [batch_size])
    position = tf.reshape(position, [batch_size, -1])

    mask = tf.sequence_mask(
        sequence_length, maxlen=maximum_length, dtype=position.dtype)
    position = position * mask
    return position
Author: yhgon | Project: OpenNMT-tf | Lines: 30 | Source: position.py
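A quick check of the behavior with illustrative lengths: positions are 1-based and padding slots come out as 0.

positions = make_positions(tf.constant([2, 4]))

with tf.Session() as sess:
    print(sess.run(positions))
    # [[1 2 0 0]
    #  [1 2 3 4]]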
Example 20: _mask_by_length
def _mask_by_length(t, length):
    """Mask t, 3-D [batch, time, dim], by length, 1-D [batch,]."""
    maxlen = t.get_shape().as_list()[1]
    mask = tf.sequence_mask(length, maxlen=maxlen)
    mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
    # shape(mask) = (batch, num_timesteps, 1)
    return t * mask
Author: Jmq14 | Project: models | Lines: 7 | Source: adversarial_losses.py
Note: The tensorflow.sequence_mask examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before using or redistributing the code. Do not repost without permission.