This article collects typical usage examples of the tensorflow.mod function in Python. If you have been wondering what Python's mod function does, how to call it, or what real-world usage looks like, the curated examples below should help.
Below are 20 code examples of the mod function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
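A note before the examples: tf.mod computes a flooring remainder (the same op as tf.math.floormod in later releases), so the sign of the result follows the divisor, just like Python's % operator. A quick numpy sketch of the same semantics:

import numpy as np

# Flooring remainder: the result takes the sign of the divisor,
# matching Python's % and tf.mod.
assert np.mod(7, 3) == 1
assert np.mod(-7, 3) == 2
assert np.mod(7, -3) == -2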
Example 1: slice_constant
def slice_constant(data, batch_size=32, name='constant_data', global_step=None):
  """Provide a slice based on the global_step.

  This is useful when the entire data array can be stored in memory because it
  allows you to feed the data very efficiently.

  Args:
    data: A numpy array or tensor.
    batch_size: The batch size for the produced data.
    name: An optional name for this data.
    global_step: A global step variable that is used to read the data. If None
      then the default prettytensor global_step is used.
  Returns:
    A tensor that produces the given data.
  """
  with tf.name_scope(name):
    all_data = tf.convert_to_tensor(data)
    global_step = global_step or bookkeeper.global_step()

    count = len(data) // batch_size
    extra = len(data) - count * batch_size

    if extra:
      # A final partial batch exists: cycle through count + 1 slices and
      # shrink the last one to `extra` elements.
      offset = tf.mod(global_step, count + 1)
      return tf.slice(all_data, offset * batch_size,
                      tf.where(tf.equal(offset, count), extra, batch_size))
    else:
      # The data divides evenly into full batches of batch_size.
      offset = tf.mod(global_step, count)
      return tf.slice(all_data, offset * batch_size, batch_size)
Author: google | Project: prettytensor | Lines: 29 | Source: input_helpers.py
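The slicing arithmetic above is just integer division plus a cycling remainder. Here is a minimal pure-Python sketch of the same offset computation, using made-up sizes:

data_len, batch_size = 100, 32
count = data_len // batch_size         # 3 full batches
extra = data_len - count * batch_size  # 4 leftover elements

# With a partial final batch, the offset cycles through count + 1 slices,
# and the last slice holds only `extra` elements.
for step in range(8):
    offset = step % (count + 1)
    size = extra if offset == count else batch_size
    print(step, offset * batch_size, size)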
Example 2: training_control
def training_control(global_step, print_span, evaluation_span, max_step, name=None):
  with tf.name_scope(name, "training_control"):
    return {
        "step": global_step,
        "time_to_print": tf.equal(tf.mod(global_step, print_span), 0),
        "time_to_evaluate": tf.equal(tf.mod(global_step, evaluation_span), 0),
        "time_to_stop": tf.greater_equal(global_step, max_step),
    }
Author: ys2899 | Project: mean-teacher | Lines: 8 | Source: model.py
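Comparing tf.mod against zero is the standard way to derive "every N steps" flags from a step counter. The same predicates in plain Python, as a rough sketch:

def training_control_py(step, print_span, evaluation_span, max_step):
    # Pure-Python mirror of the tensor predicates above.
    return {
        "step": step,
        "time_to_print": step % print_span == 0,
        "time_to_evaluate": step % evaluation_span == 0,
        "time_to_stop": step >= max_step,
    }

assert training_control_py(200, 50, 100, 1000)["time_to_print"]
assert training_control_py(200, 50, 100, 1000)["time_to_evaluate"]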
Example 3: weights_concatenated
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
    English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate. In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor
  Returns:
    a Tensor
  """
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
  return ret
Author: TrunksLegendary | Project: tensor2tensor | Lines: 27 | Source: common_layers.py
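The key idea is that an exclusive cumulative sum of the EOS mask numbers the segments, and tf.mod(..., 2) turns that numbering into an alternating source/target indicator. A one-dimensional numpy sketch of the same parity trick:

import numpy as np

# 1 marks the end-of-sentence token (ID1); segments alternate source/target.
eos_mask = np.array([0, 0, 1, 0, 1, 0, 0, 1, 0, 1])
segment_num = np.cumsum(eos_mask) - eos_mask  # exclusive cumsum
in_target = np.mod(segment_num, 2) == 1
print(segment_num)  # [0 0 0 1 1 2 2 2 3 3]
print(in_target)    # the odd-numbered segments are the target text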
Example 4: roll_sequence
def roll_sequence(tensor, offsets):
  """Shifts sequences by an offset.

  Args:
    tensor: A ``tf.Tensor`` of shape ``[batch_size, time, ...]``.
    offsets: The offset of each sequence.

  Returns:
    A ``tf.Tensor`` of the same shape as :obj:`tensor` with sequences shifted
    by :obj:`offsets`.
  """
  batch_size = tf.shape(tensor)[0]
  time = tf.shape(tensor)[1]

  # Build one row of column indices per batch entry, shifted by its offset
  # and wrapped around with mod.
  cols = tf.range(time)
  cols = tf.tile(cols, [batch_size])
  cols = tf.reshape(cols, [batch_size, time])
  cols -= tf.expand_dims(offsets, 1)
  cols = tf.mod(cols, time)

  rows = tf.range(batch_size)
  rows = tf.tile(rows, [time])
  rows = tf.reshape(rows, [time, batch_size])
  rows = tf.transpose(rows, perm=[1, 0])

  indices = tf.concat([tf.expand_dims(rows, -1), tf.expand_dims(cols, -1)], -1)
  return tf.gather_nd(tensor, indices)
Author: yhgon | Project: OpenNMT-tf | Lines: 28 | Source: reducer.py
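tf.mod is what makes the shifted column indices wrap around, so each row behaves like np.roll with its own offset. A numpy sketch of the same index arithmetic:

import numpy as np

tensor = np.array([[0, 1, 2, 3],
                   [10, 11, 12, 13]])
offsets = np.array([1, 3])

time = tensor.shape[1]
cols = np.mod(np.arange(time) - offsets[:, None], time)  # wrapped indices
rolled = np.take_along_axis(tensor, cols, axis=1)
print(rolled)  # row 0 rolled by 1, row 1 rolled by 3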
Example 5: minimize_loss_single_machine
def minimize_loss_single_machine(loss,
                                 accuracy,
                                 layer_collection,
                                 device="/gpu:0",
                                 session_config=None):
  """Minimize loss with K-FAC on a single machine.

  A single Session is responsible for running all of K-FAC's ops. The
  covariance and inverse update ops are placed on `device`. All model
  variables are on CPU.

  Args:
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
    layer_collection: LayerCollection instance describing model architecture.
      Used by K-FAC to construct preconditioner.
    device: string. Either '/cpu:0' or '/gpu:0'. The covariance and inverse
      update ops are run on this device.
    session_config: None or tf.ConfigProto. Configuration for tf.Session().

  Returns:
    final value for 'accuracy'.
  """
  # Train with K-FAC.
  g_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=0.0001,
      cov_ema_decay=0.95,
      damping=0.001,
      layer_collection=layer_collection,
      placement_strategy="round_robin",
      cov_devices=[device],
      inv_devices=[device],
      momentum=0.9)
  (cov_update_thunks,
   inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()

  def make_update_op(update_thunks):
    update_ops = [thunk() for thunk in update_thunks]
    return tf.group(*update_ops)

  cov_update_op = make_update_op(cov_update_thunks)
  with tf.control_dependencies([cov_update_op]):
    # Run the (expensive) inverse update only every _INVERT_EVERY steps.
    inverse_op = tf.cond(
        tf.equal(tf.mod(g_step, _INVERT_EVERY), 0),
        lambda: make_update_op(inv_update_thunks), tf.no_op)
    with tf.control_dependencies([inverse_op]):
      with tf.device(device):
        train_op = optimizer.minimize(loss, global_step=g_step)

  tf.logging.info("Starting training.")
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      global_step_, loss_, accuracy_, _ = sess.run(
          [g_step, loss, accuracy, train_op])

      if global_step_ % _INVERT_EVERY == 0:
        tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
                        global_step_, loss_, accuracy_)
  return accuracy_
Author: BhaskarNallani | Project: tensorflow | Lines: 60 | Source: convnet.py
Example 6: unwrap
def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis of which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  # As in np.unwrap: apply no correction where the jump stays below the
  # discontinuity threshold.
  idx = tf.less(tf.abs(dd), discont)
  ph_correct = tf.where(idx, tf.zeros_like(ph_correct), ph_correct)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)

  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
Author: cghawthorne | Project: magenta | Lines: 25 | Source: spectral_ops.py
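tf.mod folds each phase jump into [-π, π) before deciding whether it needs correcting, which is exactly how np.unwrap works. A quick numpy check of that wrapping step:

import numpy as np

phase = np.array([0.0, 2.0, 4.0, -2.0, 0.5])     # wrapped phases
dd = np.diff(phase)
ddmod = np.mod(dd + np.pi, 2.0 * np.pi) - np.pi  # jumps folded into [-pi, pi)
print(ddmod)
print(np.unwrap(phase))  # numpy's reference implementation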
Example 7: _extract_feature
def _extract_feature(inputs, idxs):
  idxs = tf.expand_dims(idxs, 1)

  # Decode the flat index into 2-D map coordinates.
  idx_i = tf.floordiv(idxs, map_h)
  idx_j = tf.mod(idxs, map_h)

  # NOTE: calculate the center of input batches;
  # this depends on the coarse layer's architecture.
  origin_i = 2 * (2 * idx_i + 1) + 3
  origin_j = 2 * (2 * idx_j + 1) + 3
  origin_centers = tf.concat([origin_i, origin_j], 1)
  origin_centers = tf.to_float(origin_centers)

  # NOTE: size also depends on the architecture.
  patches = tf.image.extract_glimpse(inputs, size=[14, 14], offsets=origin_centers,
                                     centered=False, normalized=False)
  fine_features = fine_layers(patches)

  # reuse variables
  tf.get_variable_scope().reuse_variables()

  src_idxs = tf.concat([idx_i, idx_j], 1)
  return fine_features, src_idxs
Author: mikowals | Project: dcn.tf | Lines: 28 | Source: dcn.py
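The floordiv/mod pair converts a flat index back into 2-D grid coordinates; Python's built-in divmod does both at once. A tiny sketch, assuming a grid that is map_h cells wide as in the snippet:

map_h = 7  # hypothetical grid width
for flat_idx in (0, 9, 24):
    idx_i, idx_j = divmod(flat_idx, map_h)  # floordiv and mod in one call
    assert flat_idx == idx_i * map_h + idx_j
    print(flat_idx, "->", (idx_i, idx_j))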
Example 8: sample_from_discretized_mix_logistic
def sample_from_discretized_mix_logistic(y, log_scale_min=-7.):
  '''
  Args:
    y: Tensor, [batch_size, channels, time_length]
  Returns:
    Tensor: sample in range of [-1, 1]
  '''
  with tf.control_dependencies([tf.assert_equal(tf.mod(tf.shape(y)[1], 3), 0)]):
    nr_mix = tf.shape(y)[1] // 3

  # [batch_size, time_length, channels]
  y = tf.transpose(y, [0, 2, 1])
  logit_probs = y[:, :, :nr_mix]

  # sample mixture indicator from softmax
  temp = tf.random_uniform(tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5)
  temp = logit_probs - tf.log(-tf.log(temp))
  argmax = tf.argmax(temp, -1)

  # [batch_size, time_length] -> [batch_size, time_length, nr_mix]
  one_hot = tf.one_hot(argmax, depth=nr_mix, dtype=tf.float32)

  # select logistic parameters
  means = tf.reduce_sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, axis=-1)
  log_scales = tf.maximum(tf.reduce_sum(
      y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, axis=-1), log_scale_min)

  # sample from logistic & clip to interval
  # we don't actually round to the nearest 8-bit value when sampling
  u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
  x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))

  return tf.minimum(tf.maximum(x, -1.), 1.)
Author: duvtedudug | Project: Tacotron-2 | Lines: 32 | Source: mixture.py
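The line `logit_probs - tf.log(-tf.log(temp))` is the Gumbel-max trick: adding Gumbel noise to the logits and taking the argmax draws a sample from the corresponding softmax distribution. A numpy sketch that checks the sampled frequencies:

import numpy as np

rng = np.random.default_rng(0)
logits = np.array([2.0, 1.0, 0.1])

u = rng.uniform(1e-5, 1.0 - 1e-5, size=(100000, 3))
samples = np.argmax(logits - np.log(-np.log(u)), axis=-1)

freqs = np.bincount(samples) / samples.size
softmax = np.exp(logits) / np.exp(logits).sum()
print(freqs)    # empirical frequencies
print(softmax)  # should be close to freqs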
Example 9: py
def py(model, config, scope, connect=None):
  with tf.variable_scope(scope), tf.name_scope(scope):

    with tf.variable_scope('inputs'), tf.name_scope('inputs'):
      if connect is None:
        model['%s_in0length' %scope] = config.getint('global', 'batch_size')
        model['%s_in1length' %scope] = config.getint('global', 'input_size')
        model['%s_in2length' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_in2length' %scope)
        model['%s_maxin2length' %scope] = config.getint('global', 'time_size')
        model['%s_inputs' %scope] = tf.placeholder(tf.float32, [model['%s_maxin2length' %scope], model['%s_in0length' %scope], model['%s_in1length' %scope]], '%s_inputs' %scope)
      else:
        model['%s_in0length' %scope] = model['%s_out0length' %connect]
        model['%s_in1length' %scope] = model['%s_out1length' %connect]
        model['%s_in2length' %scope] = model['%s_out2length' %connect]
        model['%s_maxin2length' %scope] = model['%s_maxout2length' %connect]
        model['%s_inputs' %scope] = model['%s_outputs' %connect]
      model['%s_factor' %scope] = config.getint(scope, 'factor')
      model['%s_out0length' %scope] = model['%s_in0length' %scope]
      model['%s_out1length' %scope] = model['%s_in1length' %scope] * model['%s_factor' %scope]
      # Truncate the time dimension to a multiple of factor before dividing.
      model['%s_out2length' %scope] = tf.div(tf.subtract(model['%s_in2length' %scope], tf.mod(model['%s_in2length' %scope], model['%s_factor' %scope])), model['%s_factor' %scope])
      model['%s_maxout2length' %scope] = (model['%s_maxin2length' %scope] - model['%s_maxin2length' %scope] % model['%s_factor' %scope]) // model['%s_factor' %scope]

    with tf.variable_scope('outputs'), tf.name_scope('outputs'):
      model['%s_transpose' %scope] = tf.transpose(model['%s_inputs' %scope], [0, 2, 1], '%s_transpose' %scope)
      model['%s_transform' %scope] = tf.reshape(model['%s_transpose' %scope], [model['%s_maxout2length' %scope], model['%s_out1length' %scope], model['%s_out0length' %scope]], '%s_transform' %scope)
      model['%s_outputs' %scope] = tf.transpose(model['%s_transform' %scope], [0, 2, 1], '%s_outputs' %scope)

  return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 27 | Source: py.py
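The `tf.subtract(x, tf.mod(x, factor))` pattern rounds the sequence length down to the nearest multiple of factor, so the later reshape fits exactly. The same arithmetic in plain Python:

def downsampled_length(in_length, factor):
    # Drop the remainder so the length divides evenly, then downsample.
    return (in_length - in_length % factor) // factor

assert downsampled_length(100, 4) == 25
assert downsampled_length(103, 4) == 25  # the 3 trailing frames are dropped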
Example 10: __init__
def __init__(self,
             q_t,
             q_tp1,
             q_tp0,
             importance_weights,
             rewards,
             done_mask,
             twin_q_t,
             twin_q_tp1,
             actor_loss_coeff=0.1,
             critic_loss_coeff=1.0,
             gamma=0.99,
             n_step=1,
             use_huber=False,
             huber_threshold=1.0,
             twin_q=False,
             policy_delay=1):
    q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
    if twin_q:
        twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        q_tp1 = tf.minimum(q_tp1, twin_q_tp1)

    q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best

    # Compute RHS of the Bellman equation.
    q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked

    # Compute the error (potentially clipped).
    if twin_q:
        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        twin_td_error = twin_q_t_selected - tf.stop_gradient(
            q_t_selected_target)
        self.td_error = td_error + twin_td_error
        if use_huber:
            errors = _huber_loss(td_error, huber_threshold) + _huber_loss(
                twin_td_error, huber_threshold)
        else:
            errors = 0.5 * tf.square(td_error) + 0.5 * tf.square(
                twin_td_error)
    else:
        self.td_error = (
            q_t_selected - tf.stop_gradient(q_t_selected_target))
        if use_huber:
            errors = _huber_loss(self.td_error, huber_threshold)
        else:
            errors = 0.5 * tf.square(self.td_error)

    self.critic_loss = critic_loss_coeff * tf.reduce_mean(
        importance_weights * errors)

    # For the policy gradient, update the policy net once for every
    # `policy_delay` updates of the critic net.
    global_step = tf.train.get_or_create_global_step()
    policy_delay_mask = tf.to_float(
        tf.equal(tf.mod(global_step, policy_delay), 0))
    self.actor_loss = (-1.0 * actor_loss_coeff * policy_delay_mask *
                       tf.reduce_mean(q_tp0))
Author: robertnishihara | Project: ray | Lines: 59 | Source: ddpg_policy_graph.py
Example 11: manual_update_GDL
def manual_update_GDL(arg, learning_rate, g, mu_noise, stddev_noise):
  sess = arg.sess
  with tf.variable_scope(arg.mdl_scope_name, reuse=True):
    W_var = tf.get_variable(name='W')
    eps = tf.random_normal(tf.shape(g), mean=mu_noise, stddev=stddev_noise)
    W_new = tf.mod(W_var - learning_rate * g + eps, 20)
    sess.run(W_var.assign(W_new))
Author: brando90 | Project: hbf_tensorflow_code | Lines: 8 | Source: GDL.py
Example 12: _add
def _add():
  num_adds_inc = self._num_adds_cs.execute(_increment_num_adds)
  current_pos = tf.mod(num_adds_inc - 1, self._buffer_size)
  update_ops = []
  for name in self._tensors.keys():
    update_ops.append(
        tf.scatter_update(self._tensors[name], current_pos, tensors[name]))
  return tf.group(*update_ops)
Author: Exscotticus | Project: models | Lines: 8 | Source: circular_buffer.py
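Taking the write position modulo the capacity is the classic ring-buffer pattern: insert number n lands in slot (n - 1) % buffer_size, overwriting the oldest entry. A minimal pure-Python sketch of the same bookkeeping:

class RingBuffer:
    def __init__(self, size):
        self.size = size
        self.data = [None] * size
        self.num_adds = 0

    def add(self, item):
        self.num_adds += 1
        current_pos = (self.num_adds - 1) % self.size  # wraps around
        self.data[current_pos] = item

buf = RingBuffer(3)
for i in range(5):
    buf.add(i)
print(buf.data)  # [3, 4, 2] -- the oldest entries were overwritten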
Example 13: discretized_mix_logistic_loss
def discretized_mix_logistic_loss(y_hat, y, num_classes=256,
                                  log_scale_min=-7.0, reduce=True):
  '''Discretized mix of logistic distributions loss.

  Note that it is assumed that input is scaled to [-1, 1]

  Args:
    y_hat: Tensor [batch_size, channels, time_length], predicted output.
    y: Tensor [batch_size, time_length, 1], Target.
  Returns:
    Tensor loss
  '''
  with tf.control_dependencies([tf.assert_equal(tf.mod(tf.shape(y_hat)[1], 3), 0),
                                tf.assert_equal(tf.rank(y_hat), 3)]):
    nr_mix = tf.shape(y_hat)[1] // 3

  # [batch_size, time_length, channels]
  y_hat = tf.transpose(y_hat, [0, 2, 1])

  # unpack parameters. [batch_size, time_length, num_mixtures] x 3
  logit_probs = y_hat[:, :, :nr_mix]
  means = y_hat[:, :, nr_mix:2 * nr_mix]
  log_scales = tf.maximum(y_hat[:, :, 2 * nr_mix:3 * nr_mix], log_scale_min)

  # [batch_size, time_length, 1] -> [batch_size, time_length, num_mixtures]
  y = y * tf.ones(shape=[1, 1, nr_mix], dtype=tf.float32)

  centered_y = y - means
  inv_stdv = tf.exp(-log_scales)
  plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1))
  cdf_plus = tf.nn.sigmoid(plus_in)
  min_in = inv_stdv * (centered_y - 1. / (num_classes - 1))
  cdf_min = tf.nn.sigmoid(min_in)

  log_cdf_plus = plus_in - tf.nn.softplus(plus_in)  # log probability for edge case of 0 (before scaling)
  log_one_minus_cdf_min = -tf.nn.softplus(min_in)   # log probability for edge case of 255 (before scaling)

  # probability for all other cases
  cdf_delta = cdf_plus - cdf_min

  mid_in = inv_stdv * centered_y
  # log probability in the center of the bin, to be used in extreme cases
  # (not actually used in this code)
  log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)

  log_probs = tf.where(y < -0.999, log_cdf_plus,
                       tf.where(y > 0.999, log_one_minus_cdf_min,
                                tf.where(cdf_delta > 1e-5,
                                         tf.log(tf.maximum(cdf_delta, 1e-12)),
                                         log_pdf_mid - np.log((num_classes - 1) / 2))))

  # log_probs = log_probs + tf.nn.log_softmax(logit_probs, -1)
  log_probs = log_probs + log_prob_from_logits(logit_probs)

  if reduce:
    return -tf.reduce_sum(log_sum_exp(log_probs))
  else:
    return -tf.expand_dims(log_sum_exp(log_probs), [-1])
Author: duvtedudug | Project: Tacotron-2 | Lines: 57 | Source: mixture.py
Example 14: top_k_in_2dim_tensor
def top_k_in_2dim_tensor(ts, k):
  shape = tf.shape(ts)
  v, idx = tf.nn.top_k(tf.reshape(ts, [-1]), k)
  # Recover (row, col) coordinates from the flattened indices.
  id_row_col = tf.transpose(tf.reshape(
      tf.concat([tf.div(idx, shape[1]), tf.mod(idx, shape[1])], 0),
      shape=tf.concat([[2], tf.reshape(k, [1])], 0)))
  return v, id_row_col
Author: staylonging | Project: tf | Lines: 9 | Source: utils.py
Example 15: dlstm_scan_fn
def dlstm_scan_fn(previous_output, current_input):
  out, state_out = lstm(current_input, previous_output[1])
  i = previous_output[2]
  basis_i = tf.one_hot(i, depth=chunks)
  state_out_dilated = dilate_one_time_step(tf.squeeze(state_out[0]), basis_i, chunks)
  state_out = rnn.LSTMStateTuple(state_out_dilated, state_out[1])
  i += tf.constant(1)
  new_i = tf.mod(i, chunks)
  return out, state_out, new_i
Author: ioanachelu | Project: turi | Lines: 9 | Source: test_dlstm.py
Example 16: _round_up_tf
def _round_up_tf(x, multiple):
  # TF version of remainder = x % multiple
  remainder = tf.mod(x, multiple)
  # TF version of: return x if remainder == 0 else x + multiple - remainder
  x_round = tf.cond(tf.equal(remainder, tf.zeros(tf.shape(remainder), dtype=tf.int32)),
                    lambda: x,
                    lambda: x + multiple - remainder)
  return x_round
Author: duvtedudug | Project: Tacotron-2 | Lines: 9 | Source: modules.py
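Rounding up to a multiple can also be written without the conditional, because (-x) % multiple is exactly the shortfall under flooring-mod semantics. A numpy sketch of both forms:

import numpy as np

def round_up(x, multiple):
    remainder = np.mod(x, multiple)
    return np.where(remainder == 0, x, x + multiple - remainder)

# Branch-free equivalent using the flooring mod:
assert round_up(13, 5) == 13 + (-13) % 5 == 15
assert round_up(15, 5) == 15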
Example 17: get_bmu_loc
def get_bmu_loc(self, x):
  expanded_x = tf.expand_dims(x, 0)
  sqr_diff = tf.square(tf.subtract(expanded_x, self.nodes))
  dists = tf.reduce_sum(sqr_diff, 1)
  bmu_idx = tf.argmin(dists, 0)
  bmu_loc = tf.stack([tf.mod(bmu_idx, self.width), tf.div(bmu_idx, self.width)])
  return bmu_loc
Author: timestocome | Project: DeepLearning | Lines: 9 | Source: SOM_tf.py
Example 18: preprocess
def preprocess(audio, rate=48000):
  # Pad with 1 second of silence on either side.
  front = tf.zeros([rate, 2], dtype=audio.dtype)
  back = tf.zeros([rate - tf.mod(tf.shape(audio)[0], rate) + rate, 2], dtype=audio.dtype)
  audio = tf.concat([front, audio, back], 0)
  # Shift to be non-negative, then normalize to [0, 1].
  audio = tf.add(audio, tf.abs(tf.reduce_min(audio)))
  audio = tf.multiply(audio, 1.0 / tf.reduce_max(audio))
  # audio = tf.reshape(audio, [-1, int(rate *
  return audio
Author: sventers | Project: hello | Lines: 9 | Source: b_batch_audio_data_tf.py
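The back-padding length uses tf.mod to fill out the final second and then appends one extra full second of silence. In plain Python, assuming the same 48 kHz rate:

rate = 48000

def back_pad_len(n_samples, rate=rate):
    # Fill the last partial second, then add one more full second.
    return rate - n_samples % rate + rate

total = 100000 + back_pad_len(100000)
assert total % rate == 0  # the padded audio is a whole number of seconds

Note that a signal already aligned to the rate still receives two full seconds of back padding, which matches the snippet's behavior.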
Example 19: arg_max_2d
def arg_max_2d(x_in):
  orig_shape = tf.shape(x_in)
  reshape_t = tf.concat([orig_shape[0:1], [-1], orig_shape[3:4]], 0)
  zz = tf.reshape(x_in, reshape_t)
  pp = tf.to_int32(tf.argmax(zz, 1))
  sz1 = tf.slice(orig_shape, [2], [1])
  cc1 = tf.div(pp, tf.to_int32(sz1))
  cc2 = tf.mod(pp, tf.to_int32(sz1))
  return tf.stack([cc1, cc2])
Author: mkabra | Project: poseTF | Lines: 10 | Source: PoseTools.py
Example 20: get_timing_signal_1d
def get_timing_signal_1d(self, length, channels):
  position = tf.to_float(tf.range(length))
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(self.max_timescale) / float(self.min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = self.min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
  # Pad with a zero column if `channels` is odd.
  signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
  signal = tf.reshape(signal, [1, length, channels])
  return signal
Author: sunlinyu1993 | Project: Machine-Learning-Toolbox | Lines: 10 | Source: position_embedding.py
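tf.mod(channels, 2) pads one zero column when the channel count is odd, so the sin/cos halves fill the requested width exactly. A numpy sketch of the same sinusoidal position encoding, assuming the usual timescale defaults:

import numpy as np

def timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length, dtype=np.float32)
    num_timescales = channels // 2
    log_inc = np.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    # An odd channel count gets one zero column of padding.
    signal = np.pad(signal, [[0, 0], [0, channels % 2]])
    return signal.reshape(1, length, channels)

print(timing_signal_1d(10, 5).shape)  # (1, 10, 5)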
Note: The tensorflow.mod examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's License before redistributing or reusing the code, and do not republish without permission.