This page collects typical usage examples of the Python function tensorflow.python.ops.math_ops.range. If you have been wondering what math_ops.range does, how to call it, or what it looks like in real code, the curated examples below may help.
Twenty code examples of the range function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: _do_maximum_mean
def _do_maximum_mean(samples, envelope, high, name=None):
"""Common code between maximum_mean and minimum_mean."""
with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
n = array_ops.rank(samples)
# Move the batch dimension of `samples` to the rightmost position,
# where the _batch_sort_vector function wants it.
perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
samples = array_ops.transpose(samples, perm)
samples = _batch_sort_vector(samples)
# The maximum mean is given by taking `envelope`-worth of
# probability from the smallest samples and moving it to the
# maximum value. This amounts to:
# - ignoring the smallest k samples, where `k/n < envelope`
# - taking a `1/n - (envelope - k/n)` part of the index k sample
# - taking all the other samples
# - and adding `envelope * high` at the end.
# The following is a vectorized and batched way of computing this.
# `max_mean_contrib` is a mask implementing the previous.
batch_size = array_ops.shape(samples)[-1]
batch_size = math_ops.cast(batch_size, dtype=samples.dtype.base_dtype)
step = 1. / batch_size
cum_steps = step * math_ops.range(
1, batch_size + 1, dtype=samples.dtype.base_dtype)
max_mean_contrib = clip_ops.clip_by_value(
cum_steps - envelope[..., array_ops.newaxis],
clip_value_min=0.,
clip_value_max=step)
return math_ops.reduce_sum(
samples * max_mean_contrib, axis=-1) + envelope * high
Contributor: ahmedsaiduk, Project: tensorflow, Lines: 31, Source: statistical_testing.py
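The heart of this example is turning sorted sample positions into cumulative probability steps. A minimal standalone sketch of just that piece, with a made-up sample count, using the public tf.range (the exported name of math_ops.range):
import tensorflow as tf

n = 4                     # hypothetical number of samples per batch
step = 1.0 / n
cum_steps = step * tf.range(1, n + 1, dtype=tf.float32)
# cum_steps == [0.25, 0.5, 0.75, 1.0]: probability mass covered after
# including each successive sorted sample.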
Example 2: crf_unary_score
def crf_unary_score(tag_indices, sequence_lengths, inputs):
"""Computes the unary scores of tag sequences.
Args:
tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
sequence_lengths: A [batch_size] vector of true sequence lengths.
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.
Returns:
unary_scores: A [batch_size] vector of unary scores.
"""
batch_size = array_ops.shape(inputs)[0]
max_seq_len = array_ops.shape(inputs)[1]
num_tags = array_ops.shape(inputs)[2]
flattened_inputs = array_ops.reshape(inputs, [-1])
offsets = array_ops.expand_dims(
math_ops.range(batch_size) * max_seq_len * num_tags, 1)
offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0)
flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1])
unary_scores = array_ops.reshape(
array_ops.gather(flattened_inputs, flattened_tag_indices),
[batch_size, max_seq_len])
masks = _lengths_to_masks(sequence_lengths, array_ops.shape(tag_indices)[1])
unary_scores = math_ops.reduce_sum(unary_scores * masks, 1)
return unary_scores
Contributor: AlbertXiebnu, Project: tensorflow, Lines: 29, Source: crf.py
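The offset arithmetic above flattens a [batch, time, tags] tensor so that a single gather fetches the score of every chosen tag. A small sketch of the same pattern with hypothetical shapes (batch_size=2, max_seq_len=3, num_tags=4):
import tensorflow as tf

inputs = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
tag_indices = tf.constant([[0, 1, 2], [3, 0, 1]])
# Flat offset of example b, step t is b*3*4 + t*4.
offsets = (tf.expand_dims(tf.range(2) * 3 * 4, 1) +
           tf.expand_dims(tf.range(3) * 4, 0))
flat = tf.gather(tf.reshape(inputs, [-1]),
                 tf.reshape(offsets + tag_indices, [-1]))
unary_scores = tf.reshape(flat, [2, 3])  # == inputs[b, t, tag_indices[b, t]]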
Example 3: testShapePassedToGradient
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
Contributor: Wajih-O, Project: tensorflow, Lines: 26, Source: resource_variable_ops_test.py
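Two range idioms appear here: counting up with a bare stop, and counting down with a negative step. Isolated (the values match the test; everything else is omitted):
import tensorflow as tf

indices = tf.range(10)                           # [0, 1, ..., 9], int32
updates = tf.range(9, -1, -1, dtype=tf.float64)  # [9., 8., ..., 0.], reversed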
Example 4: _sample_n
def _sample_n(self, n, seed):
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat([[n], batch_shape, event_shape], 0)
# Complexity: O(nbk**2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
expanded_df = self.df * array_ops.ones(
self.scale_operator.batch_shape_tensor(),
dtype=self.df.dtype.base_dtype)
g = random_ops.random_gamma(shape=[n],
alpha=self._multi_gamma_sequence(
0.5 * expanded_df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk**2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk**2)
perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
x = array_ops.transpose(x, perm)
shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for LinearOperatorDiag, each matmul is O(k**2), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each matmul is O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator.matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
x = array_ops.reshape(x, shape)
perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.matmul(x, x, adjoint_b=True)
return x
Contributor: Jordan1237, Project: tensorflow, Lines: 60, Source: wishart.py
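Both permutations in this example (and again in Example 17) are built from range: one rolls the sample dimension to the back, the other rolls it to the front. A sketch of the roll on a dummy tensor:
import tensorflow as tf

x = tf.zeros([5, 2, 3, 3])  # e.g. [n, batch, k, k]
ndims = tf.rank(x)
perm = tf.concat([tf.range(1, ndims), [0]], axis=0)  # [1, 2, 3, 0]
x_rolled = tf.transpose(x, perm)                     # shape [2, 3, 3, 5]
# Inverse roll: tf.concat([[ndims - 1], tf.range(0, ndims - 1)], axis=0)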
Example 5: _fn
def _fn():
num_rows = np.shape(np_matrix)[0]
num_cols = np.shape(np_matrix)[1]
row_ids = math_ops.range(num_rows, dtype=dtypes.int64)
col_ids = math_ops.range(num_cols, dtype=dtypes.int64)
sp_mat = self.np_array_to_sparse(np_matrix)
sp_mat_t = sparse_ops.sparse_transpose(sp_mat)
row_batch = input_lib.batch(
[row_ids, sp_mat],
batch_size=min(batch_size, num_rows),
capacity=10,
enqueue_many=True)
col_batch = input_lib.batch(
[col_ids, sp_mat_t],
batch_size=min(batch_size, num_cols),
capacity=10,
enqueue_many=True)
features = extract_features(row_batch, col_batch, sp_mat.dense_shape)
if projection_weights is not None:
weights_batch = input_lib.batch(
projection_weights,
batch_size=batch_size,
capacity=10,
enqueue_many=True)
features[wals_lib.WALSMatrixFactorization.PROJECTION_WEIGHTS] = (
weights_batch)
if project_row is not None:
features[wals_lib.WALSMatrixFactorization.PROJECT_ROW] = (
constant_op.constant(project_row))
labels = None
return features, labels
Contributor: LUTAN, Project: tensorflow, Lines: 33, Source: wals_test.py
Example 6: _potential_scale_reduction_single_state
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
"""potential_scale_reduction for one single state `Tensor`."""
# We assume exactly one leading dimension indexes e.g. correlated samples from
# each Markov chain.
state = ops.convert_to_tensor(state, name="state")
sample_ndims = 1
sample_axis = math_ops.range(0, sample_ndims)
chain_axis = math_ops.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = math_ops.range(0,
sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of [2],
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
math_ops.reduce_mean(state, sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = math_ops.reduce_mean(
_reduce_variance(state, sample_axis, keepdims=True, biased=True),
sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
# each chain was drawn from the target. c.f. "law of total variance."
sigma_2_plus = w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
Contributor: ChengYuXiang, Project: tensorflow, Lines: 32, Source: mcmc_diagnostics_impl.py
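The three range calls carve the leading dimensions into sample and chain axis groups that are then fed to reductions. Isolated, with illustrative ndims values (the chain count is made up here):
import tensorflow as tf

sample_ndims, independent_chain_ndims = 1, 2
sample_axis = tf.range(0, sample_ndims)                        # [0]
chain_axis = tf.range(sample_ndims,
                      sample_ndims + independent_chain_ndims)  # [1, 2]
sample_and_chain_axis = tf.range(
    0, sample_ndims + independent_chain_ndims)                 # [0, 1, 2]
# Each vector is a valid `axis` argument for tf.reduce_mean and friends.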
Example 7: to_weighted_sum
def to_weighted_sum(self,
input_tensor,
num_outputs=1,
weight_collections=None,
trainable=True):
"""Returns a Tensor as linear predictions and a list of created Variable."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor)[0]
if dimension > 1:
i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
# TODO(chapelle): move that logic to insert_transformed_feature to ensure
# unique buckets across dimensions after crossing.
bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
bucket_indices = array_ops.reshape(input_tensor, [-1])
indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
shape = math_ops.to_int64(array_ops.pack([batch_size, 1]))
sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
vocab_size = self.length * self.source_column.dimension
return _create_embedding_lookup(
sparse_id_values, vocab_size, num_outputs,
_add_variable_collection(weight_collections), 0., "sum",
trainable, self.name + "_weights")
Contributor: YanLongDong, Project: tensorflow, Lines: 33, Source: feature_column.py
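The i1/i2 pair enumerates every (row, dimension) coordinate of the batch. The same construction with made-up sizes:
import tensorflow as tf

batch_size, dimension = 2, 3
i1 = tf.reshape(
    tf.tile(tf.expand_dims(tf.range(0, batch_size), 1), [1, dimension]),
    [-1])                                           # [0, 0, 0, 1, 1, 1]
i2 = tf.tile(tf.range(0, dimension), [batch_size])  # [0, 1, 2, 0, 1, 2]
# Zipped together they index every cell of a [batch_size, dimension] grid.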
Example 8: move_right_permutation
def move_right_permutation():
return util.prefer_static_value(
array_ops.concat([
math_ops.range(0, source_idx, dtype=dtype),
math_ops.range(source_idx+1, dest_idx+1, dtype=dtype),
[source_idx],
math_ops.range(dest_idx+1, ndims, dtype=dtype)], axis=0))
Contributor: BhaskarNallani, Project: tensorflow, Lines: 7, Source: distribution_util.py
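The function stitches four range segments into a permutation that moves one axis to the right. Traced with concrete (hypothetical) index values:
import tensorflow as tf

source_idx, dest_idx, ndims, dtype = 1, 3, 5, tf.int32
perm = tf.concat([
    tf.range(0, source_idx, dtype=dtype),                 # [0]
    tf.range(source_idx + 1, dest_idx + 1, dtype=dtype),  # [2, 3]
    [source_idx],                                         # [1]
    tf.range(dest_idx + 1, ndims, dtype=dtype)],          # [4]
    axis=0)
# perm == [0, 2, 3, 1, 4]: axis 1 has moved right to position 3.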
Example 9: test_docstring_example
def test_docstring_example(self):
# Produce the first 1000 members of the Halton sequence in 3 dimensions.
num_results = 1000
dim = 3
with self.test_session():
sample = halton.sample(dim, num_results=num_results, randomized=False)
# Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional
# hypercube.
powers = math_ops.range(1.0, limit=dim + 1)
integral = math_ops.reduce_mean(
math_ops.reduce_prod(sample ** powers, axis=-1))
true_value = 1.0 / math_ops.reduce_prod(powers + 1.0)
# Produces a relative absolute error of 1.7%.
self.assertAllClose(integral.eval(), true_value.eval(), rtol=0.02)
# Now skip the first 1000 samples and recompute the integral with the next
# thousand samples. The sequence_indices argument can be used to do this.
sequence_indices = math_ops.range(start=1000, limit=1000 + num_results,
dtype=dtypes.int32)
sample_leaped = halton.sample(dim, sequence_indices=sequence_indices,
randomized=False)
integral_leaped = math_ops.reduce_mean(
math_ops.reduce_prod(sample_leaped ** powers, axis=-1))
self.assertAllClose(integral_leaped.eval(), true_value.eval(), rtol=0.05)
Contributor: QiangCai, Project: tensorflow, Lines: 28, Source: halton_sequence_test.py
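One detail worth noting: passing a float start makes range produce a float tensor, which is what lets `sample ** powers` work without a cast.
import tensorflow as tf

dim = 3
powers = tf.range(1.0, limit=dim + 1)  # [1.0, 2.0, 3.0], float32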
Example 10: _ExtractImagePatchesGrad
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].shape.dims
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + rows_in * cols_in
input_idx = array_ops.reshape(math_ops.range(1, input_indices_num,
dtype=ops.dtypes.int64),
(1, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_image_patches(
input_idx,
op.get_attr("ksizes"),
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
# Create indices matrix for output tensor.
_, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].shape.dims]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
output_indices_num = rows_out * cols_out * ksize_r * ksize_c
output_idx = array_ops.reshape(math_ops.range(output_indices_num,
dtype=ops.dtypes.int64),
(1, rows_out, cols_out, ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat(
[array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map,
array_ops.ones([output_indices_num], dtype=grad.dtype),
sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full,
(1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(
grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
Contributor: Wajih-O, Project: tensorflow, Lines: 60, Source: array_grad.py
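The gradient relies on giving every input pixel a nonzero id so that id 0 can mark padding. Just that numbering step, sketched with toy sizes:
import tensorflow as tf

rows_in, cols_in = 3, 3
# Ids start at 1; 0 is reserved for padded locations.
input_idx = tf.reshape(
    tf.range(1, 1 + rows_in * cols_in, dtype=tf.int64),
    (1, rows_in, cols_in, 1))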
Example 11: frames
def frames(signal, frame_length, frame_step, name=None):
"""Frame a signal into overlapping frames.
May be used in front of spectral functions.
For example:
```python
pcm = tf.placeholder(tf.float32, [None, 9152])
frames = tf.contrib.signal.frames(pcm, 512, 180)
magspec = tf.abs(tf.spectral.rfft(frames, [512]))
image = tf.expand_dims(magspec, 3)
```
Args:
signal: A `Tensor` of shape `[batch_size, signal_length]`.
frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
frame_step: An `int32` or `int64` `Tensor`. The step between frames.
name: A name for the operation (optional).
Returns:
A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.
Raises:
ValueError: if signal does not have rank 2.
"""
with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
signal = ops.convert_to_tensor(signal, name="signal")
frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
signal_rank = signal.shape.ndims
if signal_rank != 2:
raise ValueError("expected signal to have rank 2 but was " + signal_rank)
signal_length = array_ops.shape(signal)[1]
num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)
pad_length = (num_frames - 1) * frame_step + frame_length
pad_signal = array_ops.pad(signal, [[0, 0], [0,
pad_length - signal_length]])
indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
indices_frames = array_ops.tile(indices_frame, [num_frames, 1])
indices_step = array_ops.expand_dims(
math_ops.range(num_frames) * frame_step, 1)
indices_steps = array_ops.tile(indices_step, [1, frame_length])
indices = indices_frames + indices_steps
# TODO(androbin): remove `transpose` when `gather` gets `axis` support
pad_signal = array_ops.transpose(pad_signal)
signal_frames = array_ops.gather(pad_signal, indices)
signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])
return signal_frames
Contributor: AlbertXiebnu, Project: tensorflow, Lines: 60, Source: shape_ops.py
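The two range calls build a [num_frames, frame_length] matrix of sample indices, one row per frame, stepped by frame_step. With toy values:
import tensorflow as tf

frame_length, frame_step, num_frames = 4, 2, 3
indices_frame = tf.expand_dims(tf.range(frame_length), 0)            # [[0, 1, 2, 3]]
indices_step = tf.expand_dims(tf.range(num_frames) * frame_step, 1)  # [[0], [2], [4]]
indices = (tf.tile(indices_frame, [num_frames, 1]) +
           tf.tile(indices_step, [1, frame_length]))
# indices == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]]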
Example 12: testFormatOneTensorOneDimVarySummarize
def testFormatOneTensorOneDimVarySummarize(self):
with self.test_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=-1)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4 5]"
self.assertEqual(compat.as_text(out), expected)
with self.test_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=1)
out = self.evaluate(format_output)
expected = "[0 ... 5]"
self.assertEqual(compat.as_text(out), expected)
with self.test_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=2)
out = self.evaluate(format_output)
expected = "[0 1 ... 4 5]"
self.assertEqual(compat.as_text(out), expected)
with self.test_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=10)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4 5]"
self.assertEqual(compat.as_text(out), expected)
Contributor: daiwk, Project: tensorflow, Lines: 28, Source: string_format_op_test.py
Example 13: _tf_range
def _tf_range(start_or_stop, stop, step):
# TODO(mdan): We should optimize this when a full tensor is not required.
if step is not UNDEFINED:
return math_ops.range(start_or_stop, stop, step)
if stop is not UNDEFINED:
return math_ops.range(start_or_stop, stop)
return math_ops.range(start_or_stop)
Contributor: AnishShah, Project: tensorflow, Lines: 7, Source: py_builtins.py
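This dispatcher mirrors the three calling conventions of range itself:
import tensorflow as tf

tf.range(5)         # stop only:          [0, 1, 2, 3, 4]
tf.range(2, 5)      # start and stop:     [2, 3, 4]
tf.range(0, 10, 3)  # start, stop, step:  [0, 3, 6, 9]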
Example 14: gather_tree_from_array
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape[0].value or array_ops.shape(parent_ids)[0]
batch_size = parent_ids.shape[1].value or array_ops.shape(parent_ids)[1]
beam_width = parent_ids.shape[2].value or array_ops.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = array_ops.expand_dims(
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
mask = array_ops.sequence_mask(
sequence_length, maxlen=max_time, dtype=dtypes.int32)
mask = array_ops.transpose(mask, perm=[2, 0, 1])
# Use beam_width + 1 to mark the end of beam.
masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=masked_beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
sorted_beam_ids = array_ops.where(
math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = array_ops.tile(array_ops.reshape(
math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = array_ops.shape(gather_from)
gather_from = array_ops.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = array_ops.gather_nd(gather_from, indices)
ordered = array_ops.reshape(ordered, final_shape)
return ordered
Contributor: BhaskarNallani, Project: tensorflow, Lines: 59, Source: beam_search_decoder.py
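time_ind and batch_ind are coordinate grids built from range and tile; stacked with the sorted beam ids, they form the gather_nd indices. A reduced sketch with made-up sizes:
import tensorflow as tf

max_time, batch_size, beam_width = 3, 2, 2
time_ind = tf.tile(tf.reshape(tf.range(max_time), [-1, 1, 1]),
                   [1, batch_size, beam_width])
batch_ind = tf.transpose(
    tf.tile(tf.reshape(tf.range(batch_size), [-1, 1, 1]),
            [1, max_time, beam_width]),
    perm=[1, 0, 2])
# time_ind[t, b, w] == t and batch_ind[t, b, w] == b for all w.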
Example 15: _maybe_rotate_dims
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
if tensor_util.constant_value(self._needs_rotation) is False:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
Contributor: arnonhongklay, Project: tensorflow, Lines: 8, Source: transformed_distribution.py
Example 16: _GatherV2Grad
def _GatherV2Grad(op, grad):
"""Gradient for GatherV2 op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
indices = op.inputs[1]
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
axis = op.inputs[2]
axis_static = tensor_util.constant_value(axis)
# For axis 0 gathers, build an appropriately shaped IndexedSlices.
if axis_static == 0:
values_shape = array_ops.concat([indices_size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
return [ops.IndexedSlices(values, indices, params_shape), None, None]
outer_shape = params_shape[:axis]
outer_dims = array_ops.size(outer_shape)
inner_shape = params_shape[axis:][1:]
inner_dims = array_ops.size(inner_shape)
outer_axes_indices = math_ops.range(outer_dims)
inner_axes_indices = math_ops.range(outer_dims + 1,
outer_dims + 1 + inner_dims)
values_shape = array_ops.concat([outer_shape, indices_size, inner_shape], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
# We need to sum up every slice `values[..., i, ....]` corresponding to
# `params[..., indices[i], ...]`. Since `unsorted_segment_sum` does not
# support an axis parameter, we transpose the gather dimension to the front,
# then use `unsorted_segment_sum` to build a
# [gather_axis, outer_axes, inner_axes] tensor with all the gradients
# affecting each index in `gather_axis` summed up.
transpose_dims = array_ops.concat(
[[outer_dims], outer_axes_indices, inner_axes_indices], 0)
values_transpose = array_ops.transpose(values, transpose_dims)
num_segments = params_shape[axis]
params_grad = math_ops.unsorted_segment_sum(
values_transpose, indices, num_segments)
# Inverts the above transpose by moving dimension 0 back to its original
# position.
invert_transpose_dims = array_ops.concat(
[outer_axes_indices + 1, [0], inner_axes_indices], 0)
params_grad = array_ops.transpose(params_grad, invert_transpose_dims)
return [params_grad, None, None]
Contributor: Dr4KK, Project: tensorflow, Lines: 58, Source: array_grad.py
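The two range calls split the parameter axes into the group before and the group after the gather axis, which is deliberately skipped. With illustrative (hypothetical) values:
import tensorflow as tf

axis, params_rank = 2, 6
outer_dims, inner_dims = axis, params_rank - axis - 1
outer_axes_indices = tf.range(outer_dims)                   # [0, 1]
inner_axes_indices = tf.range(outer_dims + 1,
                              outer_dims + 1 + inner_dims)  # [3, 4, 5]
# Position 2 is left out: that is where the gathered indices dimension sits.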
Example 17: _sample_n
def _sample_n(self, n, seed):
batch_shape = self.batch_shape()
event_shape = self.event_shape()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat(((n,), batch_shape, event_shape), 0)
# Complexity: O(nbk^2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
g = random_ops.random_gamma(shape=(n,),
alpha=self._multi_gamma_sequence(
0.5 * self.df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk^2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk^2)
perm = array_ops.concat((math_ops.range(1, ndims), (0,)), 0)
x = array_ops.transpose(x, perm)
shape = array_ops.concat((batch_shape, (event_shape[0], -1)), 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
# O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator_pd.sqrt_matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat((batch_shape, event_shape, (n,)), 0)
x = array_ops.reshape(x, shape)
perm = array_ops.concat(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.matmul(x, x, adjoint_b=True)
return x
Contributor: ivankreso, Project: tensorflow, Lines: 57, Source: wishart.py
Example 18: _rotate_last_dim
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
Contributor: aritratony, Project: tensorflow, Lines: 10, Source: linear_operator_kronecker.py
Example 19: _maybe_rotate_dims
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
if tensor_util.constant_value(self._needs_rotation) is False:
return x
ndims = array_ops.rank(x)
n = _pick_scalar_condition(self._needs_rotation,
self._override_event_ndims, 0)
if rotate_right:
n = ndims - n
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
Contributor: kdavis-mozilla, Project: tensorflow, Lines: 11, Source: transformed_distribution.py
Example 20: _ctc_state_trans
def _ctc_state_trans(label_seq):
"""Compute CTC alignment model transition matrix.
Args:
label_seq: tensor of shape [batch_size, max_seq_length]
Returns:
tensor of shape [batch_size, states, states] with a state transition matrix
computed for each sequence of the batch.
"""
with ops.name_scope("ctc_state_trans"):
label_seq = ops.convert_to_tensor(label_seq, name="label_seq")
batch_size = _get_dim(label_seq, 0)
num_labels = _get_dim(label_seq, 1)
num_label_states = num_labels + 1
num_states = 2 * num_label_states
label_states = math_ops.range(num_label_states)
blank_states = label_states + num_label_states
# Start state to first label.
start_to_label = [[1, 0]]
# Blank to label transitions.
blank_to_label = array_ops.stack([label_states[1:], blank_states[:-1]], 1)
# Label to blank transitions.
label_to_blank = array_ops.stack([blank_states, label_states], 1)
# Scatter transitions that don't depend on sequence.
indices = array_ops.concat(
[start_to_label, blank_to_label, label_to_blank], 0)
values = array_ops.ones([_get_dim(indices, 0)])
trans = array_ops.scatter_nd(
indices, values, shape=[num_states, num_states])
trans += linalg_ops.eye(num_states) # Self-loops.
# Label to label transitions. Disallow transitions between repeated labels
# with no blank state in between.
batch_idx = array_ops.zeros_like(label_states[2:])
indices = array_ops.stack(
[batch_idx, label_states[2:], label_states[1:-1]], 1)
indices = array_ops.tile(
array_ops.expand_dims(indices, 0), [batch_size, 1, 1])
batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0]
indices += array_ops.expand_dims(batch_idx, 1)
repeats = math_ops.equal(label_seq[:, :-1], label_seq[:, 1:])
values = 1.0 - math_ops.cast(repeats, dtypes.float32)
batched_shape = [batch_size, num_states, num_states]
label_to_label = array_ops.scatter_nd(indices, values, batched_shape)
return array_ops.expand_dims(trans, 0) + label_to_label
Contributor: adit-chandra, Project: tensorflow, Lines: 54, Source: ctc_ops.py
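The state numbering is the part that uses range: label states come first, and blank states are the same ids shifted by num_label_states. Isolated with a tiny label count:
import tensorflow as tf

num_labels = 3
num_label_states = num_labels + 1               # +1 for the start state
label_states = tf.range(num_label_states)       # [0, 1, 2, 3]
blank_states = label_states + num_label_states  # [4, 5, 6, 7]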
Note: The tensorflow.python.ops.math_ops.range examples in this article were compiled by 纯净天空 from source-code and documentation hosts such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright remains with the original authors, and distribution or reuse should follow each project's license. Do not reproduce without permission.