This article collects typical usage examples of the logical_and function from tensorflow.python.ops.math_ops in Python. If you have been wondering what exactly logical_and does and how it is used in practice, the curated code examples here may help.
The following presents 20 code examples of the logical_and function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
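Before diving into the examples, here is a minimal, self-contained sketch of what logical_and itself computes: the element-wise AND of two boolean tensors. This demo is not from any of the projects below; it assumes TensorFlow 2.x with eager execution, and the tensor values are made up.

import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])
# Element-wise AND; inputs broadcast like other binary ops.
print(math_ops.logical_and(a, b))  # [True, False, False, False]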
Example 1: dense_make_stats_update
def dense_make_stats_update(is_active, are_buckets_ready, float_column,
                            quantile_buckets, example_partition_ids, gradients,
                            hessians, weights, empty_gradients, empty_hessians):
  """Updates the state for dense split handler."""
  empty_float = constant_op.constant_v1([], dtype=dtypes.float32)

  quantile_values, quantile_weights = control_flow_ops.cond(
      is_active[1],  # For the next layer, this handler is inactive.
      lambda: (float_column, weights),
      lambda: (empty_float, empty_float))

  def ready_inputs_fn():
    """Branch to execute when quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([float_column], [],
                                               [quantile_buckets], [], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
    return (example_partition_ids, quantized_feature, gradients, hessians)

  def not_ready_inputs_fn():
    return (constant_op.constant_v1([], dtype=dtypes.int32),
            constant_op.constant_v1([[]], dtype=dtypes.int64, shape=[1, 2]),
            empty_gradients, empty_hessians)

  example_partition_ids, feature_ids, gradients, hessians = (
      control_flow_ops.cond(
          math_ops.logical_and(
              math_ops.logical_and(are_buckets_ready,
                                   array_ops.size(quantile_buckets) > 0),
              is_active[0]), ready_inputs_fn, not_ready_inputs_fn))
  return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
          gradients, hessians)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 32, Source file: ordinal_split_handler.py
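Example 1 combines three conditions by nesting two logical_and calls, since logical_and is binary. A minimal sketch of that nesting in isolation (made-up scalar conditions, assuming TF 2.x eager mode):

import tensorflow as tf

are_buckets_ready = tf.constant(True)
has_buckets = tf.size(tf.constant([0.0, 0.5, 1.0])) > 0
is_active = tf.constant(True)
# Three conditions are folded pairwise into one predicate.
ready = tf.logical_and(tf.logical_and(are_buckets_ready, has_buckets),
                       is_active)
print(ready)  # True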
Example 2: maybe_update_masks
def maybe_update_masks():
  with ops.name_scope(self._spec.name):
    is_step_within_pruning_range = math_ops.logical_and(
        math_ops.greater_equal(self._global_step,
                               self._spec.begin_pruning_step),
        # If end_pruning_step is negative, keep pruning forever!
        math_ops.logical_or(
            math_ops.less_equal(self._global_step,
                                self._spec.end_pruning_step),
            math_ops.less(self._spec.end_pruning_step, 0)))
    is_pruning_step = math_ops.less_equal(
        math_ops.add(self._last_update_step, self._spec.pruning_frequency),
        self._global_step)
    return math_ops.logical_and(is_step_within_pruning_range,
                                is_pruning_step)
Developer: SylChan, Project: tensorflow, Lines: 15, Source file: pruning.py
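The predicate in Example 2 reads: the step is at or past begin_pruning_step, and either at or before end_pruning_step or end_pruning_step is negative (prune forever). A standalone sketch of that predicate with made-up step values, assuming TF 2.x eager mode:

import tensorflow as tf

global_step = tf.constant(120, tf.int64)
begin_pruning_step = tf.constant(100, tf.int64)
end_pruning_step = tf.constant(-1, tf.int64)  # negative: never stop pruning

is_step_within_pruning_range = tf.logical_and(
    global_step >= begin_pruning_step,
    tf.logical_or(global_step <= end_pruning_step, end_pruning_step < 0))
print(is_step_within_pruning_range)  # True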
Example 3: is_initialized
def is_initialized(self, name=None):
  # We have to cast the self._index.values() to a `list` because when we
  # use `model_to_estimator` to run tf.keras models, self._index.values() is
  # of type `dict_values` and not `list`.
  values_list = list(self._index.values())
  result = values_list[0].is_initialized()
  # We iterate through the list of values except the last one to allow us to
  # name the final `logical_and` op the same name that is passed by the user
  # to the `is_initialized` op. For tower local variables, the
  # `is_initialized` op is a `logical_and` op.
  for v in values_list[1:-1]:
    result = math_ops.logical_and(result, v.is_initialized())
  result = math_ops.logical_and(result, values_list[-1].is_initialized(),
                                name=name)
  return result
Developer: Eagle732, Project: tensorflow, Lines: 15, Source file: values.py
Example 4: body
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
  """Internal while_loop body.

  Args:
    time: scalar int32 tensor.
    outputs_ta: structure of TensorArray.
    state: (structure of) state tensors and TensorArrays.
    inputs: (structure of) input tensors.
    finished: bool tensor (keeping track of what's finished).
    sequence_lengths: int32 tensor (keeping track of time of finish).

  Returns:
    `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
      next_sequence_lengths)`.
  """
  (next_outputs, decoder_state, next_inputs,
   decoder_finished) = decoder.step(time, inputs, state)
  next_finished = math_ops.logical_or(decoder_finished, finished)
  if maximum_iterations is not None:
    next_finished = math_ops.logical_or(
        next_finished, time + 1 >= maximum_iterations)
  next_sequence_lengths = array_ops.where(
      math_ops.logical_and(math_ops.logical_not(finished), next_finished),
      array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
      sequence_lengths)

  nest.assert_same_structure(state, decoder_state)
  nest.assert_same_structure(outputs_ta, next_outputs)
  nest.assert_same_structure(inputs, next_inputs)

  # Zero out output values past finish
  if impute_finished:
    emit = nest.map_structure(
        lambda out, zero: array_ops.where(finished, zero, out),
        next_outputs,
        zero_outputs)
  else:
    emit = next_outputs

  # Copy through states past finish
  def _maybe_copy_state(new, cur):
    # TensorArrays and scalar states get passed through.
    if isinstance(cur, tensor_array_ops.TensorArray):
      pass_through = True
    else:
      new.set_shape(cur.shape)
      pass_through = (new.shape.ndims == 0)
    return new if pass_through else array_ops.where(finished, cur, new)

  if impute_finished:
    next_state = nest.map_structure(
        _maybe_copy_state, decoder_state, state)
  else:
    next_state = decoder_state

  outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                  outputs_ta, emit)
  return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 60, Source file: decoder.py
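The logical_and in Example 4 isolates the batch elements that finished exactly at this step: previously unfinished and now finished, so their sequence length can be recorded as time + 1. A small sketch of that mask with a made-up batch of three decodes, assuming TF 2.x eager mode:

import tensorflow as tf

finished = tf.constant([True, False, False])      # state before this step
next_finished = tf.constant([True, True, False])  # state after this step
just_finished = tf.logical_and(tf.logical_not(finished), next_finished)
print(just_finished)  # [False, True, False]: only element 1 gets time + 1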
Example 5: get_seed
def get_seed(seed):
  """Returns the local seeds an operation should use given an op-specific seed.

  See `tf.compat.v1.get_seed` for more details. This wrapper adds support for
  the case where `seed` may be a tensor.

  Args:
    seed: An integer or a `tf.int64` scalar tensor.

  Returns:
    A tuple of two `tf.int64` scalar tensors that should be used for the local
    seed of the calling dataset.
  """
  seed, seed2 = random_seed.get_seed(seed)
  if seed is None:
    seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
  else:
    seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
  if seed2 is None:
    seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
  else:
    with ops.name_scope("seed2") as scope:
      seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
      seed2 = array_ops.where(
          math_ops.logical_and(
              math_ops.equal(seed, 0), math_ops.equal(seed2, 0)),
          constant_op.constant(2**31 - 1, dtype=dtypes.int64),
          seed2,
          name=scope)
  return seed, seed2
Developer: aritratony, Project: tensorflow, Lines: 31, Source file: random_seed.py
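The logical_and in Example 5 guards against the (0, 0) seed pair, which the downstream kernels would otherwise interpret as "not seeded". A sketch of just that remapping, assuming TF 2.x eager mode:

import tensorflow as tf

seed = tf.constant(0, tf.int64)
seed2 = tf.constant(0, tf.int64)
# If both seeds are zero, replace seed2 with a fixed non-zero value.
seed2 = tf.where(
    tf.logical_and(tf.equal(seed, 0), tf.equal(seed2, 0)),
    tf.constant(2**31 - 1, tf.int64), seed2)
print(int(seed), int(seed2))  # 0 2147483647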
Example 6: mode
def mode(self, name="mode"):
  """Mode of the distribution.

  Note that the mode for the Beta distribution is only defined
  when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
  and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
  will be raised rather than returning `NaN`.

  Args:
    name: The name for this op.

  Returns:
    Mode of the Beta distribution.
  """
  with ops.name_scope(self.name):
    with ops.op_scope([self._a, self._b, self._a_b_sum], name):
      a = self._a
      b = self._b
      a_b_sum = self._a_b_sum
      one = constant_op.constant(1, self.dtype)
      mode = (a - 1) / (a_b_sum - 2)
      if self.allow_nan_stats:
        return math_ops.select(
            math_ops.logical_and(
                math_ops.greater(a, 1), math_ops.greater(b, 1)),
            mode,
            (constant_op.constant(float("NaN"), dtype=self.dtype) *
             array_ops.ones_like(a_b_sum, dtype=self.dtype)))
      else:
        return control_flow_ops.with_dependencies([
            check_ops.assert_less(one, a),
            check_ops.assert_less(one, b)], mode)
Developer: 2020zyc, Project: tensorflow, Lines: 33, Source file: beta.py
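Example 6 only returns the closed-form Beta mode where both parameters exceed 1, and NaN elsewhere. The same select-with-logical_and pattern can be sketched with the public API (math_ops.select has since been superseded by tf.where; the parameter values here are made up):

import tensorflow as tf

a = tf.constant([2.0, 0.5])
b = tf.constant([3.0, 0.5])
mode = (a - 1.0) / (a + b - 2.0)
nan = tf.fill(tf.shape(a), float("nan"))
print(tf.where(tf.logical_and(a > 1.0, b > 1.0), mode, nan))
# [0.33333334, nan]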
Example 7: collapse_repeated
def collapse_repeated(labels, seq_length, name=None):
  """Merge repeated labels into single labels.

  Args:
    labels: Tensor of shape [batch, max value in seq_length]
    seq_length: Tensor of shape [batch], sequence length of each batch element.
    name: A name for this `Op`. Defaults to "collapse_repeated_labels".

  Returns:
    A tuple `(collapsed_labels, new_seq_length)` where

    collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
    labels collapsed and padded to max_seq_length, eg:
    `[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`

    new_seq_length: int tensor of shape [batch] with new sequence lengths.
  """
  with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]):
    labels = ops.convert_to_tensor(labels, name="labels")
    seq_length = ops.convert_to_tensor(seq_length, name="seq_length")

    # Mask labels that don't equal previous label.
    label_mask = array_ops.concat([
        array_ops.ones_like(labels[:, :1], dtypes.bool),
        math_ops.not_equal(labels[:, 1:], labels[:, :-1])
    ], axis=1)

    # Filter labels that aren't in the original sequence.
    maxlen = _get_dim(labels, 1)
    seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
    label_mask = math_ops.logical_and(label_mask, seq_mask)

    # Count masks for new sequence lengths.
    new_seq_len = math_ops.reduce_sum(
        math_ops.cast(label_mask, dtypes.int32), axis=1)

    # Mask indexes based on sequence length mask.
    new_maxlen = math_ops.reduce_max(new_seq_len)
    idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)

    # Flatten everything and mask out labels to keep and sparse indices.
    flat_labels = array_ops.reshape(labels, [-1])
    flat_label_mask = array_ops.reshape(label_mask, [-1])
    flat_idx_mask = array_ops.reshape(idx_mask, [-1])
    idx = math_ops.range(_get_dim(flat_idx_mask, 0))

    # Scatter to flat shape.
    flat = array_ops.scatter_nd(
        indices=array_ops.expand_dims(
            array_ops.boolean_mask(idx, flat_idx_mask), axis=1),
        updates=array_ops.boolean_mask(flat_labels, flat_label_mask),
        shape=array_ops.shape(flat_idx_mask))

    # Reshape back to square batch.
    batch_size = _get_dim(labels, 0)
    new_shape = [batch_size, new_maxlen]
    return (array_ops.reshape(flat, new_shape),
            math_ops.cast(new_seq_len, seq_length.dtype))
Developer: aritratony, Project: tensorflow, Lines: 60, Source file: ctc_ops.py
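The key logical_and in Example 7 intersects "differs from the previous label" with "inside the real (unpadded) sequence". A sketch of just that mask, with made-up labels and TF 2.x eager mode assumed:

import tensorflow as tf

labels = tf.constant([[1, 1, 2, 2, 1],
                      [1, 2, 3, 4, 5]])
seq_length = tf.constant([5, 3])
label_mask = tf.concat(
    [tf.ones_like(labels[:, :1], tf.bool),
     tf.not_equal(labels[:, 1:], labels[:, :-1])], axis=1)
seq_mask = tf.sequence_mask(seq_length, maxlen=5)
print(tf.logical_and(label_mask, seq_mask))
# [[True, False, True, False, True],
#  [True, True, True, False, False]]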
Example 8: _filter_input
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
                  vocab_subsampling, corpus_size, seed):
  """Filters input tensor based on vocab freq, threshold, and subsampling."""
  if vocab_freq_table is None:
    return input_tensor

  if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
    raise ValueError(
        "vocab_freq_table must be a subclass of "
        "InitializableLookupTableBase (such as HashTable) instead of type "
        "{}.".format(type(vocab_freq_table)))

  with ops.name_scope(
      "filter_vocab", values=[vocab_freq_table, input_tensor,
                              vocab_min_count]):
    freq = vocab_freq_table.lookup(input_tensor)
    # Filters out elements in input_tensor that are not found in
    # vocab_freq_table (table returns a default value of -1 specified above
    # when an element is not found).
    mask = math_ops.not_equal(freq, vocab_freq_table.default_value)

    # Filters out elements whose vocab frequencies are less than the threshold.
    if vocab_min_count is not None:
      cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
      mask = math_ops.logical_and(mask,
                                  math_ops.greater_equal(freq, cast_threshold))

    input_tensor = array_ops.boolean_mask(input_tensor, mask)
    freq = array_ops.boolean_mask(freq, mask)

  if not vocab_subsampling:
    return input_tensor

  if vocab_subsampling < 0 or vocab_subsampling > 1:
    raise ValueError(
        "Invalid vocab_subsampling={} - it should be within range [0, 1].".
        format(vocab_subsampling))

  # Subsamples the input tokens based on vocabulary frequency and the
  # vocab_subsampling threshold (i.e. randomly discard commonly appearing
  # tokens).
  with ops.name_scope(
      "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
    corpus_size = math_ops.cast(corpus_size, dtypes.float64)
    freq = math_ops.cast(freq, dtypes.float64)
    vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)

    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
    keep_prob = ((math_ops.sqrt(freq /
                                (vocab_subsampling * corpus_size)) + 1.0) *
                 (vocab_subsampling * corpus_size / freq))
    random_prob = random_ops.random_uniform(
        array_ops.shape(freq),
        minval=0,
        maxval=1,
        dtype=dtypes.float64,
        seed=seed)

    mask = math_ops.less_equal(random_prob, keep_prob)
    return array_ops.boolean_mask(input_tensor, mask)
Developer: 1000sprites, Project: tensorflow, Lines: 60, Source file: skip_gram_ops.py
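The logical_and in Example 8 combines the "found in the vocabulary" mask with the minimum-frequency mask. In isolation, with made-up frequencies and TF 2.x eager mode assumed:

import tensorflow as tf

freq = tf.constant([5, -1, 40], tf.int64)  # -1 is the table's default value
mask = tf.not_equal(freq, -1)
mask = tf.logical_and(mask, tf.greater_equal(freq, 10))
print(mask)  # [False, False, True]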
Example 9: _mode
def _mode(self):
  mode = (self.a - 1.0) / (self.a_b_sum - 2.0)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.logical_and(
            math_ops.greater(self.a, 1.0), math_ops.greater(self.b, 1.0)),
        mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype),
            self.a,
            message="Mode not defined for components of a <= 1."),
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype),
            self.b,
            message="Mode not defined for components of b <= 1."),
    ], mode)
Developer: caisq, Project: tensorflow, Lines: 25, Source file: beta.py
Example 10: _prune_invalid_ids
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    is_id_valid = math_ops.logical_and(
        is_id_valid, math_ops.greater(sparse_weights.values, 0))
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
  if sparse_weights is not None:
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
  return sparse_ids, sparse_weights
Developer: yuikns, Project: tensorflow, Lines: 9, Source file: embedding_ops.py
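Example 10 keeps a sparse entry only when its id is non-negative and its weight is positive. The mask on its own, with made-up dense values and TF 2.x eager mode assumed:

import tensorflow as tf

ids = tf.constant([3, -1, 0, 7])
weights = tf.constant([0.5, 1.0, 0.0, 2.0])
is_id_valid = tf.logical_and(ids >= 0, weights > 0)
print(is_id_valid)  # [True, False, False, True]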
Example 11: _logical_and
def _logical_and(*args):
  """Convenience function which attempts to statically `reduce_all`."""
  args_ = [_static_value(x) for x in args]
  if any(x is not None and not bool(x) for x in args_):
    return constant_op.constant(False)
  if all(x is not None and bool(x) for x in args_):
    return constant_op.constant(True)
  if len(args) == 2:
    return math_ops.logical_and(*args)
  return math_ops.reduce_all(args)
Developer: arnonhongklay, Project: tensorflow, Lines: 10, Source file: transformed_distribution.py
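When more than two non-static arguments reach the dynamic branch, Example 11 falls back to reduce_all, which is the n-ary AND. A sketch of that fallback (made-up flags, TF 2.x eager mode assumed; the list of scalar tensors is stacked automatically):

import tensorflow as tf

flags = [tf.constant(True), tf.constant(True), tf.constant(False)]
print(tf.reduce_all(flags))  # False: AND over all three tensors at once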
Example 12: wrapped_cond
def wrapped_cond(loop_counter, *args):
  # Convert the flow variables in `args` to TensorArrays. `args` should
  # already have the same structure as `orig_loop_vars` but currently there
  # is no nest.zip so we call `_pack_sequence_as` which flattens both
  # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
  # and packs it into the structure of `orig_loop_vars`.
  if maximum_iterations is None:
    return cond(*_pack_sequence_as(orig_loop_vars, args))
  else:
    return math_ops.logical_and(
        loop_counter < maximum_iterations,
        cond(*_pack_sequence_as(orig_loop_vars, args)))
Developer: ziky90, Project: tensorflow, Lines: 12, Source file: while_v2.py
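Example 12 ANDs the user's loop predicate with an iteration cap. The same combination in a plain tf.while_loop, with a made-up loop body and cap (TF 2.x assumed):

import tensorflow as tf

max_iter = tf.constant(5)
i, x = tf.while_loop(
    lambda i, x: tf.logical_and(i < max_iter, x < 100.0),
    lambda i, x: (i + 1, x * 2.0),
    [tf.constant(0), tf.constant(1.0)])
print(int(i), float(x))  # 5 32.0 -> stopped by the cap, not by x < 100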
Example 13: undo_make_batch_of_event_sample_matrices
def undo_make_batch_of_event_sample_matrices(
    self, x, sample_shape, expand_batch_dim=True,
    name="undo_make_batch_of_event_sample_matrices"):
  """Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.

  Where:
  - `B_ = B if B or not expand_batch_dim else [1]`,
  - `E_ = E if E else [1]`,
  - `S_ = [tf.reduce_prod(S)]`.

  This function "reverses" `make_batch_of_event_sample_matrices`.

  Args:
    x: `Tensor` of shape `B_+E_+S_`.
    sample_shape: `Tensor` (1D, `int32`).
    expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
      such that `batch_ndims>=1`.
    name: Python `str`. The name to give this op.

  Returns:
    x: `Tensor`. Input transposed/reshaped to `S+B+E`.
  """
  with self._name_scope(name, values=[x, sample_shape]):
    x = ops.convert_to_tensor(x, name="x")
    # x.shape: _B+_E+[prod(S)]
    sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
    x = distribution_util.rotate_transpose(x, shift=1)
    # x.shape: [prod(S)]+_B+_E
    if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
      if self._batch_ndims_is_0 or self._event_ndims_is_0:
        squeeze_dims = []
        if self._event_ndims_is_0:
          squeeze_dims += [-1]
        if self._batch_ndims_is_0 and expand_batch_dim:
          squeeze_dims += [1]
        if squeeze_dims:
          x = array_ops.squeeze(x, axis=squeeze_dims)
          # x.shape: [prod(S)]+B+E
      _, batch_shape, event_shape = self.get_shape(x)
    else:
      s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
           else array_ops.shape(x))
      batch_shape = s[1:1+self.batch_ndims]
      # Since sample_dims=1 and is left-most, we add 1 to the number of
      # batch_ndims to get the event start dim.
      event_start = array_ops.where(
          math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
          2, 1 + self.batch_ndims)
      event_shape = s[event_start:event_start+self.event_ndims]
    new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
    x = array_ops.reshape(x, shape=new_shape)
    # x.shape: S+B+E
    return x
Developer: ahmedsaiduk, Project: tensorflow, Lines: 53, Source file: shape.py
Example 14: element_to_bucket_id
def element_to_bucket_id(*args):
  """Return int64 id of the length bucket for this element."""
  seq_length = element_length_func(*args)

  boundaries = list(bucket_boundaries)
  buckets_min = [np.iinfo(np.int32).min] + boundaries
  buckets_max = boundaries + [np.iinfo(np.int32).max]
  conditions_c = math_ops.logical_and(
      math_ops.less_equal(buckets_min, seq_length),
      math_ops.less(seq_length, buckets_max))
  bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

  return bucket_id
Developer: bunbutter, Project: tensorflow, Lines: 13, Source file: grouping.py
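Example 14 finds a length's bucket by testing buckets_min <= len < buckets_max against every bucket at once and taking the index of the single True. Concretely, with made-up boundaries and TF 2.x eager mode assumed:

import numpy as np
import tensorflow as tf

boundaries = [10, 20, 30]
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]
seq_length = tf.constant(17)
conditions_c = tf.logical_and(
    tf.less_equal(buckets_min, seq_length),
    tf.less(seq_length, buckets_max))
print(tf.reduce_min(tf.where(conditions_c)))  # 1 -> bucket [10, 20)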
Example 15: fn_with_cond
def fn_with_cond(*inner_args, **inner_kwds):
  """Conditionally runs initialization if it's needed."""
  condition = True
  for wr in self._created_variables:
    variable = wr()
    if variable is None:
      raise ValueError(
          "A tf.Variable created inside your tf.function has been"
          " garbage-collected. Your code needs to keep Python references"
          " to variables created inside `tf.function`s.\n"
          "\n"
          "A common way to raise this error is to create and return a"
          " variable only referenced inside your function:\n"
          "\n"
          "@tf.function\n"
          "def f():\n"
          "  v = tf.Variable(1.0)\n"
          "  return v\n"
          "\n"
          "v = f()  # Crashes with this error message!\n"
          "\n"
          "The reason this crashes is that @tf.function annotated"
          " function returns a **`tf.Tensor`** with the **value** of the"
          " variable when the function is called rather than the"
          " variable instance itself. As such there is no code holding a"
          " reference to the `v` created inside the function and Python"
          " garbage collects it.\n"
          "\n"
          "The simplest way to fix this issue is to create variables"
          " outside the function and capture them:\n"
          "\n"
          "v = tf.Variable(1.0)\n"
          "\n"
          "@tf.function\n"
          "def f():\n"
          "  return v\n"
          "\n"
          "f()  # <tf.Tensor: ... numpy=1.>\n"
          "v.assign_add(1.)\n"
          "f()  # <tf.Tensor: ... numpy=2.>")
    condition = math_ops.logical_and(
        condition, resource_variable_ops.var_is_initialized_op(
            variable.handle))
  # We want to call stateless_fn if possible because it avoids recomputing
  # potentially expensive initializers.
  return control_flow_ops.cond(
      condition,
      lambda: self._stateless_fn(*inner_args, **inner_kwds),
      functools.partial(self._concrete_stateful_fn._filtered_call,  # pylint: disable=protected-access
                        inner_args, inner_kwds))
Developer: kylin9872, Project: tensorflow, Lines: 50, Source file: def_function.py
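Example 15 folds a Python True with one is-initialized check per variable; the first logical_and call quietly converts the Python bool to a tensor. A sketch of that accumulation using the public raw op in place of the internal resource_variable_ops helper (made-up variables, TF 2.x eager mode assumed):

import tensorflow as tf

variables = [tf.Variable(1.0), tf.Variable(2.0)]
condition = True
for v in variables:
  condition = tf.logical_and(
      condition, tf.raw_ops.VarIsInitializedOp(resource=v.handle))
print(condition)  # True once every variable has been initialized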
Example 16: sparsemax_loss
def sparsemax_loss(logits, sparsemax, labels, name=None):
  """Computes sparsemax loss function [1].

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    sparsemax: A `Tensor`. Must have the same type as `logits`.
    labels: A `Tensor`. Must have the same type as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  with ops.name_scope(name, "sparsemax_loss",
                      [logits, sparsemax, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
    labels = ops.convert_to_tensor(labels, name="labels")

    # In the paper, they call the logits z.
    # A constant can be subtracted from logits to make the algorithm
    # more numerically stable in theory. However, there are really no major
    # sources of numerical instability in this algorithm.
    z = logits

    # sum over support
    # Use a conditional where instead of a multiplication to support z = -inf.
    # If z = -inf, and there is no support (sparsemax = 0), a multiplication
    # would cause 0 * -inf = nan, which is not correct in this case.
    sum_s = array_ops.where(
        math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
        sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))

    # - z_k + ||q||^2
    q_part = labels * (0.5 * labels - z)
    # Fix the case where labels = 0 and z = -inf, where q_part would
    # otherwise be 0 * -inf = nan. But since labels = 0, no cost for
    # z = -inf should be considered.
    # The code below also covers the case where z = inf. However, in this
    # case the sparsemax will be nan, which means the sum_s will also be nan,
    # so this case doesn't need additional special treatment.
    q_part_safe = array_ops.where(
        math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)),
        array_ops.zeros_like(z), q_part)

    return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
Developer: Ajaycs99, Project: tensorflow, Lines: 49, Source file: sparsemax_loss.py
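The guard in Example 16 zeroes q_part exactly where labels == 0 and z is infinite, so 0 * inf never reaches the sum. The effect in isolation, with made-up values and TF 2.x eager mode assumed:

import tensorflow as tf

z = tf.constant([[1.0, float("-inf")]])
labels = tf.constant([[1.0, 0.0]])
q_part = labels * (0.5 * labels - z)
q_part_safe = tf.where(
    tf.logical_and(tf.equal(labels, 0), tf.math.is_inf(z)),
    tf.zeros_like(z), q_part)
print(q_part_safe)  # [[-0.5, 0.0]] instead of [[-0.5, nan]]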
Example 17: is_initialized
def is_initialized(self, name=None):
  """Identifies if all the component variables are initialized.

  Args:
    name: Name of the final `logical_and` op.

  Returns:
    The op that evaluates to True or False depending on if all the
    component variables are initialized.
  """
  # We have to cast the self._index.values() to a `list` because when we
  # use `model_to_estimator` to run tf.keras models, self._index.values() is
  # of type `dict_values` and not `list`.
  values_list = list(self._index.values())
  result = values_list[0].is_initialized()
  # We iterate through the list of values except the last one to allow us to
  # name the final `logical_and` op the same name that is passed by the user
  # to the `is_initialized` op. For distributed variables, the
  # `is_initialized` op is a `logical_and` op.
  for v in values_list[1:-1]:
    result = math_ops.logical_and(result, v.is_initialized())
  result = math_ops.logical_and(result, values_list[-1].is_initialized(),
                                name=name)
  return result
Developer: sonnyhu, Project: tensorflow, Lines: 24, Source file: values.py
Example 18: _process_matrix
def _process_matrix(self, matrix, min_rank, event_ndims):
  """Helper to __init__ which gets matrix in batch-ready form."""
  # Pad the matrix so that matmul works in the case of a matrix and vector
  # input. Keep track if the matrix was padded, to distinguish between a
  # rank 3 tensor and a padded rank 2 tensor.
  # TODO(srvasude): Remove side-effects from functions. It's currently
  # unbroken but error-prone since the function call order may change in
  # the future.
  self._rank_two_event_ndims_one = math_ops.logical_and(
      math_ops.equal(array_ops.rank(matrix), min_rank),
      math_ops.equal(event_ndims, 1))
  left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
  pad = array_ops.concat(
      [array_ops.ones(
          [left], dtype=dtypes.int32), array_ops.shape(matrix)],
      0)
  return array_ops.reshape(matrix, pad)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 16, Source file: affine_impl.py
Example 19: _is_shape
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of
      same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.

  Returns:
    New tensor.
  """
  with ops.op_scope([actual_tensor], "is_shape") as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name="actual")
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name="expected"), actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
Developer: ville-k, Project: tensorflow, Lines: 16, Source file: tensor_util.py
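Example 19 ANDs a rank check with an element-wise shape comparison. The same idea with public ops standing in for the private _is_rank/_all_equal helpers (made-up tensor, TF 2.x eager mode assumed):

import tensorflow as tf

actual_tensor = tf.zeros([2, 3])
expected_shape = tf.constant([2, 3])
is_rank = tf.equal(tf.size(expected_shape), tf.rank(actual_tensor))
shape_equal = tf.reduce_all(tf.equal(expected_shape, tf.shape(actual_tensor)))
print(tf.logical_and(is_rank, shape_equal))  # True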
Example 20: _UnsortedSegmentMinOrMaxGrad
def _UnsortedSegmentMinOrMaxGrad(op, grad):
  """Gradient for UnsortedSegmentMin and UnsortedSegmentMax."""
  # Get the number of selected (minimum or maximum) elements in each segment.
  gathered_outputs, zero_clipped_indices, is_positive = \
      _GatherDropNegatives(op.outputs[0], op.inputs[1])
  is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
  is_selected = math_ops.logical_and(is_selected, is_positive)
  num_selected = math_ops.unsorted_segment_sum(
      math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
  # Compute the gradient for each segment. The gradient for the ith segment is
  # divided evenly among the selected elements in that segment.
  weighted_grads = math_ops.div(grad, num_selected)
  gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
                                              zero_clipped_indices,
                                              is_positive)
  zeros = array_ops.zeros_like(gathered_grads)
  return array_ops.where(is_selected, gathered_grads, zeros), None, None
Developer: neuroradiology, Project: tensorflow, Lines: 17, Source file: math_grad.py
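In Example 20, logical_and keeps only inputs that equal their segment's min/max and also have a valid segment id, so tied winners split the gradient evenly. A sketch of the equality half of that mask (the is_positive half handles negative segment ids, which this made-up data does not have; TF 2.x eager mode assumed):

import tensorflow as tf

x = tf.constant([3.0, 1.0, 1.0, 5.0])
segment_ids = tf.constant([0, 0, 0, 1])
out = tf.math.unsorted_segment_min(x, segment_ids, num_segments=2)  # [1., 5.]
is_selected = tf.equal(x, tf.gather(out, segment_ids))
print(is_selected)  # [False, True, True, True]: the tie at 1.0 shares grad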
Note: The tensorflow.python.ops.math_ops.logical_and examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.