This page collects typical usage examples of the Python function tensorflow.assert_less_equal. If you are wondering what assert_less_equal does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following 20 code examples of assert_less_equal are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
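Before diving into the examples, here is a minimal sketch of the pattern nearly all of them share: tf.assert_less_equal builds an assertion op, and attaching it through tf.control_dependencies forces the check to run whenever the downstream op is evaluated, raising tf.errors.InvalidArgumentError if the condition fails. This sketch is not taken from any of the projects below; the tensor names x and limit are illustrative only, and it assumes the TensorFlow 1.x graph-mode API used throughout this page.

import tensorflow as tf

# Values to validate and the upper bound they must respect.
x = tf.placeholder(tf.float32, shape=[None], name='x')
limit = tf.constant(1.0, name='limit')

# The assertion op has no useful output of its own; it only raises at run time
# if any element of `x` exceeds `limit`.
check = tf.assert_less_equal(x, limit, message='x must be <= 1.')

# Gating `y` on the check guarantees the assertion runs whenever `y` is evaluated.
with tf.control_dependencies([check]):
  y = tf.identity(x, name='checked_x')

with tf.Session() as sess:
  print(sess.run(y, feed_dict={x: [0.2, 0.9]}))   # passes
  # sess.run(y, feed_dict={x: [0.2, 1.5]})        # would raise InvalidArgumentError

In TensorFlow 2.x the same check is available as tf.debugging.assert_less_equal and, in eager mode, raises immediately without the control_dependencies plumbing.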
Example 1: test_doesnt_raise_when_both_empty
def test_doesnt_raise_when_both_empty(self):
  with self.test_session():
    larry = tf.constant([])
    curly = tf.constant([])
    with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
      out = tf.identity(larry)
    out.eval()
Author: 3kwa, Project: tensorflow, Lines: 7, Source: check_ops_test.py
Example 2: preprocess_for_inception
def preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """
  # Images should have 3 channels.
  assert images.shape[3].value == 3

  # tfgan_eval.preprocess_image function takes values in [0, 1], so rescale.
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
      fn=tfgan_eval.preprocess_image,
      elems=images,
      back_prop=False
  )

  return preprocessed_images
Author: changchunli, Project: compare_gan, Lines: 26, Source: fid_score.py
Example 3: replace
def replace(self, episodes, length, rows=None):
  """Replace full episodes.

  Args:
    episodes: Tuple of transition quantities with batch and time dimensions.
    length: Batch of sequence lengths.
    rows: Episodes to replace, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less_equal(
        length, self._max_length, message='max length exceeded')
  replace_ops = []
  with tf.control_dependencies([assert_max_length]):
    for buffer_, elements in zip(self._buffers, episodes):
      replace_op = tf.scatter_update(buffer_, rows, elements)
      replace_ops.append(replace_op)
  with tf.control_dependencies(replace_ops):
    return tf.scatter_update(self._length, rows, length)
Author: AndrewMeadows, Project: bullet3, Lines: 25, Source: memory.py
Example 4: new_mean_squared
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms
Author: ALISCIFP, Project: models, Lines: 27, Source: utils.py
Example 5: calculate_reshape
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Author: lewisKit, Project: probability, Lines: 29, Source: batch_reshape.py
Example 6: test_doesnt_raise_when_less_equal_and_broadcastable_shapes
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1], name="small")
    big = tf.constant([3, 1], name="big")
    with tf.control_dependencies([tf.assert_less_equal(small, big)]):
      out = tf.identity(small)
    out.eval()
Author: 3kwa, Project: tensorflow, Lines: 7, Source: check_ops_test.py
Example 7: _maybe_check_valid_shape
def _maybe_check_valid_shape(self, shape, validate_args):
  """Check that a shape Tensor is int-type and otherwise sane."""
  if not shape.dtype.is_integer:
    raise TypeError('{} dtype ({}) should be `int`-like.'.format(
        shape, shape.dtype.name))

  assertions = []

  ndims = tf.rank(shape)
  ndims_ = tensor_util.constant_value(ndims)
  if ndims_ is not None and ndims_ > 1:
    raise ValueError('`{}` rank ({}) should be <= 1.'.format(
        shape, ndims_))
  elif validate_args:
    assertions.append(
        tf.assert_less_equal(
            ndims, 1, message='`{}` rank should be <= 1.'.format(shape)))

  # Note, we might be inclined to use tensor_util.constant_value_as_shape
  # here, but that method coerces negative values into `None`s, rendering the
  # checks we do below impossible.
  shape_tensor_ = tensor_util.constant_value(shape)
  if shape_tensor_ is not None:
    es = np.int32(shape_tensor_)
    if sum(es == -1) > 1:
      raise ValueError(
          '`{}` must have at most one `-1` (given {})'
          .format(shape, es))
    if np.any(es < -1):
      raise ValueError(
          '`{}` elements must be either positive integers or `-1`'
          '(given {}).'
          .format(shape, es))
  elif validate_args:
    assertions.extend([
        tf.assert_less_equal(
            tf.reduce_sum(tf.cast(tf.equal(shape, -1), tf.int32)),
            1,
            message='`{}` elements must have at most one `-1`.'
            .format(shape)),
        tf.assert_greater_equal(
            shape,
            -1,
            message='`{}` elements must be either positive integers or `-1`.'
            .format(shape)),
    ])
  return assertions
Author: lewisKit, Project: probability, Lines: 47, Source: reshape.py
Example 8: remidify
def remidify(pitches):
  """Transforms [0, 88) to MIDI pitches [21, 108]."""
  assertions = [
      tf.assert_greater_equal(pitches, 0),
      tf.assert_less_equal(pitches, 87)
  ]
  with tf.control_dependencies(assertions):
    return pitches + 21
Author: adarob, Project: magenta, Lines: 8, Source: util.py
Example 9: demidify
def demidify(pitches):
  """Transforms MIDI pitches [21, 108] to [0, 88)."""
  assertions = [
      tf.assert_greater_equal(pitches, 21),
      tf.assert_less_equal(pitches, 108)
  ]
  with tf.control_dependencies(assertions):
    return pitches - 21
Author: adarob, Project: magenta, Lines: 8, Source: util.py
Example 10: test_raises_when_greater
def test_raises_when_greater(self):
  with self.test_session():
    small = tf.constant([1, 2], name="small")
    big = tf.constant([3, 4], name="big")
    with tf.control_dependencies([tf.assert_less_equal(big, small)]):
      out = tf.identity(small)
    with self.assertRaisesOpError("big.*small"):
      out.eval()
Author: 3kwa, Project: tensorflow, Lines: 8, Source: check_ops_test.py
Example 11: _augment_data
def _augment_data(self, inout, nchan=6):
  """Flip, crop and rotate samples randomly."""
  with tf.name_scope('data_augmentation'):
    if self.fliplr:
      inout = tf.image.random_flip_left_right(inout, seed=1234)
    if self.flipud:
      inout = tf.image.random_flip_up_down(inout, seed=3456)
    if self.rotate:
      angle = tf.random_uniform((), minval=0, maxval=4, dtype=tf.int32, seed=4567)
      inout = tf.case([(tf.equal(angle, 1), lambda: tf.image.rot90(inout, k=1)),
                       (tf.equal(angle, 2), lambda: tf.image.rot90(inout, k=2)),
                       (tf.equal(angle, 3), lambda: tf.image.rot90(inout, k=3))],
                      lambda: inout)

    inout.set_shape([None, None, nchan])

    with tf.name_scope('crop'):
      shape = tf.shape(inout)
      new_height = tf.to_int32(self.output_resolution[0])
      new_width = tf.to_int32(self.output_resolution[1])
      height_ok = tf.assert_less_equal(new_height, shape[0])
      width_ok = tf.assert_less_equal(new_width, shape[1])
      with tf.control_dependencies([height_ok, width_ok]):
        if self.random_crop:
          inout = tf.random_crop(
              inout, tf.stack([new_height, new_width, nchan]))
        else:
          height_offset = tf.to_int32((shape[0]-new_height)/2)
          width_offset = tf.to_int32((shape[1]-new_width)/2)
          inout = tf.image.crop_to_bounding_box(
              inout, height_offset, width_offset,
              new_height, new_width)

    inout.set_shape([None, None, nchan])
    inout = tf.image.resize_images(
        inout, [self.output_resolution[0], self.output_resolution[1]])
    fullres = inout

    with tf.name_scope('resize'):
      new_size = 256
      inout = tf.image.resize_images(
          inout, [new_size, new_size],
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

    return fullres, inout
Author: KeyKy, Project: hdrnet, Lines: 46, Source: data_pipeline.py
Example 12: test_raises_when_less_equal_but_non_broadcastable_shapes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1, 1, 1], name="small")
    big = tf.constant([3, 1], name="big")
    with self.assertRaisesRegexp(ValueError, "broadcast"):
      with tf.control_dependencies([tf.assert_less_equal(small, big)]):
        out = tf.identity(small)
      out.eval()
Author: 3kwa, Project: tensorflow, Lines: 8, Source: check_ops_test.py
Example 13: scale_to_inception_range
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
Author: NoPointExc, Project: models, Lines: 9, Source: preprocessing.py
Example 14: _maybe_assert_valid
def _maybe_assert_valid(self, x):
  if not self.validate_args:
    return x
  return control_flow_ops.with_dependencies([
      tf.assert_non_negative(x, message="sample must be non-negative"),
      tf.assert_less_equal(
          x,
          tf.ones([], self.concentration0.dtype),
          message="sample must be no larger than `1`."),
  ], x)
Author: lewisKit, Project: probability, Lines: 10, Source: kumaraswamy.py
Example 15: _maybe_assert_valid_y
def _maybe_assert_valid_y(self, y):
  if not self.validate_args:
    return y
  is_positive = tf.assert_non_negative(
      y, message="Inverse transformation input must be greater than 0.")
  less_than_one = tf.assert_less_equal(
      y,
      tf.constant(1., y.dtype),
      message="Inverse transformation input must be less than or equal to 1.")
  return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
Author: lewisKit, Project: probability, Lines: 10, Source: gumbel.py
Example 16: _maybe_assert_valid_sample
def _maybe_assert_valid_sample(self, counts):
  """Check counts for proper shape, values, then return tensor version."""
  if not self.validate_args:
    return counts
  counts = distribution_util.embed_check_nonnegative_integer_form(counts)
  return control_flow_ops.with_dependencies([
      tf.assert_less_equal(
          counts,
          self.total_count,
          message="counts are not less than or equal to n."),
  ], counts)
Author: lewisKit, Project: probability, Lines: 11, Source: binomial.py
Example 17: _validate_correlationness
def _validate_correlationness(self, x):
  if not self.validate_args:
    return x
  checks = [
      tf.assert_less_equal(
          tf.cast(-1., dtype=x.dtype.base_dtype),
          x,
          message='Correlations must be >= -1.'),
      tf.assert_less_equal(
          x,
          tf.cast(1., x.dtype.base_dtype),
          message='Correlations must be <= 1.'),
      tf.assert_near(
          tf.matrix_diag_part(x),
          tf.cast(1., x.dtype.base_dtype),
          message='Self-correlations must be = 1.'),
      tf.assert_near(
          x, tf.matrix_transpose(x),
          message='Correlation matrices must be symmetric')
  ]
  with tf.control_dependencies(checks):
    return tf.identity(x)
Author: asudomoeva, Project: probability, Lines: 22, Source: lkj.py
Example 18: maybe_split_sequence_lengths
def maybe_split_sequence_lengths(sequence_length, num_splits, total_length):
  """Validates and splits `sequence_length`, if necessary.

  Returned value must be used in graph for all validations to be executed.

  Args:
    sequence_length: A batch of sequence lengths, either sized `[batch_size]`
      and equal to either 0 or `total_length`, or sized
      `[batch_size, num_splits]`.
    num_splits: The scalar number of splits of the full sequences.
    total_length: The scalar total sequence length (potentially padded).

  Returns:
    sequence_length: If input shape was `[batch_size, num_splits]`, returns the
      same Tensor. Otherwise, returns a Tensor of that shape with each input
      length in the batch divided by `num_splits`.

  Raises:
    ValueError: If `sequence_length` is not shaped `[batch_size]` or
      `[batch_size, num_splits]`.
    tf.errors.InvalidArgumentError: If `sequence_length` is shaped
      `[batch_size]` and all values are not either 0 or `total_length`.
  """
  if sequence_length.shape.ndims == 1:
    if total_length % num_splits != 0:
      raise ValueError(
          '`total_length` must be evenly divisible by `num_splits`.')
    with tf.control_dependencies(
        [tf.Assert(
            tf.reduce_all(
                tf.logical_or(tf.equal(sequence_length, 0),
                              tf.equal(sequence_length, total_length))),
            data=[sequence_length])]):
      sequence_length = (
          tf.tile(tf.expand_dims(sequence_length, axis=1), [1, num_splits]) //
          num_splits)
  elif sequence_length.shape.ndims == 2:
    with tf.control_dependencies([
        tf.assert_less_equal(
            sequence_length,
            tf.constant(total_length // num_splits, tf.int32),
            message='Segment length cannot be more than '
                    '`total_length / num_splits`.')]):
      sequence_length = tf.identity(sequence_length)
    sequence_length.set_shape([sequence_length.shape[0], num_splits])
  else:
    raise ValueError(
        'Sequence lengths must be given as a vector or a 2D Tensor whose '
        'second dimension size matches its initial hierarchical split. Got '
        'shape: %s' % sequence_length.shape.as_list())

  return sequence_length
Author: Alice-ren, Project: magenta, Lines: 50, Source: lstm_utils.py
Example 19: _maximum_mean
def _maximum_mean(samples, envelope, high, name=None):
  """Returns a stochastic upper bound on the mean of a scalar distribution.

  The idea is that if the true CDF is within an `eps`-envelope of the
  empirical CDF of the samples, and the support is bounded above, then
  the mean is bounded above as well. In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples. The remaining dimensions are
  broadcast together with `envelope` and `high`, and operated on
  separately.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest. Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `high`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports. `samples <= high`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of upper bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be larger than
      the corresponding `high`.
  """
  with tf.name_scope(name, "maximum_mean", [samples, envelope, high]):
    dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32)
    samples = tf.convert_to_tensor(samples, name="samples", dtype=dtype)
    envelope = tf.convert_to_tensor(envelope, name="envelope", dtype=dtype)
    high = tf.convert_to_tensor(high, name="high", dtype=dtype)

    xmax = tf.reduce_max(samples, axis=[0])
    msg = "Given sample maximum value exceeds expectations"
    check_op = tf.assert_less_equal(xmax, high, message=msg)
    with tf.control_dependencies([check_op]):
      return tf.identity(_do_maximum_mean(samples, envelope, high))
Author: asudomoeva, Project: probability, Lines: 44, Source: statistical_testing.py
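The envelope argument sketched in the docstring of _maximum_mean above can be spelled out in one line; the derivation below is our own summary, not text from the project. For a random variable $X \le \mathrm{high}$ with CDF $F$ and empirical CDF $F_n$,

\[
  \mathbb{E}[X] \;=\; \mathrm{high} - \int_{-\infty}^{\mathrm{high}} F(x)\,dx
  \;\le\; \mathrm{high} - \int_{-\infty}^{\mathrm{high}} \max\big(F_n(x) - \varepsilon,\, 0\big)\,dx,
\]

because $\sup_x |F_n(x) - F(x)| < \varepsilon$ together with $F \ge 0$ gives $F(x) \ge \max(F_n(x) - \varepsilon, 0)$ pointwise. The right-hand side depends only on the samples, envelope, and high, which is presumably the quantity _do_maximum_mean evaluates once the assert_less_equal check has confirmed that all samples are at most high.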
Example 20: _init_clusters_random
def _init_clusters_random(self):
  """Does random initialization of clusters.

  Returns:
    Tensor of randomly initialized clusters.
  """
  num_data = tf.add_n([tf.shape(inp)[0] for inp in self._inputs])
  # Note that for mini-batch k-means, we should ensure that the batch size of
  # data used during initialization is sufficiently large to avoid duplicated
  # clusters.
  with tf.control_dependencies(
      [tf.assert_less_equal(self._num_clusters, num_data)]):
    indices = tf.random_uniform(tf.reshape(self._num_clusters, [-1]),
                                minval=0,
                                maxval=tf.cast(num_data, tf.int64),
                                seed=self._random_seed,
                                dtype=tf.int64)
  clusters_init = embedding_lookup(self._inputs, indices,
                                   partition_strategy='div')
  return clusters_init
Author: 2020zyc, Project: tensorflow, Lines: 20, Source: clustering_ops.py
Note: The tensorflow.assert_less_equal examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's license before redistributing or reusing it. Do not reproduce this article without permission.