This article collects typical usage examples of the Python function tensorflow.python.ops.check_ops.assert_greater_equal. If you have been wondering what exactly assert_greater_equal does, how to call it, and what real code that uses it looks like, the curated samples below should help.
The following section presents 20 code examples of assert_greater_equal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
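Before the examples, here is the basic pattern nearly all of them share: build the assertion op, then gate downstream computation on it with a control dependency, so that evaluating the output also runs the check. This is a minimal sketch assuming TensorFlow 1.x graph semantics and the same internal modules (constant_op, ops, array_ops, check_ops) the examples below rely on:

import tensorflow as tf
from tensorflow.python.framework import constant_op, ops
from tensorflow.python.ops import array_ops, check_ops

# assert_greater_equal(x, y) raises InvalidArgumentError at run time
# unless x >= y holds elementwise (after broadcasting).
x = constant_op.constant([3, 4], name="x")
y = constant_op.constant([1, 2], name="y")
with ops.control_dependencies([check_ops.assert_greater_equal(x, y)]):
  out = array_ops.identity(x)  # evaluating `out` also runs the check

with tf.Session() as sess:  # TF 1.x session assumed
  sess.run(out)  # passes; swapping x and y would raise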
Example 1: test_doesnt_raise_when_both_empty

def test_doesnt_raise_when_both_empty(self):
  larry = constant_op.constant([])
  curly = constant_op.constant([])
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(larry, curly)]):
    out = array_ops.identity(larry)
  self.evaluate(out)

Author: AbhinavJain13 | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
Example 2: test_doesnt_raise_when_equal

def test_doesnt_raise_when_equal(self):
  with self.test_session():
    small = constant_op.constant([1, 2], name="small")
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(small, small)]):
      out = array_ops.identity(small)
    out.eval()

Author: 1000sprites | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
Example 3: _check_valid_event_ndims

def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
  event_ndims_ = tensor_util.constant_value(event_ndims)
  assertions = []

  if not event_ndims.dtype.is_integer:
    raise ValueError("Expected integer dtype, got dtype {}".format(
        event_ndims.dtype))

  if event_ndims_ is not None:
    if event_ndims.shape.ndims != 0:
      raise ValueError("Expected scalar event_ndims, got shape {}".format(
          event_ndims.shape))
    if min_event_ndims > event_ndims_:
      raise ValueError("event_ndims ({}) must be larger than "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_greater_equal(event_ndims, min_event_ndims)]

  if event_ndims.shape.is_fully_defined():
    if event_ndims.shape.ndims != 0:
      raise ValueError("Expected scalar shape, got ndims {}".format(
          event_ndims.shape.ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
  return assertions

Author: AnishShah | Project: tensorflow | Lines: 31 | Source: bijector_impl.py
Example 4: test_doesnt_raise_when_greater_equal_and_broadcastable_shapes

def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
  small = constant_op.constant([1], name="small")
  big = constant_op.constant([3, 1], name="big")
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(big, small)]):
    out = array_ops.identity(small)
  self.evaluate(out)

Author: AbhinavJain13 | Project: tensorflow | Lines: 7 | Source: check_ops_test.py
Example 5: _single_batch_sampler

def _single_batch_sampler(self, sampler):
  # Enforce that there are at least as many data points as centers
  # remaining. This gives the provided sampler the chance to select all
  # remaining centers from a single batch.
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
    return sampler()

Author: AnddyWang | Project: tensorflow | Lines: 7 | Source: clustering_ops.py
Example 6: test_raises_when_less_equal_but_non_broadcastable_shapes

def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
  with self.test_session():
    small = constant_op.constant([1, 1, 1], name="big")
    big = constant_op.constant([3, 1], name="small")
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
      with ops.control_dependencies(
          [check_ops.assert_greater_equal(big, small)]):
        out = array_ops.identity(small)
      out.eval()

Author: 1000sprites | Project: tensorflow | Lines: 9 | Source: check_ops_test.py
Example 7: test_raises_when_less

def test_raises_when_less(self):
  small = constant_op.constant([1, 2], name="small")
  big = constant_op.constant([3, 4], name="big")
  with self.assertRaisesOpError("fail"):
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(
            small, big, message="fail")]):
      out = array_ops.identity(small)
    self.evaluate(out)

Author: AbhinavJain13 | Project: tensorflow | Lines: 9 | Source: check_ops_test.py
Example 8: _validate_aux_loss_weight

def _validate_aux_loss_weight(aux_loss_weight, name='aux_loss_weight'):
  if isinstance(aux_loss_weight, ops.Tensor):
    aux_loss_weight.shape.assert_is_compatible_with([])
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(aux_loss_weight, 0.0)]):
      aux_loss_weight = array_ops.identity(aux_loss_weight)
  elif aux_loss_weight is not None and aux_loss_weight < 0:
    raise ValueError('`%s` must be greater than 0. Instead, was %s' %
                     (name, aux_loss_weight))
  return aux_loss_weight

Author: andrewharp | Project: tensorflow | Lines: 10 | Source: train.py
Example 9: check

def check(t):
  samples_batch_shape = array_ops.shape(samples)[1:]
  broadcasted_batch_shape = array_ops.broadcast_dynamic_shape(
      samples_batch_shape, array_ops.shape(t))
  # This rank check ensures that I don't get a wrong answer from the
  # _shapes_ broadcasting against each other.
  samples_batch_ndims = array_ops.size(samples_batch_shape)
  ge = check_ops.assert_greater_equal(
      samples_batch_ndims, array_ops.rank(t))
  eq = check_ops.assert_equal(samples_batch_shape, broadcasted_batch_shape)
  return ge, eq

Author: ahmedsaiduk | Project: tensorflow | Lines: 11 | Source: statistical_testing.py
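The inline comment is the whole point of the `ge` assertion: elementwise equality of shape vectors itself broadcasts, so the `eq` check alone could "pass" even when the samples' batch shape does not actually dominate `t`'s shape. A hedged NumPy illustration of the failure mode the rank check rules out (the values are ours, not from the source):

import numpy as np

samples_batch_shape = np.array([3])     # batch shape (3,)
t_broadcast_shape = np.array([3, 3])    # broadcast of (3,) with a (3, 3) t
# Comparing the two shape vectors elementwise broadcasts [3] against [3, 3]
# and reports equality, even though (3,) does not dominate (3, 3):
print((samples_batch_shape == t_broadcast_shape).all())  # True -- wrong answer
# Hence the extra assertion: size(samples_batch_shape) >= rank(t).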
Example 10: test_raises_when_less_equal_but_non_broadcastable_shapes

def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
  small = constant_op.constant([1, 1, 1], name="big")
  big = constant_op.constant([3, 1], name="small")
  # The exception in eager and non-eager mode is different because
  # eager mode relies on shape check done as part of the C++ op, while
  # graph mode does shape checks when creating the `Operation` instance.
  with self.assertRaisesRegexp(
      (errors.InvalidArgumentError, ValueError),
      (r"Incompatible shapes: \[2\] vs. \[3\]|"
       r"Dimensions must be equal, but are 2 and 3")):
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(big, small)]):
      out = array_ops.identity(small)
    self.evaluate(out)

Author: AbhinavJain13 | Project: tensorflow | Lines: 14 | Source: check_ops_test.py
Example 11: _check_valid_event_ndims

def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  assert_static(min_event_ndims)
  event_ndims_ = get_static_value(event_ndims, np.int32)
  assertions = []
  if event_ndims_ is not None:
    if min_event_ndims > event_ndims_:
      raise ValueError("event_ndims ({}) must be larger than "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
  return assertions

Author: Jackiefan | Project: tensorflow | Lines: 14 | Source: bijector_impl.py
Example 12: _check_valid_event_ndims

def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  min_event_ndims_ = (min_event_ndims if isinstance(min_event_ndims, int)
                      else tensor_util.constant_value(min_event_ndims))
  event_ndims_ = (event_ndims if isinstance(event_ndims, int)
                  else tensor_util.constant_value(event_ndims))

  if min_event_ndims_ is not None and event_ndims_ is not None:
    if min_event_ndims_ > event_ndims_:
      raise ValueError("event_ndims ({}) must be larger than "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims_))
    return []

  if self.validate_args:
    return [check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
  return []

Author: kimr843 | Project: tensorflow | Lines: 17 | Source: bijector_impl.py
Example 13: _minimum_mean

def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  The idea is that if the true CDF is within an `eps`-envelope of the
  empirical CDF of the samples, and the support is bounded below, then
  the mean is bounded below as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are
  broadcast together with `envelope` and `low`, and operated on
  separately.

  Args:
    samples: Floating-point tensor of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point tensor of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    low: Floating-point tensor of lower bounds on the distributions'
      supports.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point tensor of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than
      the corresponding `low`.
  """
  with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
    samples = ops.convert_to_tensor(samples, name="samples")
    envelope = ops.convert_to_tensor(envelope, name="envelope")
    low = ops.convert_to_tensor(low, name="low")

    xmin = math_ops.reduce_min(samples, axis=[-1])
    msg = "Given sample minimum value falls below expectations"
    check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
    with ops.control_dependencies([check_op]):
      return - _do_maximum_mean(-samples, envelope, -low)

Author: AndrewTwinz | Project: tensorflow | Lines: 43 | Source: statistical_testing.py
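The final line is a reflection trick rather than a second derivation: a lower bound on the mean of X with support bounded below by `low` is the negation of an upper bound on the mean of -X, whose support is bounded above by -low. A trivial NumPy sanity check of the underlying identity (illustrative values only):

import numpy as np

samples = np.array([1.0, 2.5, 4.0])
# Lower-bounding E[X] via -upper-bound(E[-X]); for the exact sample mean
# the identity is an equality:
assert np.isclose(samples.mean(), -(-samples).mean())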
Example 14: _maybe_check_valid_shape

def _maybe_check_valid_shape(self, shape, validate_args):
  """Check that a shape Tensor is int-type and otherwise sane."""
  if not shape.dtype.is_integer:
    raise TypeError("{} dtype ({}) should be `int`-like.".format(
        shape, shape.dtype.name))

  assertions = []

  ndims = array_ops.rank(shape)
  ndims_ = tensor_util.constant_value(ndims)
  if ndims_ is not None and ndims_ > 1:
    raise ValueError("`{}` rank ({}) should be <= 1.".format(
        shape, ndims_))
  elif validate_args:
    assertions.append(check_ops.assert_less_equal(
        ndims, 1, message="`{}` rank should be <= 1.".format(shape)))

  shape_ = tensor_util.constant_value_as_shape(shape)
  if shape_.is_fully_defined():
    es = np.int32(shape_.as_list())
    if sum(es == -1) > 1:
      raise ValueError(
          "`{}` must have at most one `-1` (given {})"
          .format(shape, es))
    if np.any(es < -1):
      raise ValueError(
          "`{}` elements must be either positive integers or `-1` "
          "(given {}).".format(shape, es))
  elif validate_args:
    assertions.extend([
        check_ops.assert_less_equal(
            math_ops.reduce_sum(
                math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
            1,
            message="`{}` elements must have at most one `-1`."
            .format(shape)),
        check_ops.assert_greater_equal(
            shape, -1,
            message="`{}` elements must be either positive integers or `-1`."
            .format(shape)),
    ])
  return assertions

Author: Ajaycs99 | Project: tensorflow | Lines: 43 | Source: reshape.py
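Stripped of the graph-mode branching, the three static rules above are simple to state. A hypothetical plain-NumPy restatement (the helper name is ours, not TensorFlow's):

import numpy as np

def validate_reshape_vector(es):
  # Mirrors the static checks above: rank <= 1, at most one -1, and no
  # element below -1 (the messages paraphrase the source's wording).
  es = np.asarray(es, dtype=np.int32)
  if es.ndim > 1:
    raise ValueError("shape rank should be <= 1")
  if np.sum(es == -1) > 1:
    raise ValueError("shape must have at most one -1")
  if np.any(es < -1):
    raise ValueError("elements must be either positive integers or -1")

validate_reshape_vector([2, -1, 3])  # passes silently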
Example 15: batch_gather_with_default

def batch_gather_with_default(params,
                              indices,
                              default_value='',
                              name=None):
  """Same as `batch_gather` but inserts `default_value` for invalid indices.

  This operation is similar to `batch_gather` except that it will substitute
  the value for invalid indices with `default_value` as the contents.
  See `batch_gather` for more details.

  Args:
    params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`,
      `M>0`).
    indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
    default_value: A value to be inserted in places where `indices` are out of
      bounds. Must be the same dtype as params and either a scalar or rank 1.
    name: A name for the operation (optional).

  Returns:
    A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
    `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.

  #### Example:

  ```python
  >>> params = tf.ragged.constant([
        ['a', 'b', 'c'],
        ['d'],
        [],
        ['e']])
  >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
  >>> batch_gather_with_default(params, indices, 'FOO')
  [['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
  ```
  """
  with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params',
    )
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices',
    )
    default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        default_value, name='default_value',
    )
    # TODO(hterry): lift this restriction and support default_values of
    # rank > 1
    if default_value.shape.ndims not in (0, 1):
      raise ValueError('"default_value" must be a scalar or vector')
    upper_bounds = None
    if indices.shape.ndims is None:
      raise ValueError('Indices must have a known rank.')
    if params.shape.ndims is None:
      raise ValueError('Params must have a known rank.')

    num_batch_dimensions = indices.shape.ndims - 1
    pad = None
    # The logic for this works as follows:
    # - create a padded params, where:
    #    padded_params[b1...bn, 0] = default_value
    #    padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
    # - create an `upper_bounds` Tensor that contains the number of elements
    #   in each innermost rank. Broadcast `upper_bounds` to be the same shape
    #   as `indices`.
    # - check to see which index in `indices` are out of bounds and substitute
    #   it with the index containing `default_value` (the first).
    # - call batch_gather with the indices adjusted.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(array_ops.rank(params),
                                       array_ops.rank(indices))]):
      if ragged_tensor.is_ragged(params):
        row_lengths = ragged_array_ops.expand_dims(
            params.row_lengths(axis=num_batch_dimensions),
            axis=-1)
        upper_bounds = math_ops.cast(row_lengths, indices.dtype)

        pad_shape = _get_pad_shape(params, indices)

        pad = ragged_tensor_shape.broadcast_to(
            default_value, pad_shape)
      else:
        params_shape = array_ops.shape(params)
        pad_shape = array_ops.concat([
            params_shape[:num_batch_dimensions],
            [1],
            params_shape[num_batch_dimensions + 1:params.shape.ndims]
        ], 0)
        upper_bounds = params_shape[num_batch_dimensions]
        pad = array_ops.broadcast_to(default_value, pad_shape)

      # Add `default_value` as the first value in the innermost (ragged) rank.
      pad = math_ops.cast(pad, params.dtype)
      padded_params = array_ops.concat(
          [pad, params], axis=num_batch_dimensions)

      # Adjust the indices by substituting out-of-bound indices to the
      # default-value index (which is the first element)
      shifted_indices = indices + 1
      is_out_of_bounds = (indices < 0) | (indices > upper_bounds)
      # ......... (remainder of the code omitted) .........

Author: ziky90 | Project: tensorflow | Lines: 101 | Source: ragged_batch_gather_with_default_op.py
Example 16: _interpolate_bilinear

def _interpolate_bilinear(grid,
                          query_points,
                          name='interpolate_bilinear',
                          indexing='ij'):
  """Similar to Matlab's interp2 function.

  Finds values for query points on a grid using bilinear interpolation.

  Args:
    grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
    query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
    name: a name for the operation (optional).
    indexing: whether the query points are specified as row and column (ij),
      or Cartesian coordinates (xy).

  Returns:
    values: a 3-D `Tensor` with shape `[batch, N, channels]`

  Raises:
    ValueError: if the indexing mode is invalid, or if the shape of the inputs
      is invalid.
  """
  if indexing != 'ij' and indexing != 'xy':
    raise ValueError('Indexing mode must be \'ij\' or \'xy\'')

  with ops.name_scope(name):
    grid = ops.convert_to_tensor(grid)
    query_points = ops.convert_to_tensor(query_points)
    shape = grid.get_shape().as_list()
    if len(shape) != 4:
      msg = 'Grid must be 4 dimensional. Received size: '
      raise ValueError(msg + str(grid.get_shape()))

    batch_size, height, width, channels = (array_ops.shape(grid)[0],
                                           array_ops.shape(grid)[1],
                                           array_ops.shape(grid)[2],
                                           array_ops.shape(grid)[3])

    shape = [batch_size, height, width, channels]
    query_type = query_points.dtype
    grid_type = grid.dtype

    with ops.control_dependencies([
        check_ops.assert_equal(
            len(query_points.get_shape()),
            3,
            message='Query points must be 3 dimensional.'),
        check_ops.assert_equal(
            array_ops.shape(query_points)[2],
            2,
            message='Query points must be size 2 in dim 2.')
    ]):
      num_queries = array_ops.shape(query_points)[1]

    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            height, 2, message='Grid height must be at least 2.'),
        check_ops.assert_greater_equal(
            width, 2, message='Grid width must be at least 2.')
    ]):
      alphas = []
      floors = []
      ceils = []
      index_order = [0, 1] if indexing == 'ij' else [1, 0]
      unstacked_query_points = array_ops.unstack(query_points, axis=2)

    for dim in index_order:
      with ops.name_scope('dim-' + str(dim)):
        queries = unstacked_query_points[dim]
        size_in_indexing_dimension = shape[dim + 1]

        # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
        # is still a valid index into the grid.
        max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)
        min_floor = constant_op.constant(0.0, dtype=query_type)
        floor = math_ops.minimum(
            math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)
        int_floor = math_ops.cast(floor, dtypes.int32)
        floors.append(int_floor)
        ceil = int_floor + 1
        ceils.append(ceil)

        # alpha has the same type as the grid, as we will directly use alpha
        # when taking linear combinations of pixel values from the image.
        alpha = math_ops.cast(queries - floor, grid_type)
        min_alpha = constant_op.constant(0.0, dtype=grid_type)
        max_alpha = constant_op.constant(1.0, dtype=grid_type)
        alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)

        # Expand alpha to [b, n, 1] so we can use broadcasting
        # (since the alpha values don't depend on the channel).
        alpha = array_ops.expand_dims(alpha, 2)
        alphas.append(alpha)

    with ops.control_dependencies([
        check_ops.assert_less_equal(
            math_ops.cast(batch_size * height * width, dtype=dtypes.float32),
            np.iinfo(np.int32).max / 8,
            message="""The image size or batch size is sufficiently large
    # ......... (remainder of the code omitted) .........

Author: Ajaycs99 | Project: tensorflow | Lines: 101 | Source: dense_image_warp.py
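The elided remainder of the function blends the four neighboring grid values using the per-axis floors and `alphas` computed above. As a reference for what that blend computes, here is a hedged NumPy sketch for one query point on a single-channel grid (the helper is ours, not the library's):

import numpy as np

def bilinear_at(grid, y, x):
  # grid: 2-D array; (y, x): fractional 'ij' coordinates.
  y0, x0 = int(np.floor(y)), int(np.floor(x))
  # Clamp floors so that floor + 1 stays a valid index, as the code above does.
  y0 = min(max(y0, 0), grid.shape[0] - 2)
  x0 = min(max(x0, 0), grid.shape[1] - 2)
  ay, ax = np.clip(y - y0, 0.0, 1.0), np.clip(x - x0, 0.0, 1.0)
  top = grid[y0, x0] + ax * (grid[y0, x0 + 1] - grid[y0, x0])
  bottom = grid[y0 + 1, x0] + ax * (grid[y0 + 1, x0 + 1] - grid[y0 + 1, x0])
  return top + ay * (bottom - top)

grid = np.array([[0.0, 1.0],
                 [2.0, 3.0]])
print(bilinear_at(grid, 0.5, 0.5))  # -> 1.5, the mean of the four corners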
Example 17: assert_true_mean_in_interval_by_dkwm

def assert_true_mean_in_interval_by_dkwm(
    samples, low, high, expected_low, expected_high,
    false_fail_rate=1e-6, name=None):
  """Asserts the mean of the given distribution is in the given interval.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the mean of the distribution from which the given samples are
  drawn is _outside_ the given interval with statistical significance
  `false_fail_rate` or stronger, otherwise passes.  If you also want
  to check that you are gathering enough evidence that a pass is not
  spurious, see `min_num_samples_for_dkwm_mean_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm`.

  Note that `false_fail_rate` is a total false failure rate for all
  the assertions in the batch.  As such, if the batch is nontrivial,
  the assertion will insist on stronger evidence to fail any one member.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `low` and `high`.
      The support is bounded: `low <= samples <= high`.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    expected_low: Floating-point `Tensor` of lower bounds on the
      expected true means.
    expected_high: Floating-point `Tensor` of upper bounds on the
      expected true means.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected mean
      interval does not overlap with the corresponding confidence
      interval.
  """
  with ops.name_scope(
      name, "assert_true_mean_in_interval_by_dkwm",
      [samples, low, high, expected_low, expected_high, false_fail_rate]):
    samples = ops.convert_to_tensor(samples, name="samples")
    low = ops.convert_to_tensor(low, name="low")
    high = ops.convert_to_tensor(high, name="high")
    expected_low = ops.convert_to_tensor(expected_low, name="expected_low")
    expected_high = ops.convert_to_tensor(expected_high, name="expected_high")
    false_fail_rate = ops.convert_to_tensor(
        false_fail_rate, name="false_fail_rate")
    samples = _check_shape_dominates(
        samples, [low, high, expected_low, expected_high])
    min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
        samples, low, high, false_fail_rate)
    # Assert that the interval [min_mean, max_mean] intersects the
    # interval [expected_low, expected_high].  This is true if
    #   max_mean >= expected_low and min_mean <= expected_high.
    # By DeMorgan's law, that's also equivalent to
    #   not (max_mean < expected_low or min_mean > expected_high),
    # which is a way of saying the two intervals are not disjoint.
    check_confidence_interval_can_intersect = check_ops.assert_greater_equal(
        max_mean, expected_low, message="Confidence interval does not "
        "intersect: true mean smaller than expected")
    with ops.control_dependencies([check_confidence_interval_can_intersect]):
      return check_ops.assert_less_equal(
          min_mean, expected_high, message="Confidence interval does not "
          "intersect: true mean greater than expected")

Author: ahmedsaiduk | Project: tensorflow | Lines: 68 | Source: statistical_testing.py
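The two chained assertions encode exactly the interval-overlap predicate spelled out in the comment. Distilled into plain Python (an illustrative helper, not part of the source):

def intervals_intersect(a_lo, a_hi, b_lo, b_hi):
  # Closed intervals [a_lo, a_hi] and [b_lo, b_hi] overlap iff neither lies
  # entirely to one side of the other (De Morgan on "disjoint").
  return a_hi >= b_lo and a_lo <= b_hi

assert intervals_intersect(0.0, 1.0, 0.5, 2.0)      # overlapping
assert not intervals_intersect(0.0, 1.0, 1.5, 2.0)  # disjoint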
Example 18: percentile

def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of
  the way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.

  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum). By default, this is computed over every dim
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile
  # (max) over all such samples.
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q: Scalar `Tensor` in `[0, 100]`. The percentile.
    axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axis that hold independent samples over which to return the desired
      percentile. If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation: {"lower", "higher", "nearest"}. Default: "nearest".
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims: Python `bool`. If `True`, the last dimension is kept with size
      1. If `False`, the last dimension is removed from the output shape.
    validate_args: Whether to add runtime checks of argument validity. If
      False, and arguments are incorrect, correct behavior is not guaranteed.
    name: A Python string name to give this `Op`. Default is "percentile".

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
    `axis` is `None`, a scalar.

  Raises:
    ValueError: If argument 'interpolation' is not an allowed type.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
                       (allowed_interpolations, interpolation))

  with ops.name_scope(name, [x, q]):
    x = ops.convert_to_tensor(x, name="x")
    # Double is needed here and below, else we get the wrong index if the
    # array is huge along axis.
    q = math_ops.to_double(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          check_ops.assert_rank(q, 0),
          check_ops.assert_greater_equal(q, math_ops.to_double(0.)),
          check_ops.assert_less_equal(q, math_ops.to_double(100.))
      ], q)

    if axis is None:
      y = array_ops.reshape(x, [-1])
      # ......... (remainder of the code omitted) .........

Author: BhaskarNallani | Project: tensorflow | Lines: 101 | Source: sample_stats.py
Example 19: predict

def predict(self, features):
  """Computes predictions multiple steps into the future.

  Args:
    features: A dictionary with the following key/value pairs:
      PredictionFeatures.TIMES: A [batch size, predict window size]
        integer Tensor of times, after the window of data indicated by
        `STATE_TUPLE`, to make predictions for.
      PredictionFeatures.STATE_TUPLE: A tuple of (times, values), times with
        shape [batch size, self.input_window_size], values with shape [batch
        size, self.input_window_size, self.num_features] representing a
        segment of the time series before `TIMES`. This data is used
        to start off the autoregressive computation. This should have data for
        at least self.input_window_size timesteps.
      And any exogenous features, with shapes prefixed by shape of `TIMES`.

  Returns:
    A dictionary with keys, "mean", "covariance". The
    values are Tensors of shape [batch_size, predict window size,
    num_features] and correspond to the values passed in `TIMES`.
  """
  if not self._graph_initialized:
    self.initialize_graph()
  predict_times = math_ops.cast(
      ops.convert_to_tensor(features[PredictionFeatures.TIMES]), dtypes.int32)
  exogenous_regressors = self._process_exogenous_features(
      times=predict_times,
      features={key: value for key, value in features.items()
                if key not in [TrainEvalFeatures.TIMES,
                               TrainEvalFeatures.VALUES,
                               PredictionFeatures.STATE_TUPLE]})
  with ops.control_dependencies(
      [check_ops.assert_equal(array_ops.shape(predict_times)[1],
                              array_ops.shape(exogenous_regressors)[1])]):
    exogenous_regressors = array_ops.identity(exogenous_regressors)
  batch_size = array_ops.shape(predict_times)[0]
  num_predict_values = array_ops.shape(predict_times)[1]
  prediction_iterations = ((num_predict_values + self.output_window_size - 1)
                           // self.output_window_size)
  # Pad predict_times and exogenous regressors so as to have exact multiple
  # of self.output_window_size values per example.
  padding_size = (prediction_iterations * self.output_window_size -
                  num_predict_values)
  predict_times = array_ops.pad(
      predict_times, [[0, 0], [0, padding_size]])
  exogenous_regressors = array_ops.pad(
      exogenous_regressors, [[0, 0], [0, padding_size], [0, 0]])

  state = features[PredictionFeatures.STATE_TUPLE]
  (state_times, state_values, state_exogenous_regressors) = state
  state_times = math_ops.cast(
      ops.convert_to_tensor(state_times), dtypes.int32)
  state_values = ops.convert_to_tensor(state_values, dtype=self.dtype)
  state_exogenous_regressors = ops.convert_to_tensor(
      state_exogenous_regressors, dtype=self.dtype)

  initial_input_times = predict_times[:, :self.output_window_size]
  initial_input_exogenous_regressors = (
      exogenous_regressors[:, :self.output_window_size, :])
  if self.input_window_size > 0:
    initial_input_times = array_ops.concat(
        [state_times[:, -self.input_window_size:], initial_input_times], 1)
    values_size = array_ops.shape(state_values)[1]
    times_size = array_ops.shape(state_times)[1]
    with ops.control_dependencies([
        check_ops.assert_greater_equal(values_size, self.input_window_size),
        check_ops.assert_equal(values_size, times_size)
    ]):
      initial_input_values = state_values[:, -self.input_window_size:, :]
      initial_input_exogenous_regressors = array_ops.concat(
          [state_exogenous_regressors[:, -self.input_window_size:, :],
           initial_input_exogenous_regressors[
               :, :self.output_window_size, :]],
          axis=1)
  else:
    initial_input_values = 0

  # Iterate over the predict_times, predicting self.output_window_size
  # values in each iteration.
  def _while_condition(iteration_number, *unused_args):
    return math_ops.less(iteration_number, prediction_iterations)

  def _while_body(iteration_number, input_times, input_values,
                  input_exogenous_regressors, mean_ta, covariance_ta):
    """Predict self.output_window_size values."""
    prediction_ops = self.prediction_ops(
        input_times, input_values, input_exogenous_regressors)
    predicted_mean = prediction_ops["mean"]
    predicted_covariance = prediction_ops["covariance"]
    offset = self.output_window_size * gen_math_ops.minimum(
        iteration_number + 1, prediction_iterations - 1)
    if self.input_window_size > 0:
      if self.output_window_size < self.input_window_size:
        new_input_values = array_ops.concat(
            [input_values[:, self.output_window_size:, :], predicted_mean], 1)
        new_input_exogenous_regressors = array_ops.concat(
            [input_exogenous_regressors[:, -self.input_window_size:, :],
             exogenous_regressors[
                 :, offset:offset + self.output_window_size, :]],
            axis=1)
        new_input_times = array_ops.concat([
            input_times[:, -self.input_window_size:],
            # ......... (remainder of the code omitted) .........

Author: ThunderQi | Project: tensorflow | Lines: 101 | Source: ar_model.py
Example 20: update_confusion_matrix_variables

def update_confusion_matrix_variables(variables_to_update,
                                      y_true,
                                      y_pred,
                                      thresholds,
                                      top_k=None,
                                      class_id=None,
                                      sample_weight=None):
  """Returns op to update the given confusion matrix variables.

  For every pair of values in y_true and y_pred:

  true_positive: y_true == True and y_pred > thresholds
  false_negatives: y_true == True and y_pred <= thresholds
  true_negatives: y_true == False and y_pred <= thresholds
  false_positive: y_true == False and y_pred > thresholds

  The results will be weighted and added together. When multiple thresholds
  are provided, we will repeat the same for every threshold.

  For estimation of these metrics over a stream of data, the function creates
  an `update_op` operation that updates the given variables.

  If `sample_weight` is `None`, weights default to 1.
  Use weights of 0 to mask values.

  Args:
    variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
      and corresponding variables to update as values.
    y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
    y_pred: A floating point `Tensor` of arbitrary shape and whose values are
      in the range `[0, 1]`.
    thresholds: A float value or a python list or tuple of float thresholds in
      `[0, 1]`, or NEG_INF (used when top_k is set).
    top_k: Optional int, indicates that the positive labels should be limited
      to the top k predictions.
    class_id: Optional int, limits the prediction and labels to the class
      specified by this argument.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `y_true`
      dimension).

  Returns:
    Update op.

  Raises:
    ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
      `sample_weight` is not `None` and its shape doesn't match `y_pred`, or
      if `variables_to_update` contains invalid keys.
  """
  if variables_to_update is None:
    return
  y_true = ops.convert_to_tensor(y_true)
  y_pred = ops.convert_to_tensor(y_pred)
  y_pred.shape.assert_is_compatible_with(y_true.shape)

  if not any(
      key for key in variables_to_update if key in list(ConfusionMatrix)):
    raise ValueError(
        'Please provide at least one valid confusion matrix '
        'variable to update. Valid variable key options are: "{}". '
        'Received: "{}"'.format(
            list(ConfusionMatrix), variables_to_update.keys()))

  invalid_keys = [
      key for key in variables_to_update if key not in list(ConfusionMatrix)
  ]
  if invalid_keys:
    raise ValueError(
        'Invalid keys: {}. Valid variable key options are: "{}"'.format(
            invalid_keys, list(ConfusionMatrix)))

  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          y_pred,
          math_ops.cast(0.0, dtype=y_pred.dtype),
          message='predictions must be >= 0'),
      check_ops.assert_less_equal(
          y_pred,
          math_ops.cast(1.0, dtype=y_pred.dtype),
          message='predictions must be <= 1')
  ]):
    y_pred, y_true, sample_weight = squeeze_or_expand_dimensions(
        math_ops.cast(y_pred, dtype=dtypes.float32),
        math_ops.cast(y_true, dtype=dtypes.bool), sample_weight)

  if top_k is not None:
    y_pred = _filter_top_k(y_pred, top_k)
  if class_id is not None:
    y_true = y_true[..., class_id]
    y_pred = y_pred[..., class_id]

  thresholds = to_list(thresholds)
  num_thresholds = len(thresholds)
  num_predictions = array_ops.size(y_pred)

  # Reshape predictions and labels.
  predictions_2d = array_ops.reshape(y_pred, [1, -1])
  labels_2d = array_ops.reshape(
      math_ops.cast(y_true, dtype=dtypes.bool), [1, -1])
  # ......... (remainder of the code omitted) .........

Author: rmlarsen | Project: tensorflow | Lines: 101 | Source: metrics_utils.py
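The four definitions in the docstring translate directly into boolean masks. A hedged NumPy sketch for a single threshold (illustrative data, unweighted):

import numpy as np

y_true = np.array([True, True, False, False])
y_pred = np.array([0.9, 0.2, 0.8, 0.1])
t = 0.5  # one threshold

tp = np.sum(y_true & (y_pred > t))    # y_true == True  and y_pred >  t
fn = np.sum(y_true & (y_pred <= t))   # y_true == True  and y_pred <= t
fp = np.sum(~y_true & (y_pred > t))   # y_true == False and y_pred >  t
tn = np.sum(~y_true & (y_pred <= t))  # y_true == False and y_pred <= t
print(tp, fn, fp, tn)  # -> 1 1 1 1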