This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.rank. If you are wondering what rank does, how to use it, and what real calls to it look like, the curated code examples below may help.
The following shows 20 code examples of the rank function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: rank
def rank(input, name=None):  # pylint: disable=redefined-builtin
  """Returns the rank of a RaggedTensor.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  For example:

  ```python
  # shape of tensor 't' is [2, None, None]
  t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
  tf.rank(t)  # 3
  ```

  Args:
    input: A `RaggedTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, 'RaggedRank', [input]) as name:
    if not ragged_tensor.is_ragged(input):
      return array_ops.rank(input, name)

    return input.ragged_rank + array_ops.rank(input.flat_values)
Author: aritratony | Project: tensorflow | Lines: 25 | Source: ragged_array_ops.py
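To see this end to end, here is a minimal usage sketch with the public `tf.rank` API (which dispatches to this ragged implementation for `RaggedTensor` inputs), assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

# Two ragged dimensions on top of one uniform dimension: total rank is 3.
t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
print(tf.rank(t).numpy())  # 3

# For ordinary tensors, tf.rank falls through to the dense implementation.
print(tf.rank(tf.zeros([2, 3, 4])).numpy())  # 3
```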
Example 2: _check_shapes_dynamic
def _check_shapes_dynamic(self, operator, v, diag):
  """Return (v, diag) with Assert dependencies, which check shape."""
  checks = []
  with ops.op_scope([operator, v, diag], 'check_shapes'):
    s_v = array_ops.shape(v)
    r_op = operator.rank()
    r_v = array_ops.rank(v)
    if diag is not None:
      s_d = array_ops.shape(diag)
      r_d = array_ops.rank(diag)

    # Check tensor rank.
    checks.append(check_ops.assert_rank(v, r_op))
    if diag is not None:
      checks.append(check_ops.assert_rank(diag, r_op - 1))

    # Check batch shape.
    checks.append(check_ops.assert_equal(
        operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

    # Check event shape.
    checks.append(check_ops.assert_equal(
        operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

    v = control_flow_ops.with_dependencies(checks, v)
    if diag is not None:
      diag = control_flow_ops.with_dependencies(checks, diag)

    return v, diag
Author: 10imaging | Project: tensorflow | Lines: 34 | Source: operator_pd_vdvt_update.py
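The core trick above is slicing the dynamic shape by rank to isolate the batch dimensions. A standalone sketch of that pattern using public TF 2.x ops; the shapes, and the expected `[3]` batch shape, are made up for illustration:

```python
import tensorflow as tf

v = tf.ones([3, 4, 2])  # batch shape [3]; the last two dims are the "event"
r_v = tf.rank(v)
# Everything before the last two dimensions is the batch shape.
batch_shape = tf.slice(tf.shape(v), [0], [r_v - 2])
tf.debugging.assert_equal(batch_shape, tf.constant([3]))
print(batch_shape.numpy())  # [3]
```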
Example 3: _inverse_log_det_jacobian
def _inverse_log_det_jacobian(self, y, **kwargs):
  ildj = constant_op.constant(
      0., dtype=y.dtype.base_dtype, name="inverse_log_det_jacobian")

  if not self.bijectors:
    return ildj

  event_ndims = _maybe_get_event_ndims_statically(
      self.inverse_min_event_ndims)

  if _use_static_shape(y, event_ndims):
    event_shape = y.shape[y.shape.ndims - event_ndims:]
  else:
    event_shape = array_ops.shape(y)[array_ops.rank(y) - event_ndims:]

  for b in self.bijectors:
    ildj += b.inverse_log_det_jacobian(
        y, event_ndims=event_ndims, **kwargs.get(b.name, {}))

    if _use_static_shape(y, event_ndims):
      event_shape = b.inverse_event_shape(event_shape)
      event_ndims = _maybe_get_event_ndims_statically(event_shape.ndims)
    else:
      event_shape = b.inverse_event_shape_tensor(event_shape)
      event_ndims = _maybe_get_event_ndims_statically(
          array_ops.rank(event_shape))

    y = b.inverse(y, **kwargs.get(b.name, {}))
  return ildj
Author: Jackiefan | Project: tensorflow | Lines: 28 | Source: chain.py
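The dynamic branch relies on slicing `shape(y)` starting at `rank(y) - event_ndims`. A self-contained sketch of just that slice, with made-up shapes:

```python
import tensorflow as tf

y = tf.zeros([5, 3, 2])
event_ndims = 2
# Dynamic equivalent of the static y.shape[-event_ndims:].
event_shape = tf.shape(y)[tf.rank(y) - event_ndims:]
print(event_shape.numpy())  # [3 2]
```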
Example 4: _check_mu
def _check_mu(self, mu):
  """Return `mu` after validity checks and possibly with assertions."""
  mu = ops.convert_to_tensor(mu)
  cov = self._cov

  if mu.dtype != cov.dtype:
    raise TypeError(
        "mu and cov must have the same dtype. Found mu.dtype = %s, "
        "cov.dtype = %s"
        % (mu.dtype, cov.dtype))
  if not self.strict:
    return mu
  else:
    assert_compatible_shapes = control_flow_ops.group(
        check_ops.assert_equal(
            array_ops.rank(mu) + 1,
            cov.rank(),
            data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
                  array_ops.rank(mu), " rank(cov) = ", cov.rank()],
        ),
        check_ops.assert_equal(
            array_ops.shape(mu),
            cov.vector_shape(),
            data=["mu.shape and cov.shape[:-1] should match. "
                  "Found: shape(mu) = ", array_ops.shape(mu),
                  " shape(cov) = ", cov.shape()],
        ),
    )
    return control_flow_ops.with_dependencies([assert_compatible_shapes], mu)
Author: 363158858 | Project: tensorflow | Lines: 29 | Source: mvn.py
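The same rank relationship can be checked with public ops; a minimal sketch, assuming a vector mean and a dense covariance matrix:

```python
import tensorflow as tf

mu = tf.zeros([3])  # rank 1
cov = tf.eye(3)     # rank 2: mu should have exactly one less dimension
tf.debugging.assert_equal(
    tf.rank(mu) + 1, tf.rank(cov),
    message="mu should have rank 1 less than cov")
```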
Example 5: _check_labels_and_scores
def _check_labels_and_scores(boolean_labels, scores, check_shape):
  """Check the rank of labels/scores, return tensor versions."""
  with ops.op_scope([boolean_labels, scores], '_check_labels_and_scores'):
    boolean_labels = ops.convert_to_tensor(boolean_labels,
                                           name='boolean_labels')
    scores = ops.convert_to_tensor(scores, name='scores')

    if boolean_labels.dtype != dtypes.bool:
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s' %
          boolean_labels.dtype)

    if check_shape:
      labels_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(boolean_labels)),
          ['Argument boolean_labels should have rank 1. Found: ',
           boolean_labels.name, array_ops.shape(boolean_labels)])
      scores_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(scores)),
          ['Argument scores should have rank 1. Found: ', scores.name,
           array_ops.shape(scores)])
      with ops.control_dependencies([labels_rank_1, scores_rank_1]):
        return boolean_labels, scores
    else:
      return boolean_labels, scores
Author: 285219011 | Project: hello-world | Lines: 27 | Source: histogram_ops.py
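A compact sketch of the same rank-1 guard with the public `tf.debugging.Assert`, assuming eager execution (the assertion raises immediately when the condition is False):

```python
import tensorflow as tf

scores = tf.constant([0.1, 0.7, 0.4])
# Passes silently here; raises InvalidArgumentError for a rank != 1 input.
tf.debugging.Assert(
    tf.equal(tf.rank(scores), 1),
    ['Argument scores should have rank 1. Found shape:', tf.shape(scores)])
```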
Example 6: _get_sparse_tensors
def _get_sparse_tensors(self, inputs, weight_collections=None,
                        trainable=None):
  sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)
  id_tensor = sparse_tensors.id_tensor
  weight_tensor = sparse_tensors.weight_tensor

  # Expands final dimension, so that embeddings are not combined during
  # embedding lookup.
  check_id_rank = check_ops.assert_equal(
      array_ops.rank(id_tensor), 2,
      data=[
          'Column {} expected ID tensor of rank 2. '.format(self.name),
          'id_tensor shape: ', array_ops.shape(id_tensor)])
  with ops.control_dependencies([check_id_rank]):
    id_tensor = sparse_ops.sparse_reshape(
        id_tensor,
        shape=array_ops.concat([id_tensor.dense_shape, [1]], axis=0))

  if weight_tensor is not None:
    check_weight_rank = check_ops.assert_equal(
        array_ops.rank(weight_tensor), 2,
        data=[
            'Column {} expected weight tensor of rank 2.'.format(self.name),
            'weight_tensor shape:', array_ops.shape(weight_tensor)])
    with ops.control_dependencies([check_weight_rank]):
      weight_tensor = sparse_ops.sparse_reshape(
          weight_tensor,
          shape=array_ops.concat([weight_tensor.dense_shape, [1]], axis=0))

  return fc._CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
Author: DILASSS | Project: tensorflow | Lines: 27 | Source: sequential_feature_column.py
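A standalone sketch of the rank-2 to rank-3 expansion with the public sparse API; the indices and values here are made up:

```python
import tensorflow as tf

ids = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 1]], values=[3, 7], dense_shape=[2, 2])
# Append a size-1 dimension so each id keeps its own embedding row
# during lookup instead of being combined.
expanded = tf.sparse.reshape(
    ids, shape=tf.concat([ids.dense_shape, [1]], axis=0))
print(expanded.dense_shape.numpy())  # [2 2 1]
```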
Example 7: same_dynamic_shape
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # Here we can't just do math_ops.equal(a.shape, b.shape), since
  # static shape inference may break the equality comparison between
  # shape(a) and shape(b) in math_ops.equal.
  def all_shapes_equal():
    return math_ops.reduce_all(math_ops.equal(
        array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
        array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      all_shapes_equal,
      lambda: constant_op.constant(False))
Author: Jackhuang945 | Project: tensorflow | Lines: 27 | Source: distribution_util.py
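The double-concatenation trick deserves a tiny demonstration: concatenating `shape(a)+shape(b)` in both orders yields element-wise equal tensors only when the two shapes are identical (ranks are checked first, so the concatenations have equal length). A sketch in eager mode:

```python
import tensorflow as tf

a, b = tf.zeros([2, 3]), tf.zeros([2, 3])
same = tf.reduce_all(tf.equal(
    tf.concat([tf.shape(a), tf.shape(b)], 0),
    tf.concat([tf.shape(b), tf.shape(a)], 0)))
print(same.numpy())  # True

c = tf.zeros([3, 2])  # same rank, different shape
same = tf.reduce_all(tf.equal(
    tf.concat([tf.shape(a), tf.shape(c)], 0),   # [2 3 3 2]
    tf.concat([tf.shape(c), tf.shape(a)], 0)))  # [3 2 2 3]
print(same.numpy())  # False
```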
Example 8: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x, **kwargs):
  x = ops.convert_to_tensor(x, name="x")

  fldj = constant_op.constant(
      0., dtype=x.dtype, name="inverse_log_det_jacobian")

  if not self.bijectors:
    return fldj

  event_ndims = _maybe_get_event_ndims_statically(
      self.forward_min_event_ndims)

  if _use_static_shape(x, event_ndims):
    event_shape = x.shape[x.shape.ndims - event_ndims:]
  else:
    event_shape = array_ops.shape(x)[array_ops.rank(x) - event_ndims:]

  for b in reversed(self.bijectors):
    fldj += b.forward_log_det_jacobian(
        x, event_ndims=event_ndims, **kwargs.get(b.name, {}))
    if _use_static_shape(x, event_ndims):
      event_shape = b.forward_event_shape(event_shape)
      event_ndims = _maybe_get_event_ndims_statically(event_shape.ndims)
    else:
      event_shape = b.forward_event_shape_tensor(event_shape)
      event_ndims = _maybe_get_event_ndims_statically(
          array_ops.rank(event_shape))

    x = b.forward(x, **kwargs.get(b.name, {}))
  return fldj
Author: Jackiefan | Project: tensorflow | Lines: 31 | Source: chain.py
Example 9: check
def check(t):
  target = array_ops.shape(tensor)[1:]
  result = array_ops.broadcast_dynamic_shape(target, array_ops.shape(t))
  # This rank check ensures that I don't get a wrong answer from the
  # _shapes_ broadcasting against each other.
  gt = check_ops.assert_greater(array_ops.rank(target), array_ops.rank(t))
  eq = check_ops.assert_equal(target, result)
  return gt, eq
Author: AndrewTwinz | Project: tensorflow | Lines: 8 | Source: statistical_testing.py
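`broadcast_dynamic_shape` is the load-bearing op here; a quick sketch of what the check verifies, with made-up shapes:

```python
import tensorflow as tf

target = tf.constant([4, 5])  # shape of tensor[1:]
t_shape = tf.constant([5])    # shape of the candidate t
result = tf.broadcast_dynamic_shape(target, t_shape)
# t broadcasts against target without changing it: result == target.
print(result.numpy())         # [4 5]
```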
Example 10: sign_magnitude_positive_definite
def sign_magnitude_positive_definite(
    raw, off_diagonal_scale=0., overall_scale=0.):
  """Constructs a positive definite matrix from an unconstrained input matrix.

  We want to keep the whole matrix on a log scale, but also allow off-diagonal
  elements to be negative, so the sign of off-diagonal elements is modeled
  separately from their magnitude (using the lower and upper triangles
  respectively). Specifically:

    for i < j, we have:
      output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
          exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)

    output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)

    output = output_cholesky^T * output_cholesky

  where raw, off_diagonal_scale, and overall_scale are
  un-constrained real-valued variables. The resulting values are stable
  around zero due to the exponential (and the softsign keeps the function
  smooth).

  Args:
    raw: A [..., M, M] Tensor.
    off_diagonal_scale: A scalar or [...] shaped Tensor controlling the
      relative scale of off-diagonal values in the output matrix.
    overall_scale: A scalar or [...] shaped Tensor controlling the overall
      scale of the output matrix.

  Returns:
    The `output` matrix described above, a [..., M, M] positive definite
    matrix.
  """
  raw = ops.convert_to_tensor(raw)
  diagonal = array_ops.matrix_diag_part(raw)

  def _right_pad_with_ones(tensor, target_rank):
    # Allow broadcasting even if overall_scale and off_diagonal_scale have
    # batch dimensions.
    tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
    return array_ops.reshape(
        tensor,
        array_ops.concat(
            [
                array_ops.shape(tensor),
                array_ops.ones([target_rank - array_ops.rank(tensor)],
                               dtype=target_rank.dtype)
            ],
            axis=0))

  # We divide the log values by 2 to compensate for the squaring that happens
  # when transforming Cholesky factors into positive definite matrices.
  sign_magnitude = (gen_math_ops.exp(
      (raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
       _right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
                    nn.softsign(array_ops.matrix_transpose(raw)))
  sign_magnitude.set_shape(raw.get_shape())
  cholesky_factor = array_ops.matrix_set_diag(
      input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
      diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
          overall_scale, array_ops.rank(diagonal))) / 2.))
  return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
Author: AutumnQYN | Project: tensorflow | Lines: 58 | Source: math_utils.py
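A usage sketch for the function above, assuming it is defined as shown together with its internal TF imports; the input values are random, and the eigenvalues of the output should all be positive:

```python
import numpy as np
import tensorflow as tf

raw = tf.constant(np.random.randn(4, 4), dtype=tf.float32)
pd = sign_magnitude_positive_definite(raw)  # function defined above
# A positive definite matrix has strictly positive eigenvalues.
print(tf.linalg.eigvalsh(pd).numpy())       # all entries > 0
```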
Example 11: remove_squeezable_dimensions
def remove_squeezable_dimensions(
    labels, predictions, expected_rank_diff=0, name=None):
  """Squeeze last dim if ranks differ from expected by exactly 1.

  In the common case where we expect shapes to match, `expected_rank_diff`
  defaults to 0, and we squeeze the last dimension of the larger rank if they
  differ by 1.

  But, for example, if `labels` contains class IDs and `predictions` contains
  1 probability per class, we expect `predictions` to have 1 more dimension
  than `labels`, so `expected_rank_diff` would be 1. In this case, we'd
  squeeze `labels` if `rank(predictions) - rank(labels) == 0`, and
  `predictions` if `rank(predictions) - rank(labels) == 2`.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
    name: Name of the op.

  Returns:
    Tuple of `labels` and `predictions`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [labels, predictions]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      if rank_diff == expected_rank_diff + 1:
        predictions = array_ops.squeeze(predictions, [-1])
      elif rank_diff == expected_rank_diff - 1:
        labels = array_ops.squeeze(labels, [-1])
      return labels, predictions

    # Use dynamic rank.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff + 1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff - 1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return labels, predictions
Author: aritratony | Project: tensorflow | Lines: 58 | Source: confusion_matrix.py
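A minimal sketch of the static-rank branch, with made-up label/prediction shapes:

```python
import tensorflow as tf

labels = tf.constant([[1], [0], [1]])    # shape [3, 1], rank 2
predictions = tf.constant([.9, .2, .8])  # shape [3], rank 1
rank_diff = labels.shape.ndims - predictions.shape.ndims
if rank_diff == 1:                       # labels carries one extra dim
  labels = tf.squeeze(labels, [-1])
print(labels.shape)                      # (3,)
```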
Example 12: testRank
def testRank(self):
  with self.test_scope():
    self.assertEqual(
        0, array_ops.rank(constant_op.constant(1.0)).numpy())
    self.assertEqual(
        1, array_ops.rank(constant_op.constant([1.0, 2.0, 3.0])).numpy())
    self.assertEqual(
        2, array_ops.rank(
            constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
Author: JonathanRaiman | Project: tensorflow | Lines: 9 | Source: eager_test.py
Example 13: _get_chol_and_x_compatible_shape
def _get_chol_and_x_compatible_shape(self, x):
  """Return self.chol and x, (possibly) broadcast to compatible shape."""
  # x and chol are "compatible" if their shapes match except that the last
  # two dimensions of chol are [k, k], and the last two of x are [k, 1].
  # E.g. x.shape = [A, B, k, 1], and chol.shape = [A, B, k, k]
  # This is required for the batch_triangular_solve, which does not broadcast.

  # TODO(langmore) This broadcast replicates matrices unnecessarily! In the
  # case where
  #   x.shape = [M1,...,Mr, N1,...,Nb, k], and chol.shape = [N1,...,Nb, k, k]
  # (which is common if x was sampled), the front dimensions of x can be
  # "flipped" to the end, making
  #   x_flipped.shape = [N1,...,Nb, k, M1*...*Mr],
  # and this can be handled by the linear solvers. This is preferred, because
  # it does not replicate the matrix, or create any new data.

  # We assume x starts without the trailing singleton dimension, e.g.
  # x.shape = [B, k].
  chol = self._chol
  with ops.op_scope([x] + self.inputs, 'get_chol_and_x_compatible_shape'):
    # If we determine statically that shapes match, we're done.
    if x.get_shape() == chol.get_shape()[:-1]:
      x_expanded = array_ops.expand_dims(x, -1)
      return chol, x_expanded

    # Dynamic check if shapes match or not.
    vector_shape = self.vector_shape()  # Shape of chol minus last dim.
    are_same_rank = math_ops.equal(
        array_ops.rank(x), array_ops.rank(vector_shape))

    def shapes_match_if_same_rank():
      return math_ops.reduce_all(math_ops.equal(
          array_ops.shape(x), vector_shape))

    shapes_match = control_flow_ops.cond(are_same_rank,
                                         shapes_match_if_same_rank,
                                         lambda: ops.convert_to_tensor(False))

    # Make tensors (never instantiated) holding the broadcast shape.
    # matrix_broadcast_dummy is the shape we will broadcast chol to.
    matrix_bcast_dummy = chol + array_ops.expand_dims(x, -1)
    # vector_bcast_dummy is the shape we will bcast x to, before we expand it.
    chol_minus_last_dim = math_ops.reduce_sum(chol, reduction_indices=[-1])
    vector_bcast_dummy = x + chol_minus_last_dim

    chol_bcast = chol + array_ops.zeros_like(matrix_bcast_dummy)
    x_bcast = x + array_ops.zeros_like(vector_bcast_dummy)

    chol_result = control_flow_ops.cond(shapes_match, lambda: chol,
                                        lambda: chol_bcast)
    chol_result.set_shape(matrix_bcast_dummy.get_shape())
    x_result = control_flow_ops.cond(shapes_match, lambda: x, lambda: x_bcast)
    x_result.set_shape(vector_bcast_dummy.get_shape())
    x_expanded = array_ops.expand_dims(x_result, -1)

    return chol_result, x_expanded
Author: 31H0B1eV | Project: tensorflow | Lines: 57 | Source: operator_pd_cholesky.py
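The "dummy" broadcast trick above is worth isolating: adding tensors is a cheap way to obtain the fully broadcast shape without writing shape arithmetic by hand. A sketch with small made-up shapes:

```python
import tensorflow as tf

chol = tf.eye(3, batch_shape=[2])  # [2, 3, 3]
x = tf.ones([3])                   # [3]
# Adding x (with a trailing singleton) broadcasts to chol's full shape.
matrix_bcast_dummy = chol + tf.expand_dims(x, -1)      # [2, 3, 3]
# Summing out chol's last dim yields a [2, 3] tensor to broadcast x against.
vector_bcast_dummy = x + tf.reduce_sum(chol, axis=-1)  # [2, 3]
print(matrix_bcast_dummy.shape, vector_bcast_dummy.shape)
```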
Example 14: _reshape_helper
def _reshape_helper(self, x, event_shape_in, event_shape_out):
  """Reshape only the event_shape of an input `Tensor`."""

  def _get_rank_from_shape(shape):
    """Computes rank from a shape `Tensor`, statically if possible."""
    # Uses the fact that rank is the "shape of the shape".
    ndims = shape.shape.with_rank_at_least(1)[0].value
    if ndims is not None:
      return ndims, ndims
    return None, array_ops.shape(shape)[0]

  event_ndims_in_, event_ndims_in = _get_rank_from_shape(event_shape_in)

  assertions = []

  # Ensure x.event_shape is compatible with event_shape_in.
  if x.shape.ndims is not None:
    x_ndims_, x_ndims = [x.shape.ndims]*2
  else:
    x_ndims_, x_ndims = None, array_ops.rank(x)

  if (event_ndims_in_ is not None
      and x_ndims_ is not None
      and x.shape.with_rank_at_least(event_ndims_in_)[
          x_ndims_-event_ndims_in_:].is_fully_defined()):
    x_event_shape_, x_event_shape = [  # pylint: disable=unbalanced-tuple-unpacking
        np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2
  else:
    x_event_shape_, x_event_shape = (
        None, array_ops.shape(x)[x_ndims-event_ndims_in:])

  event_shape_in_ = tensor_util.constant_value(event_shape_in)

  if x_event_shape_ is not None and event_shape_in_ is not None:
    if not np.equal(x_event_shape_, event_shape_in_).all():
      raise ValueError(
          "Input `event_shape` ({}) does not match `event_shape_in` ({}).".
          format(x_event_shape_, event_shape_in_))
  elif self.validate_args:
    assertions.append(check_ops.assert_equal(
        x_event_shape, event_shape_in,
        message="Input `event_shape` does not match `event_shape_in`."))

  if assertions:
    x = control_flow_ops.with_dependencies(assertions, x)

  # Get the parts of shape(x) that will not change.
  sample_and_batch_shape = array_ops.shape(x)
  ndims = (x.shape.ndims if x.shape.ndims is not None
           else array_ops.rank(x))
  sample_and_batch_shape = sample_and_batch_shape[
      :(ndims - math_ops.abs(event_ndims_in))]

  new_shape = array_ops.concat(
      [sample_and_batch_shape, event_shape_out], axis=0)

  return array_ops.reshape(x, new_shape)
Author: SylChan | Project: tensorflow | Lines: 57 | Source: reshape_impl.py
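The helper's "rank is the shape of the shape" observation in one line, as a sketch:

```python
import tensorflow as tf

event_shape = tf.constant([3, 4])  # a shape vector describing a rank-2 event
# The number of event dimensions is the length of the shape vector.
event_ndims = tf.shape(event_shape)[0]
print(event_ndims.numpy())         # 2
```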
Example 15: testDenseShape
def testDenseShape(self):
  t_value = [[0, 42], [24, 0]]
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
  self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
  self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))

  t = constant_op.constant(t_value)
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
  self.assertEqual(4, self.evaluate(array_ops.size(t)))
  self.assertEqual(2, self.evaluate(array_ops.rank(t)))
Author: Jackiefan | Project: tensorflow | Lines: 10 | Source: array_ops_test.py
Example 16: testSparseShape
def testSparseShape(self):
  sp_value = sparse_tensor.SparseTensorValue(
      indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
  self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
  self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))

  sp = sparse_tensor.SparseTensor.from_value(sp_value)
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
  self.assertEqual(4, self.evaluate(array_ops.size(sp)))
  self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
Author: Jackiefan | Project: tensorflow | Lines: 11 | Source: array_ops_test.py
Example 17: testDenseShape
def testDenseShape(self):
  with self.test_session():
    t_value = [[0, 42], [24, 0]]
    self.assertAllEqual((2, 2), array_ops.shape(t_value).eval())
    self.assertEqual(4, array_ops.size(t_value).eval())
    self.assertEqual(2, array_ops.rank(t_value).eval())

    t = constant_op.constant(t_value)
    self.assertAllEqual((2, 2), array_ops.shape(t).eval())
    self.assertEqual(4, array_ops.size(t).eval())
    self.assertEqual(2, array_ops.rank(t).eval())
Author: AlbertXiebnu | Project: tensorflow | Lines: 11 | Source: array_ops_test.py
Example 18: testSparseShape
def testSparseShape(self):
  with self.test_session():
    sp_value = sparse_tensor.SparseTensorValue(
        indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
    self.assertAllEqual((2, 2), array_ops.shape(sp_value).eval())
    self.assertEqual(4, array_ops.size(sp_value).eval())
    self.assertEqual(2, array_ops.rank(sp_value).eval())

    sp = sparse_tensor.SparseTensor.from_value(sp_value)
    self.assertAllEqual((2, 2), array_ops.shape(sp).eval())
    self.assertEqual(4, array_ops.size(sp).eval())
    self.assertEqual(2, array_ops.rank(sp).eval())
Author: AlbertXiebnu | Project: tensorflow | Lines: 12 | Source: array_ops_test.py
Example 19: run_inception
def run_inception(images,
                  graph_def=None,
                  default_graph_def_fn=_default_graph_def_fn,
                  image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
                  input_tensor=INCEPTION_INPUT,
                  output_tensor=INCEPTION_OUTPUT):
  """Run images through a pretrained Inception classifier.

  Args:
    images: Input tensors. Must be [batch, height, width, channels]. Input
      shape and values must be in [-1, 1], which can be achieved using
      `preprocess_image`.
    graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
      call `default_graph_def_fn` to get GraphDef.
    default_graph_def_fn: A function that returns a GraphDef. Used if
      `graph_def` is `None`. By default, returns a pretrained InceptionV3
      graph.
    image_size: Required image width and height. See unit tests for the
      default values.
    input_tensor: Name of input Tensor.
    output_tensor: Name or list of output Tensors. This function will compute
      activations at the specified layer. Examples include INCEPTION_V3_OUTPUT
      and INCEPTION_V3_FINAL_POOL which would result in this function
      computing the final logits or the penultimate pooling layer.

  Returns:
    Tensor or Tensors corresponding to computed `output_tensor`.

  Raises:
    ValueError: If images are not the correct size.
    ValueError: If neither `graph_def` nor `default_graph_def_fn` are
      provided.
  """
  images = _validate_images(images, image_size)

  if graph_def is None:
    if default_graph_def_fn is None:
      raise ValueError('If `graph_def` is `None`, must provide '
                       '`default_graph_def_fn`.')
    graph_def = default_graph_def_fn()

  activations = run_image_classifier(images, graph_def, input_tensor,
                                     output_tensor)
  if isinstance(activations, list):
    for i, activation in enumerate(activations):
      if array_ops.rank(activation) != 2:
        activations[i] = layers.flatten(activation)
  else:
    if array_ops.rank(activations) != 2:
      activations = layers.flatten(activations)
  return activations
Author: changchunli | Project: compare_gan | Lines: 50 | Source: classifier_metrics_impl.py
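A sketch of the flattening step using the static rank, which keeps the comparison in plain Python rather than comparing against a `Tensor`; the activation shape is made up:

```python
import tensorflow as tf

activations = tf.zeros([8, 5, 5, 128])  # e.g. a pooling-layer output
# Flatten everything after the batch dimension when rank exceeds 2.
if activations.shape.ndims != 2:
  batch = tf.shape(activations)[0]
  activations = tf.reshape(activations, [batch, -1])
print(activations.shape)                # (8, 3200)
```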
Example 20: _compute_energy_change
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change."""
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful, and this lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk0, axis=-1), axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk1, axis=-1), axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change, which
    # implies rejection. Recall that float-comparisons with NaN are always
    # False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))

    return math_ops.reduce_sum(x, axis=-1)
Author: Yashar78 | Project: tensorflow | Lines: 50 | Source: hmc_impl.py
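The `math_ops.range(independent_chain_ndims, rank)` idiom selects every axis after the leading chain dimensions. A sketch of the per-chain reduction, mirroring the internal `_log_sum_sq` helper with made-up shapes:

```python
import tensorflow as tf

momentum = tf.random.normal([7, 3, 4])  # 7 chains, event shape [3, 4]
independent_chain_ndims = 1
# Reduce over all non-chain axes: here axes [1, 2].
axis = tf.range(independent_chain_ndims, tf.rank(momentum))
log_sum_sq = tf.math.log(tf.reduce_sum(tf.square(momentum), axis=axis))
print(log_sum_sq.shape)                 # (7,)
```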
Note: The examples of the tensorflow.python.ops.array_ops.rank function in this article were collected by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors, and redistribution and use must follow the corresponding project's License. Do not reproduce without permission.