This article collects typical usage examples of the Python function tensorflow.python.ops.check_ops.assert_rank_at_least. If you are wondering what assert_rank_at_least does, how to call it, or want to see it used in real code, the curated examples below should help.
The article presents 20 code examples of assert_rank_at_least, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
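Before the collected examples, here is a minimal, self-contained sketch of the typical graph-mode pattern. This sketch is our own illustration rather than one of the examples below, and it assumes the TensorFlow 1.x internal module layout that the examples use:

from tensorflow.python.framework import constant_op, ops
from tensorflow.python.ops import array_ops, check_ops

# A rank-2 tensor; asking for rank at least 2 therefore succeeds.
x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]], name="x")

# assert_rank_at_least returns an assert Op; when the static rank is already
# known to be too small it raises ValueError at graph-construction time.
assertion = check_ops.assert_rank_at_least(
    x, 2, message="x must be a (batch of) matrix")

# Gate downstream computation on the assertion via control_dependencies,
# the same pattern used throughout the examples below.
with ops.control_dependencies([assertion]):
  y = array_ops.identity(x)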
Example 1: test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
  with self.test_session():
    tensor = constant_op.constant([1, 2], name="my_tensor")
    desired_rank = 1
    with ops.control_dependencies(
        [check_ops.assert_rank_at_least(tensor, desired_rank)]):
      array_ops.identity(tensor).eval()
Author: 1000sprites, Project: tensorflow, Lines: 7, Source: check_ops_test.py
Example 2: _assert_valid_alpha
def _assert_valid_alpha(self, alpha, validate_args):
  alpha = ops.convert_to_tensor(alpha, name="alpha")
  if not validate_args:
    return alpha
  return control_flow_ops.with_dependencies(
      [check_ops.assert_rank_at_least(alpha, 1),
       check_ops.assert_positive(alpha)], alpha)
Author: ivankreso, Project: tensorflow, Lines: 7, Source: dirichlet_multinomial.py
Example 3: test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
  with self.test_session():
    tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
    desired_rank = 1
    with ops.control_dependencies(
        [check_ops.assert_rank_at_least(tensor, desired_rank)]):
      array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
Author: 1000sprites, Project: tensorflow, Lines: 7, Source: check_ops_test.py
Example 4: test_rank_one_tensor_raises_if_rank_too_small_static_rank
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
  tensor = constant_op.constant([1, 2], name="my_tensor")
  desired_rank = 2
  with self.assertRaisesRegexp(ValueError, "rank at least 2"):
    with ops.control_dependencies(
        [check_ops.assert_rank_at_least(tensor, desired_rank)]):
      self.evaluate(array_ops.identity(tensor))
Author: AbhinavJain13, Project: tensorflow, Lines: 7, Source: check_ops_test.py
Example 5: maybe_check_quadrature_param
def maybe_check_quadrature_param(param, name, validate_args):
  """Helper which checks validity of `loc` and `scale` init args."""
  with ops.name_scope(name="check_" + name, values=[param]):
    assertions = []
    if param.shape.ndims is not None:
      if param.shape.ndims == 0:
        raise ValueError("Mixing params must be a (batch of) vector; "
                         "{}.rank={} is not at least one.".format(
                             name, param.shape.ndims))
    elif validate_args:
      assertions.append(check_ops.assert_rank_at_least(
          param, 1,
          message=("Mixing params must be a (batch of) vector; "
                   "{}.rank is not at least one.".format(
                       name))))

    # TODO(jvdillon): Remove once we support k-mixtures.
    if param.shape.with_rank_at_least(1)[-1] is not None:
      if param.shape[-1].value != 1:
        raise NotImplementedError("Currently only bimixtures are supported; "
                                  "{}.shape[-1]={} is not 1.".format(
                                      name, param.shape[-1].value))
    elif validate_args:
      assertions.append(check_ops.assert_equal(
          array_ops.shape(param)[-1], 1,
          message=("Currently only bimixtures are supported; "
                   "{}.shape[-1] is not 1.".format(name))))

    if assertions:
      return control_flow_ops.with_dependencies(assertions, param)
    return param
Author: bikong2, Project: tensorflow, Lines: 31, Source: vector_diffeomixture.py
Example 6: _check_alpha
def _check_alpha(self, alpha):
  alpha = ops.convert_to_tensor(alpha, name='alpha')
  if not self.strict:
    return alpha
  return control_flow_ops.with_dependencies(
      [check_ops.assert_rank_at_least(alpha, 1),
       check_ops.assert_positive(alpha)], alpha)
Author: Brandon-Tai, Project: tensorflow, Lines: 7, Source: dirichlet_multinomial.py
Example 7: test_rank_zero_tensor_raises_if_rank_too_small_static_rank
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
  with self.test_session():
    tensor = constant_op.constant(1, name="my_tensor")
    desired_rank = 1
    with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
      with ops.control_dependencies(
          [check_ops.assert_rank_at_least(tensor, desired_rank)]):
        array_ops.identity(tensor).eval()
Author: 1000sprites, Project: tensorflow, Lines: 8, Source: check_ops_test.py
Example 8: test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
  with self.test_session():
    tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
    desired_rank = 2
    with ops.control_dependencies(
        [check_ops.assert_rank_at_least(tensor, desired_rank)]):
      with self.assertRaisesOpError("my_tensor.*rank"):
        array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
Author: 1000sprites, Project: tensorflow, Lines: 8, Source: check_ops_test.py
Example 9: lbeta
def lbeta(x, name='lbeta'):
  r"""Computes `ln(|Beta(x)|)`, reducing along the last dimension.

  Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define

  ```Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)```

  And for `n + 1` dimensional `x` with shape `[N1, ..., Nn, K]`, we define
  `lbeta(x)[i1, ..., in] = Log(|Beta(x[i1, ..., in, :])|)`. In other words,
  the last dimension is treated as the `z` vector.

  Note that if `z = [u, v]`, then
  `Beta(z) = int_0^1 t^{u-1} (1 - t)^{v-1} dt`, which defines the traditional
  bivariate beta function.

  Args:
    x: A rank `n + 1` `Tensor` with type `float`, or `double`.
    name: A name for the operation (optional).

  Returns:
    The logarithm of `|Beta(x)|` reducing along the last dimension.

  Raises:
    ValueError: If `x` is empty with rank one or less.
  """
  with ops.op_scope([x], name):
    x = ops.convert_to_tensor(x, name='x')
    x = control_flow_ops.with_dependencies(
        [check_ops.assert_rank_at_least(x, 1)], x)

    is_empty = math_ops.equal(0, array_ops.size(x))

    def nonempty_lbeta():
      last_index = array_ops.size(array_ops.shape(x)) - 1
      log_prod_gamma_x = math_ops.reduce_sum(
          math_ops.lgamma(x),
          reduction_indices=last_index)
      sum_x = math_ops.reduce_sum(x, reduction_indices=last_index)
      log_gamma_sum_x = math_ops.lgamma(sum_x)
      result = log_prod_gamma_x - log_gamma_sum_x
      result.set_shape(x.get_shape()[:-1])
      return result

    def empty_lbeta():
      # If x is empty, return version with one less dimension.
      # Can only do this if rank >= 2.
      assertion = check_ops.assert_rank_at_least(x, 2)
      with ops.control_dependencies([assertion]):
        return array_ops.squeeze(x, squeeze_dims=[0])

    static_size = x.get_shape().num_elements()
    if static_size is not None:
      if static_size > 0:
        return nonempty_lbeta()
      else:
        return empty_lbeta()
    else:
      return control_flow_ops.cond(is_empty, empty_lbeta, nonempty_lbeta)
Author: 0-T-0, Project: tensorflow, Lines: 58, Source: special_math_ops.py
Example 10: _forward
def _forward(self, x):
  if self.validate_args:
    is_matrix = check_ops.assert_rank_at_least(x, 2)
    shape = array_ops.shape(x)
    is_square = check_ops.assert_equal(shape[-2], shape[-1])
    x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
  # For safety, explicitly zero-out the upper triangular part.
  x = array_ops.matrix_band_part(x, -1, 0)
  return math_ops.matmul(x, x, adjoint_b=True)
Author: ebrevdo, Project: tensorflow, Lines: 9, Source: cholesky_outer_product.py
Example 11: __init__
def __init__(self,
             alpha,
             validate_args=False,
             allow_nan_stats=True,
             name="Dirichlet"):
  """Initialize a batch of Dirichlet distributions.

  Args:
    alpha: Positive floating point tensor with shape broadcastable to
      `[N1,..., Nm, k]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm`
      different `k` class Dirichlet distributions.
    validate_args: `Boolean`, default `False`. Whether to assert valid values
      for parameters `alpha` and `x` in `prob` and `log_prob`. If `False`,
      correct behavior is not guaranteed.
    allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return NaN for this statistic.
    name: The name to prefix Ops created by this distribution class.

  Examples:

  ```python
  # Define 1-batch of 2-class Dirichlet distributions,
  # also known as a Beta distribution.
  dist = Dirichlet([1.1, 2.0])

  # Define a 2-batch of 3-class distributions.
  dist = Dirichlet([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  ```
  """
  parameters = locals()
  parameters.pop("self")
  with ops.name_scope(name, values=[alpha]) as ns:
    alpha = ops.convert_to_tensor(alpha, name="alpha")
    with ops.control_dependencies([
        check_ops.assert_positive(alpha),
        check_ops.assert_rank_at_least(alpha, 1)
    ] if validate_args else []):
      self._alpha = array_ops.identity(alpha, name="alpha")
      self._alpha_sum = math_ops.reduce_sum(alpha,
                                            reduction_indices=[-1],
                                            keep_dims=False)
  super(Dirichlet, self).__init__(
      dtype=self._alpha.dtype,
      validate_args=validate_args,
      allow_nan_stats=allow_nan_stats,
      is_continuous=True,
      is_reparameterized=False,
      parameters=parameters,
      graph_parents=[self._alpha, self._alpha_sum],
      name=ns)
Author: curtiszimmerman, Project: tensorflow, Lines: 53, Source: dirichlet.py
Example 12: __init__
def __init__(self,
             alpha,
             validate_args=True,
             allow_nan_stats=False,
             name="Dirichlet"):
  """Initialize a batch of Dirichlet distributions.

  Args:
    alpha: Positive floating point tensor with shape broadcastable to
      `[N1,..., Nm, k]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm`
      different `k` class Dirichlet distributions.
    validate_args: Whether to assert valid values for parameters `alpha` and
      `x` in `prob` and `log_prob`. If `False`, correct behavior is not
      guaranteed.
    allow_nan_stats: Boolean, default `False`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return NaN for this statistic.
    name: The name to prefix Ops created by this distribution class.

  Examples:

  ```python
  # Define 1-batch of 2-class Dirichlet distributions,
  # also known as a Beta distribution.
  dist = Dirichlet([1.1, 2.0])

  # Define a 2-batch of 3-class distributions.
  dist = Dirichlet([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  ```
  """
  with ops.op_scope([alpha], name):
    alpha = ops.convert_to_tensor(alpha, name="alpha_before_deps")
    with ops.control_dependencies([
        check_ops.assert_positive(alpha), check_ops.assert_rank_at_least(
            alpha, 1)
    ] if validate_args else []):
      alpha = array_ops.identity(alpha, name="alpha")

    self._alpha = alpha
    self._name = name

    # Used for mean/mode/variance/entropy computations
    self._alpha_0 = math_ops.reduce_sum(alpha,
                                        reduction_indices=[-1],
                                        keep_dims=False)

    self._get_batch_shape = self._alpha_0.get_shape()
    self._get_event_shape = self._alpha.get_shape().with_rank_at_least(1)[-1:]

    self._validate_args = validate_args
    self._allow_nan_stats = allow_nan_stats
Author: 10imaging, Project: tensorflow, Lines: 52, Source: dirichlet.py
Example 13: _prob
def _prob(self, x):
  if self.validate_args:
    is_vector_check = check_ops.assert_rank_at_least(x, 1)
    right_vec_space_check = check_ops.assert_equal(
        self.event_shape_tensor(),
        array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
        message=
        "Argument 'x' not defined in the same space R^k as this distribution")
    with ops.control_dependencies([is_vector_check]):
      with ops.control_dependencies([right_vec_space_check]):
        x = array_ops.identity(x)
  return math_ops.cast(
      math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
      dtype=self.dtype)
Author: LUTAN, Project: tensorflow, Lines: 14, Source: deterministic.py
Example 14: _maybe_assert_valid_concentration
def _maybe_assert_valid_concentration(self, concentration, validate_args):
  """Checks the validity of the concentration parameter."""
  if not validate_args:
    return concentration
  return control_flow_ops.with_dependencies([
      check_ops.assert_positive(
          concentration,
          message="Concentration parameter must be positive."),
      check_ops.assert_rank_at_least(
          concentration, 1,
          message="Concentration parameter must have >=1 dimensions."),
      check_ops.assert_less(
          1, array_ops.shape(concentration)[-1],
          message="Concentration parameter must have event_size >= 2."),
  ], concentration)
Author: AlbertXiebnu, Project: tensorflow, Lines: 15, Source: dirichlet_multinomial.py
Example 15: _check_chol
def _check_chol(self, chol):
  """Verify that `chol` is proper."""
  chol = ops.convert_to_tensor(chol, name='chol')
  if not self.verify_pd:
    return chol

  shape = array_ops.shape(chol)
  rank = array_ops.rank(chol)

  is_matrix = check_ops.assert_rank_at_least(chol, 2)
  is_square = check_ops.assert_equal(
      array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

  deps = [is_matrix, is_square]
  deps.append(check_ops.assert_positive(self._diag))

  return control_flow_ops.with_dependencies(deps, chol)
Author: 31H0B1eV, Project: tensorflow, Lines: 17, Source: operator_pd_cholesky.py
Example 16: _check_logits_final_dim
def _check_logits_final_dim(logits, expected_logits_dimension):
  """Checks that logits shape is [D0, D1, ... DN, logits_dimension]."""
  with ops.name_scope(None, 'logits', (logits,)) as scope:
    logits = math_ops.to_float(logits)
    logits_shape = array_ops.shape(logits)
    assert_rank = check_ops.assert_rank_at_least(
        logits, 2, data=[logits_shape],
        message='logits shape must be [D0, D1, ... DN, logits_dimension]')
    with ops.control_dependencies([assert_rank]):
      static_shape = logits.shape
      if static_shape.ndims is not None and static_shape[-1] is not None:
        if static_shape[-1] != expected_logits_dimension:
          raise ValueError(
              'logits shape must be [D0, D1, ... DN, logits_dimension], '
              'got %s.' % (static_shape,))
        return logits
      assert_dimension = check_ops.assert_equal(
          expected_logits_dimension, logits_shape[-1], data=[logits_shape],
          message='logits shape must be [D0, D1, ... DN, logits_dimension]')
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(logits, name=scope)
Author: AbhinavJain13, Project: tensorflow, Lines: 21, Source: head.py
Example 17: _assertions
def _assertions(self, x):
  if not self.validate_args:
    return []
  shape = array_ops.shape(x)
  is_matrix = check_ops.assert_rank_at_least(
      x, 2, message="Input must have rank at least 2.")
  is_square = check_ops.assert_equal(
      shape[-2], shape[-1], message="Input must be a square matrix.")
  above_diagonal = array_ops.matrix_band_part(
      array_ops.matrix_set_diag(
          x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)),
      0, -1)
  is_lower_triangular = check_ops.assert_equal(
      above_diagonal, array_ops.zeros_like(above_diagonal),
      message="Input must be lower triangular.")
  # A lower triangular matrix is nonsingular iff all its diagonal entries are
  # nonzero.
  diag_part = array_ops.matrix_diag_part(x)
  is_nonsingular = check_ops.assert_none_equal(
      diag_part, array_ops.zeros_like(diag_part),
      message="Input must have all diagonal entries nonzero.")
  return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
Author: Ajaycs99, Project: tensorflow, Lines: 22, Source: matrix_inverse_tril.py
Example 18: test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
  tensor = constant_op.constant(1, name="my_tensor")
  desired_rank = 0
  with ops.control_dependencies(
      [check_ops.assert_rank_at_least(tensor, desired_rank)]):
    self.evaluate(array_ops.identity(tensor))
Author: AbhinavJain13, Project: tensorflow, Lines: 6, Source: check_ops_test.py
Example 19: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
  # Let Y be a symmetric, positive definite matrix and write:
  #   Y = X X.T
  # where X is lower-triangular.
  #
  # Observe that,
  #   dY[i,j]/dX[a,b]
  #   = d/dX[a,b] { X[i,:] X[j,:] }
  #   = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
  #
  # To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
  # symmetric and X is lower-triangular, we need vectors of dimension:
  #   d = p (p + 1) / 2
  # where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
  #   k = { i (i + 1) / 2 + j   i>=j
  #       { undef               i<j
  # and assume zero-based indexes. When k is undef, the element is dropped.
  # Example:
  #             j  k
  #          0 1 2 3  /
  #      0 [ 0 . . . ]
  #   i  1 [ 1 2 . . ]
  #      2 [ 3 4 5 . ]
  #      3 [ 6 7 8 9 ]
  # Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
  # slight abuse: k(i,j)=undef means the element is dropped.)
  #
  # We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
  # defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
  # In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
  # (1) j<=i<a thus i,j!=a.
  # (2) i=a>j  thus i,j!=a.
  #
  # Since the Jacobian is lower-triangular, we need only compute the product
  # of diagonal elements:
  #   d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
  #   = X[j,j] + I[i=j] X[i,j]
  #   = 2 X[j,j].
  # Since there is a 2 X[j,j] term for every lower-triangular element of X we
  # conclude:
  #   |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
  if self._static_event_ndims == 0:
    if self.validate_args:
      is_positive = check_ops.assert_positive(
          x, message="All elements must be positive.")
      x = control_flow_ops.with_dependencies([is_positive], x)
    return np.log(2.) + math_ops.log(x)

  diag = array_ops.matrix_diag_part(x)

  # We now ensure diag is columnar. Eg, if `diag = [1, 2, 3]` then the output
  # is `[[1], [2], [3]]` and if `diag = [[1, 2, 3], [4, 5, 6]]` then the
  # output is unchanged.
  diag = self._make_columnar(diag)

  if self.validate_args:
    is_matrix = check_ops.assert_rank_at_least(
        x, 2, message="Input must be a (batch of) matrix.")
    shape = array_ops.shape(x)
    is_square = check_ops.assert_equal(
        shape[-2], shape[-1],
        message="Input must be a (batch of) square matrix.")
    # Assuming lower-triangular means we only need check diag>0.
    is_positive_definite = check_ops.assert_positive(
        diag, message="Input must be positive definite.")
    x = control_flow_ops.with_dependencies(
        [is_matrix, is_square, is_positive_definite], x)

  # Create a vector equal to: [p, p-1, ..., 2, 1].
  if x.get_shape().ndims is None or x.get_shape()[-1].value is None:
    p_int = array_ops.shape(x)[-1]
    p_float = math_ops.cast(p_int, dtype=x.dtype)
  else:
    p_int = x.get_shape()[-1].value
    p_float = np.array(p_int, dtype=x.dtype.as_numpy_dtype)
  exponents = math_ops.linspace(p_float, 1., p_int)

  sum_weighted_log_diag = array_ops.squeeze(
      math_ops.matmul(math_ops.log(diag),
                      exponents[..., array_ops.newaxis]),
      squeeze_dims=-1)
  fldj = p_float * np.log(2.) + sum_weighted_log_diag

  return fldj
Author: Immexxx, Project: tensorflow, Lines: 84, Source: cholesky_outer_product_impl.py
Example 20: _check_dense_labels_match_logits_and_reshape
def _check_dense_labels_match_logits_and_reshape(
    labels, logits, expected_labels_dimension):
  """Checks that labels shape matches logits and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Then labels
  shape must be [D0, D1, ... DN, expected_labels_dimension].
  If expected_labels_dimension=1, labels could be [D0, D1, ... DN] and this
  method reshapes them to [D0, D1, ... DN, 1].

  Args:
    labels: labels Tensor.
    logits: logits Tensor.
    expected_labels_dimension: Integer.

  Returns:
    Validated and reshaped labels Tensor.

  Raises:
    ValueError: If labels is a SparseTensor.
    ValueError: If labels shape is statically defined and fails validation.
    OpError: If labels shape is not statically defined and fails validation.
  """
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contain '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  with ops.name_scope(None, 'labels', (labels, logits)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError(
          'SparseTensor labels are not supported. '
          'labels must be a Tensor of shape [D0, D1, ..., DN, %s], '
          'e.g. [batch_size, %s]. '
          'Suggested Fix (1): Check the label feature in your data. '
          'Each example must contain %s value(s). If not, your choice of label '
          'was probably incorrect. '
          'Suggested Fix (2): In your input_fn, use '
          'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
          '' % (expected_labels_dimension, expected_labels_dimension,
                expected_labels_dimension))
    if (labels.shape.ndims is not None and logits.shape.ndims is not None and
        labels.shape.ndims == logits.shape.ndims - 1):
      labels = array_ops.expand_dims(labels, -1)
    labels_shape = array_ops.shape(labels)
    logits_shape = array_ops.shape(logits)
    err_msg = (
        'labels shape must be [D0, D1, ... DN, {}]. '
        'Suggested Fix: check your n_classes argument to the estimator '
        'and/or the shape of your label.'.format(expected_labels_dimension))
    assert_rank = check_ops.assert_rank_at_least(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape.ndims is not None:
        dim1 = static_shape[-1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s. Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      expected_labels_shape = array_ops.concat(
          [logits_shape[:-1], [expected_labels_dimension]], axis=0)
      assert_dimension = check_ops.assert_equal(
          expected_labels_shape, labels_shape, message=err_msg,
          data=['expected_labels_shape: ', expected_labels_shape,
                'labels_shape: ', labels_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Author: AbhinavJain13, Project: tensorflow, Lines: 69, Source: head.py
Note: The tensorflow.python.ops.check_ops.assert_rank_at_least examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.