This article collects typical usage examples of the Python function tensorflow.python.ops.gradient_checker.compute_gradient. If you have been wondering what compute_gradient does, how to call it, or where to find working examples, the curated code samples below should help.
Twenty code examples of compute_gradient are shown below, sorted by popularity by default. If you find an example useful, please upvote it; your feedback helps the system recommend better Python code samples.
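Before diving into the examples, here is a minimal, self-contained sketch of the typical call pattern (TF 1.x graph mode, which all of the examples below use; the test class, op, and tolerances are invented for illustration). compute_gradient returns a pair of Jacobians: one computed symbolically via the op's registered gradient (jacob_t) and one estimated numerically by central finite differences (jacob_n); a gradient implementation is considered correct when the two agree within tolerance.

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class MinimalGradientCheckTest(test.TestCase):

  def testSquareGradient(self):
    x_shape = [3, 2]
    x_np = np.random.random_sample(x_shape).astype(np.float64)
    with self.cached_session():
      x = ops.convert_to_tensor(x_np)
      y = math_ops.square(x)  # op whose gradient we want to verify
      # jacob_t: Jacobian from the registered (symbolic) gradient;
      # jacob_n: Jacobian estimated by central finite differences.
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          x, x_shape, y, x_shape, x_init_value=x_np, delta=1e-5)
    self.assertAllClose(jacob_t, jacob_n, rtol=1e-6, atol=1e-6)


if __name__ == "__main__":
  test.main()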
Example 1: _compareGradientY
def _compareGradientY(self,
                      x,
                      y,
                      np_func,
                      tf_func,
                      numeric_gradient_type=None):
  z = np_func(x, y)
  zs = list(z.shape)
  with self.cached_session():
    inx = ops.convert_to_tensor(x)
    iny = ops.convert_to_tensor(y)
    if x.dtype in (np.float32, np.float64):
      out = 1.1 * tf_func(inx, iny)
    else:
      out = tf_func(inx, iny)
    ys = list(np.shape(y))
    jacob_t, jacob_n = gradient_checker.compute_gradient(
        iny, ys, out, zs, x_init_value=y)
    if numeric_gradient_type is not None:
      xf = x.astype(numeric_gradient_type)
      yf = y.astype(numeric_gradient_type)
      inxf = ops.convert_to_tensor(xf)
      inyf = ops.convert_to_tensor(yf)
      outf = tf_func(inxf, inyf)
      _, jacob_n = gradient_checker.compute_gradient(
          inyf, ys, outf, zs, x_init_value=yf)
      jacob_n = jacob_n.astype(x.dtype)
  tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
  self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
Developer ID: HughKu, Project: tensorflow, Lines of code: 29, Source: cwise_ops_binary_test.py
Example 2: testGradientMatchesSegmentSum
def testGradientMatchesSegmentSum(self):
  # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
  # and compare the outputs, which should be identical.
  # NB: for this test to work, indices must be valid for SegmentSum, namely
  # it must be sorted, the indices must be contiguous, and num_segments
  # must be max(indices) + 1.
  indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
  n = len(indices)
  num_cols = 2
  shape = [n, num_cols]
  num_segments = max(indices) + 1
  for dtype in self.differentiable_dtypes:
    with self.cached_session(use_gpu=True):
      tf_x, np_x = self._input(shape, dtype=dtype)
      # Results from UnsortedSegmentSum
      unsorted_s = math_ops.unsorted_segment_sum(
          data=tf_x, segment_ids=indices, num_segments=num_segments)
      unsorted_jacob_t, unsorted_jacob_n = (
          gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
                                            [num_segments, num_cols],
                                            x_init_value=np_x, delta=1))
      # Results from SegmentSum
      sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
      sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
          tf_x,
          shape,
          sorted_s, [num_segments, num_cols],
          x_init_value=np_x,
          delta=1)
      self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
      self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
Developer ID: abhinav-upadhyay, Project: tensorflow, Lines of code: 32, Source: segment_reduction_ops_test.py
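For intuition (this is not part of the original test), the exact Jacobian of a segment sum is a sparse 0/1 matrix: entry (flattened input element, flattened output element) is 1 exactly when the input row's segment id matches the output segment and the columns agree. compute_gradient lays its Jacobians out with one row per input element and one column per output element (row-major flattening), so a plain numpy sketch of what both checkers above should converge to looks like this; all names here are illustrative:

import numpy as np

indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n, num_cols = len(indices), 2
num_segments = max(indices) + 1
# Expected Jacobian: d out[s, c] / d x[i, c'] = 1 iff indices[i] == s and c == c'.
expected = np.zeros((n * num_cols, num_segments * num_cols))
for i, s in enumerate(indices):
  for c in range(num_cols):
    expected[i * num_cols + c, s * num_cols + c] = 1.0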
Example 3: _compareCpu
def _compareCpu(self, x, p, conjugate=False):
  np_ans = self._np_transpose(x, p)
  if conjugate:
    np_ans = np.conj(np_ans)
  with self.test_session(use_gpu=False):
    inx = ops.convert_to_tensor(x)
    y = array_ops.transpose(inx, p, conjugate=conjugate)
    tf_ans = y.eval()
    self.assertShapeEqual(np_ans, y)
    self.assertAllEqual(np_ans, tf_ans)

    jacob_t = None
    # Gradient check on CPU.
    xs = list(np.shape(x))
    ys = list(np.shape(tf_ans))
    if x.dtype in [np.float32, np.complex64]:
      jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                           1e-2)
      self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
    elif x.dtype in [np.float64, np.complex128]:
      jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                           1e-2)
      self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

    return tf_ans, jacob_t
Developer ID: HughKu, Project: tensorflow, Lines of code: 25, Source: transpose_op_test.py
Example 4: _compareGpu
def _compareGpu(self, x, p, conjugate=False):
  if p is None:
    rank = x.ndim
    perm = (rank - 1) - np.arange(rank)
  else:
    perm = p
  np_ans = self._np_transpose(x, perm)
  if conjugate:
    np_ans = np.conj(np_ans)
  with self.test_session(use_gpu=True):
    inx = ops.convert_to_tensor(x)
    y = array_ops.transpose(inx, p, conjugate=conjugate)
    tf_ans = y.eval()

    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, y)

    jacob_t = None
    # Gradient check on GPU.
    xs = list(np.shape(x))
    ys = list(np.shape(tf_ans))
    if x.dtype == np.float32:
      jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                           1e-2)
      self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
    elif x.dtype == np.float64:
      jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                           1e-2)
      self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

    return tf_ans, jacob_t
Developer ID: ThunderQi, Project: tensorflow, Lines of code: 31, Source: transpose_op_test.py
Example 5: _compute_gradient_error_float16
def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):
  """Computes the gradient error for float16 inputs and/or outputs.

  This returns the same value as gradient_checker.compute_gradient_error. The
  difference is that gradient_checker.compute_gradient_error does not
  numerically compute the gradients in a numerically stable way for float16
  tensors. To fix this, this function requires float32 versions of x and y to
  numerically compute the gradients, to compare with the float16 symbolically
  computed gradients.

  Args:
    x: The input tensor.
    x32: A float32 version of x.
    x_shape: The shape of x.
    y: The output tensor.
    y32: A float32 version of y. Must be calculated based on x32, not x.
    y_shape: The shape of y.

  Returns:
    The maximum error in between the two Jacobians, as in
    gradient_checker.compute_gradient_error.
  """
  x_init_val = np.random.random_sample(x_shape).astype(np.float16)
  x32_init_val = x_init_val.astype(np.float32)

  # TODO(reedwm): Do not perform the unnecessary computations in
  # compute_gradient, since they double the computation time of this function.
  theoretical_grad, _ = gradient_checker.compute_gradient(
      x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
  _, numerical_grad = gradient_checker.compute_gradient(
      x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)
  return np.fabs(theoretical_grad - numerical_grad).max()
Developer ID: SylChan, Project: tensorflow, Lines of code: 32, Source: nn_fused_batchnorm_test.py
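For context, a hypothetical call to the helper above might look like the sketch below, written as if inside a test method of the same class. It is illustrative only: the real test builds y and y32 from fused batch norm rather than tanh, and the tolerance here is invented. The key point is that the float16 graph supplies the symbolic (theoretical) Jacobian while the float32 twin, built from x32, supplies a numerically stable finite-difference reference.

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

x_shape = [2, 3]
with self.cached_session():
  x = array_ops.placeholder(dtypes.float16, shape=x_shape)
  x32 = array_ops.placeholder(dtypes.float32, shape=x_shape)
  y = math_ops.tanh(x)      # float16 graph: its symbolic gradient is under test
  y32 = math_ops.tanh(x32)  # float32 twin, built from x32, for the numeric side
  err = self._compute_gradient_error_float16(x, x32, x_shape, y, y32, x_shape)
  self.assertLess(err, 2e-3)  # illustrative tolerance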
Example 6: testEmptyFails
def testEmptyFails(self):
  with ops.Graph().as_default() as g:
    with self.session(graph=g):
      x = array_ops.placeholder(dtypes.float32)
      with g.gradient_override_map({"Identity": "BadGrad"}):
        y = array_ops.identity(x)
      bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
      with self.assertRaisesRegexp(ValueError, bad):
        gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
      with self.assertRaisesRegexp(ValueError, bad):
        gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 11, Source: gradient_checker_test.py
Example 7: Test
def Test(self):
  with self.session(use_gpu=True):
    np.random.seed(1)
    a_np = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    a = constant_op.constant(a_np)
    if functor_.__name__ == 'matrix_square_root':
      # Square the input matrix to ensure that its matrix square root exists
      a = math_ops.matmul(a, a)
      a_np = a.eval()
    b = functor_(a, **kwargs_)

    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(dtype_).eps
    delta = epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    tol = 1e-6 if dtype_ == np.float64 else 0.05
    theoretical, numerical = gradient_checker.compute_gradient(
        a,
        a.get_shape().as_list(),
        b,
        b.get_shape().as_list(),
        x_init_value=a_np,
        delta=delta)
    self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer ID: abhinav-upadhyay, Project: tensorflow, Lines of code: 28, Source: linalg_grad_test.py
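The O(epsilon^{1/3}) stepsize used here (and again in Examples 8, 9, 11, and 12) follows from the standard error analysis of a central difference: truncation error scales as O(delta^2) while floating-point roundoff contributes O(epsilon/delta), and their sum is minimized when delta is on the order of epsilon^{1/3}. A quick sketch of the concrete values this heuristic yields:

import numpy as np

# Central-difference stepsize delta = eps**(1/3) for each dtype.
for dtype in (np.float32, np.float64):
  eps = np.finfo(dtype).eps
  print(dtype.__name__, eps, eps ** (1.0 / 3.0))
# float32: eps ~= 1.19e-07  ->  delta ~= 4.9e-03
# float64: eps ~= 2.22e-16  ->  delta ~= 6.1e-06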
Example 8: Test
def Test(self):
  if not use_static_shape_ or a_np_.dtype in (np.int32, np.float16):
    self.skipTest("Skipping infeasible gradient test.")

  # Transpose and possibly conjugate a_np_ and b_np_ according to the
  # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
  # results in a valid matrix multiplication and produces the same result as
  # np.matrix(a_np_) * np.matrix(b_np_)
  effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
  effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
  epsilon = np.finfo(a_np_.dtype).eps
  delta = epsilon**(1.0 / 3.0)
  tol = 20 * delta
  with self.test_session(use_gpu=True):
    a = constant_op.constant(effective_a_np)
    b = constant_op.constant(effective_b_np)
    res = math_ops.matmul(a, b, **kwargs_)
    for x, x_init in [a, effective_a_np], [b, effective_b_np]:
      theoretical, numerical = gradient_checker.compute_gradient(
          x,
          x_init.shape,
          res, [a_np_.shape[0], b_np_.shape[1]],
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
Developer ID: AlbertXiebnu, Project: tensorflow, Lines of code: 26, Source: matmul_op_test.py
Example 9: Test
def Test(self):
  np.random.seed(1)
  n = shape_[-1]
  batch_shape = shape_[:-2]
  a = np.random.uniform(
      low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
  a += np.conj(a.T)
  a = np.tile(a, batch_shape + (1, 1))
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(dtype_).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  # tolerance obtained by looking at actual differences using
  # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
  if dtype_ == np.float32:
    tol = 1e-2
  else:
    tol = 1e-7
  with self.test_session():
    tf_a = constant_op.constant(a)
    tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
    for b in tf_e, tf_v:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
      x_init += np.conj(x_init.T)
      x_init = np.tile(x_init, batch_shape + (1, 1))
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer ID: AlbertXiebnu, Project: tensorflow, Lines of code: 33, Source: self_adjoint_eig_op_test.py
Example 10: testGradientWithEmptySegmentsAtEnd
def testGradientWithEmptySegmentsAtEnd(self):
  shape = [10, 4]

  num_segments = 5
  segment_indices = [0, 1, 2, 2]
  num_indices = len(segment_indices)
  for tf_op in [
      math_ops.sparse_segment_sum_with_num_segments,
      math_ops.sparse_segment_mean_with_num_segments,
  ]:
    with self.cached_session():
      tf_indices, _, tf_x, np_x = self._sparse_input(
          shape, num_indices, dtype=dtypes_lib.float64)
      s = tf_op(
          data=tf_x,
          indices=tf_indices,
          segment_ids=segment_indices,
          num_segments=num_segments)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          tf_x,
          shape,
          s, [5, 4],
          x_init_value=np_x.astype(np.double),
          delta=1)
    self.assertAllClose(jacob_t, jacob_n)
Developer ID: abhinav-upadhyay, Project: tensorflow, Lines of code: 25, Source: segment_reduction_ops_test.py
Example 11: Test
def Test(self):
  # TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
  # GPU test for matrix_solve.
  use_gpu = False if functor_ == linalg_ops.matrix_solve else True

  with self.test_session(use_gpu=use_gpu):
    np.random.seed(1)
    a_np = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    a = constant_op.constant(a_np)

    b_np = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    b = constant_op.constant(b_np)
    c = functor_(a, b, **kwargs_)

    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(dtype_).eps
    delta = epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.04
    # The gradients for a and b may be of very different magnitudes,
    # so to not get spurious failures we test them separately.
    for factor, factor_init in [a, a_np], [b, b_np]:
      theoretical, numerical = gradient_checker.compute_gradient(
          factor,
          factor.get_shape().as_list(),
          c,
          c.get_shape().as_list(),
          x_init_value=factor_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer ID: AnishShah, Project: tensorflow, Lines of code: 35, Source: linalg_grad_test.py
Example 12: Test
def Test(self):
  np.random.seed(42)
  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
  if dtype_ in [np.complex64, np.complex128]:
    a += 1j * np.random.uniform(
        low=-1.0, high=1.0, size=shape_).astype(dtype_)
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(dtype_).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  if dtype_ in [np.float32, np.complex64]:
    tol = 3e-2
  else:
    tol = 1e-6
  with self.session(use_gpu=True):
    tf_a = constant_op.constant(a)
    tf_b = linalg_ops.qr(tf_a, full_matrices=full_matrices_)
    for b in tf_b:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=shape_).astype(dtype_)
      if dtype_ in [np.complex64, np.complex128]:
        x_init += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(dtype_)
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer ID: JonathanRaiman, Project: tensorflow, Lines of code: 30, Source: qr_op_test.py
Example 13: _compareMulGradient
def _compareMulGradient(self, data):
  # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
  # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
  # x, real parts of y and imaginary parts of y.
  with self.cached_session():
    inp = ops.convert_to_tensor(data)
    xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)

    def vec(x):  # Reshape to a vector
      return array_ops.reshape(x, [-1])

    xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

    def cplx(r, i):  # Combine to a complex vector
      return math_ops.complex(r, i)

    x, y = cplx(xr, xi), cplx(yr, yi)
    # z is x times y in complex plane.
    z = x * y
    # Defines the loss function as the sum of all coefficients of z.
    loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
    epsilon = 0.005
    jacob_t, jacob_n = gradient_checker.compute_gradient(
        inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 25, Source: cwise_ops_test.py
Example 14: test_high_dim_filter_grad
def test_high_dim_filter_grad(self):
  x_shape = [5, 10, 10]

  # Test inputs: unaries and RGB values
  unary_np = np.random.randn(*x_shape).astype(np.float32)
  rgb_np = np.random.randint(low=0, high=256, size=x_shape).astype(np.float32)

  with self.test_session():
    unary_tf = constant_op.constant(unary_np)
    rgb_tf = constant_op.constant(rgb_np)
    y_tf = custom_module.high_dim_filter(unary_tf, rgb_tf,
                                         bilateral=True,
                                         theta_alpha=1000.,
                                         theta_beta=1000.,
                                         theta_gamma=1000.)

    out = gradient_checker.compute_gradient([unary_tf, rgb_tf],
                                            [x_shape, x_shape],
                                            y_tf, x_shape)

    # We only need to compare gradients w.r.t. unaries
    computed = out[0][0].flatten()
    estimated = out[0][1].flatten()

    mask = (computed != 0)
    computed = computed[mask]
    estimated = estimated[mask]

    difference = computed - estimated

    measure1 = np.mean(difference) / np.mean(computed)
    measure2 = np.max(difference) / np.max(computed)

    print('Gradient check: measure1 = {:.6f}, measure2 = {:.6f}'.format(measure1, measure2))

    self.assertLess(measure1, 1e-3, 'Errors found in the gradient computation.')
    self.assertLess(measure2, 2e-2, 'Errors found in the gradient computation.')
    print('Gradient check: success!')
Developer ID: lhch1994, Project: crfasrnn_keras, Lines of code: 35, Source: test_gradients.py
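Unlike the earlier examples, this one passes a list of input tensors to compute_gradient. Judging from the indexing above (out[0][0] and out[0][1]), the return value is then one (theoretical, numerical) Jacobian pair per input tensor. A short sketch of unpacking both pairs, with variable names invented for illustration:

# out[i] is the (theoretical, numerical) Jacobian pair for the i-th input.
unary_jacob_t, unary_jacob_n = out[0]  # Jacobians w.r.t. unary_tf
rgb_jacob_t, rgb_jacob_n = out[1]      # Jacobians w.r.t. rgb_tf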
Example 15: _compareCpu
def _compareCpu(self, x, np_func, tf_func, grad_rtol=None, grad_atol=None):
  if grad_rtol is None:
    grad_rtol = _default_tolerance(x.dtype)
  if grad_atol is None:
    grad_atol = _default_tolerance(x.dtype)
  np_ans = np_func(x)
  with self.test_session(use_gpu=False):
    inx = ops.convert_to_tensor(x)
    if x.dtype in (np.float32, np.float64,
                   dtypes_lib.bfloat16.as_numpy_dtype):
      y = 1.1 * tf_func(inx)
      np_ans *= 1.1
    else:
      y = tf_func(inx)
    tf_cpu = y.eval()
    self.assertShapeEqual(np_ans, y)
    if x.dtype == np.float16:
      self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
    elif x.dtype == dtypes_lib.bfloat16.as_numpy_dtype:
      self.assertAllClose(np_ans, tf_cpu, rtol=1e-2, atol=1e-2)
    else:
      self.assertAllClose(np_ans, tf_cpu)

    if x.dtype in (np.complex64, np.complex128) and tf_func == math_ops.sign:
      return  # Return early

    if x.dtype == np.float16:
      s = list(np.shape(x))
      jacob_t, _ = gradient_checker.compute_gradient(
          inx, s, y, s, x_init_value=x)
      xf = x.astype(np.float64)
      inxf = ops.convert_to_tensor(xf)
      yf = tf_func(inxf)
      _, jacob_n = gradient_checker.compute_gradient(
          inxf, s, yf, s, x_init_value=xf, delta=1e-2)
      jacob_n = jacob_n.astype(np.float16)
      self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
    elif x.dtype in (np.float32, np.complex64):
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, y, s, x_init_value=x, delta=1e-3)
      self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
    elif x.dtype in (np.float64, np.complex128):
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, y, s, x_init_value=x, delta=1e-5)
      self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
Developer ID: HughKu, Project: tensorflow, Lines of code: 47, Source: cwise_ops_unary_test.py
Example 16: _compareGradient
def _compareGradient(self, shape, axis, exclusive, reverse):
  x = np.arange(1, 9).reshape(shape).astype(np.float64)
  with self.cached_session(use_gpu=True):
    t = ops.convert_to_tensor(x)
    result = math_ops.cumprod(t, axis, exclusive, reverse)
    jacob_t, jacob_n = gradient_checker.compute_gradient(
        t, shape, result, shape, x_init_value=x, delta=1)
  self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
Developer ID: abhinav-upadhyay, Project: tensorflow, Lines of code: 8, Source: scan_ops_test.py
Example 17: testEmptySucceeds
def testEmptySucceeds(self):
  with self.cached_session():
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.identity(x)
    for grad in gradient_checker.compute_gradient(x, (0, 3), y, (0, 3)):
      self.assertEqual(grad.shape, (0, 0))
    error = gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
    self.assertEqual(error, 0)
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 8, Source: gradient_checker_test.py
Example 18: testGradient4
def testGradient4(self):
  s = [2, 3, 4, 2]
  x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
  with self.test_session():
    t = ops.convert_to_tensor(x)
    su = math_ops.reduce_max(t)
    jacob_t, jacob_n = gradient_checker.compute_gradient(
        t, s, su, [1], x_init_value=x, delta=1)
  self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
Developer ID: Immexxx, Project: tensorflow, Lines of code: 9, Source: reduction_ops_test.py
Example 19: _compareBroadcastGradient
def _compareBroadcastGradient(self, x):
  x_ = ops.convert_to_tensor(x)
  epsilon = 1e-3
  with self.cached_session():
    for args in [(x_, 0.), (0., x_)]:
      z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
      self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 9, Source: cwise_ops_test.py
Example 20: _test_grad_accuracy
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
  raw_grid = _make_grid(dtype, grid_spec)
  grid = ops.convert_to_tensor(raw_grid)
  with self.cached_session():
    fn = sm.log_ndtr if self._use_log else sm.ndtr

    # If there are N points in the grid,
    # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
    # the ith output point w.r.t. the jth grid point.  We only expect the
    # diagonal to be nonzero.
    # TODO(b/31131137): Replace tf.compat.v1.test.compute_gradient with our
    # own custom gradient evaluation to ensure we correctly handle small
    # function delta.
    grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
                                                     fn(grid),
                                                     grid_spec.shape)
    grad_eval = np.diag(grad_eval)

    # Check for NaN separately in order to get informative failures.
    self.assert_all_false(np.isnan(grad_eval))
    self.assert_all_true(grad_eval > 0.)
    # isfinite checks for NaN and Inf.
    self.assert_all_true(np.isfinite(grad_eval))

    # Do the same checks but explicitly compute the gradient.
    # (We did this because we're not sure if we trust
    # tf.test.compute_gradient.)
    grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()
    self.assert_all_false(np.isnan(grad_eval))
    if self._use_log:
      g = np.reshape(grad_eval, [-1])
      half = np.ceil(len(g) / 2)
      self.assert_all_true(g[:int(half)] > 0.)
      self.assert_all_true(g[int(half):] >= 0.)
    else:
      # The ndtr gradient will only be non-zero in the range [-14, 14] for
      # float32 and [-38, 38] for float64.
      self.assert_all_true(grad_eval >= 0.)
      # isfinite checks for NaN and Inf.
      self.assert_all_true(np.isfinite(grad_eval))

    # Versus scipy.
    if not (special and stats):
      return

    expected = stats.norm.pdf(raw_grid)
    if self._use_log:
      expected /= special.ndtr(raw_grid)
      expected[np.isnan(expected)] = 0.
    # Scipy prematurely goes to zero at some places that we don't.  So don't
    # include these in the comparison: keep only the points where scipy's
    # value is still nonzero.
    self.assertAllClose(
        expected.astype(np.float64)[expected > 0],
        grad_eval.astype(np.float64)[expected > 0],
        rtol=error_spec.rtol,
        atol=error_spec.atol)
Developer ID: aritratony, Project: tensorflow, Lines of code: 56, Source: special_math_test.py
Note: The examples of the tensorflow.python.ops.gradient_checker.compute_gradient function in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.