This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.complex. If you have been wondering how the Python complex function is used in practice, or looking for real-world examples of complex, the curated code samples below may help.
Twenty code examples of the complex function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
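As a quick orientation before the excerpts, here is a minimal sketch (written for this article, not taken from any of the projects below) of what math_ops.complex does: it combines two real tensors of matching shape and float dtype into a single complex tensor. The public-API equivalent is tf.complex.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops

# Two float32 tensors of matching shape become one complex64 tensor.
real = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
imag = constant_op.constant([3.0, 4.0], dtype=dtypes.float32)
z = math_ops.complex(real, imag)  # complex64 tensor; evaluates to [1+3j, 2+4j]
# float64 inputs would yield complex128 instead.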
Example 1: testStripDefaultAttrsInconsistentConsumerDefaults
def testStripDefaultAttrsInconsistentConsumerDefaults(self):
  if ops._USE_C_API: return  # TODO(skyewm): get this working
  export_dir = self._get_export_dir(
      "test_strip_default_attrs_no_consumer_defaults")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Add a graph with two float32 variables and a Complex Op composing them
  # with strip_default_attrs enabled. This must remove the following
  # defaults for the "Complex" Op:
  #   o "T"    : float32.   (input type)
  #   o "Tout" : complex64. (output type)
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, ["foo"], strip_default_attrs=True)

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Update the Op registry to remove defaults for all attrs ("T", "Tout") from
  # the "Complex" OpDef.
  complex_op_def = op_def_registry.get_registered_ops()["Complex"]
  original_complex_op_def = op_def_pb2.OpDef()
  original_complex_op_def.CopyFrom(complex_op_def)
  for attr_def in complex_op_def.attr:
    attr_def.ClearField("default_value")

  # Loading the SavedModel via the loader must fail because the SavedModel
  # does not have any attr values for the "Complex" node and the current
  # op registry does not have any default values for the "Complex" op.
  sess = session.Session(graph=ops.Graph())
  with self.assertRaisesRegexp(
      ValueError,
      "Expected one attr with name .*T(out)?.* in name: \"complex\".*"):
    loader.load(sess, ["foo"], export_dir)

  # Update the Op registry to change the defaults for attr "Tout"
  # (complex64 -> complex128).
  complex_op_def.CopyFrom(original_complex_op_def)
  for attr_def in complex_op_def.attr:
    if attr_def.name == "Tout":
      attr_def.default_value.type = types_pb2.DT_COMPLEX128

  # Loading the SavedModel via the loader must set the "Tout" attr_value for
  # the "Complex" node according to the latest defaults (complex128). This is
  # expected to fail the model import as there is no OpKernel registered to
  # handle attrs "T" (float32) and "Tout" (complex128).
  sess = session.Session(graph=ops.Graph())
  with self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      ".*No OpKernel was registered to support Op \'Complex\' with these "
      "attrs..*"):
    loader.load(sess, ["foo"], export_dir)
Developer: ChengYuXiang, Project: tensorflow, Lines of code: 57, Source file: saved_model_test.py
Example 2: _AngleGrad
def _AngleGrad(op, grad):
  """Returns -grad / (Im(x) + iRe(x))"""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    re = math_ops.real(x)
    im = math_ops.imag(x)
    z = math_ops.reciprocal(math_ops.complex(im, re))
    zero = constant_op.constant(0, dtype=grad.dtype)
    complex_grad = math_ops.complex(grad, zero)
    return -complex_grad * z
Developer: neuroradiology, Project: tensorflow, Lines of code: 10, Source file: math_grad.py
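A short gloss on the docstring formula (an editorial note, not part of the TensorFlow sources): writing $x = \mathrm{Re}(x) + i\,\mathrm{Im}(x)$, one has $\mathrm{Im}(x) + i\,\mathrm{Re}(x) = i\,\overline{x}$, so the returned value is $-\mathrm{grad} / (i\,\overline{x})$. The code builds exactly this denominator with math_ops.complex(im, re) and inverts it with math_ops.reciprocal.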
Example 3: testDefaultAttrStripping
def testDefaultAttrStripping(self):
  """Verifies that default attributes are stripped from a graph def."""
  # Complex Op has 2 attributes with defaults:
  #   o "T"    : float32.
  #   o "Tout" : complex64.

  # When inputs to the Complex Op are float32 instances, "T" maps to float32
  # and "Tout" maps to complex64. Since these attr values map to their
  # defaults, they must be stripped unless stripping of default attrs is
  # disabled.
  with self.cached_session():
    real_num = constant_op.constant(1.0, dtype=dtypes.float32, name="real")
    imag_num = constant_op.constant(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")

    # strip_default_attrs is enabled.
    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
        graph_def=ops.get_default_graph().as_graph_def(),
        strip_default_attrs=True)
    node_def = test_util.get_node_def_from_graph("complex",
                                                 meta_graph_def.graph_def)
    self.assertNotIn("T", node_def.attr)
    self.assertNotIn("Tout", node_def.attr)
    self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)

    # strip_default_attrs is disabled.
    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
        graph_def=ops.get_default_graph().as_graph_def(),
        strip_default_attrs=False)
    node_def = test_util.get_node_def_from_graph("complex",
                                                 meta_graph_def.graph_def)
    self.assertIn("T", node_def.attr)
    self.assertIn("Tout", node_def.attr)
    self.assertFalse(meta_graph_def.meta_info_def.stripped_default_attrs)

  # When inputs to the Complex Op are float64 instances, "T" maps to float64
  # and "Tout" maps to complex128. Since these attr values don't map to their
  # defaults, they must not be stripped.
  with self.session(graph=ops.Graph()):
    real_num = constant_op.constant(1.0, dtype=dtypes.float64, name="real")
    imag_num = constant_op.constant(2.0, dtype=dtypes.float64, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")

    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
        graph_def=ops.get_default_graph().as_graph_def(),
        strip_default_attrs=True)
    node_def = test_util.get_node_def_from_graph("complex",
                                                 meta_graph_def.graph_def)
    self.assertEqual(node_def.attr["T"].type, dtypes.float64)
    self.assertEqual(node_def.attr["Tout"].type, dtypes.complex128)
    self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
Developer: aeverall, Project: tensorflow, Lines of code: 51, Source file: meta_graph_test.py
Example 4: test_assert_non_singular_does_not_raise_for_complex_nonsingular
def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
  with self.test_session():
    x = [1., 0.]
    y = [0., 1.]
    diag = math_ops.complex(x, y)
    # Should not raise.
    linalg.LinearOperatorDiag(diag).assert_non_singular().run()
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 7, Source file: linear_operator_diag_test.py
Example 5: test_assert_positive_definite_does_not_raise_if_pd_and_complex
def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
  with self.test_session():
    x = [1., 2.]
    y = [1., 0.]
    diag = math_ops.complex(x, y)  # Re[diag] > 0.
    # Should not fail
    linalg.LinearOperatorDiag(diag).assert_positive_definite().run()
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 7, Source file: linear_operator_diag_test.py
Example 6: _test_unary_cwise_ops
def _test_unary_cwise_ops(self, ops, is_complex):
  for op in ops:
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 5])
      g.watch(x)
      if is_complex:
        y = random_ops.random_uniform([3, 5])
        g.watch(y)
        x = math_ops.complex(x, y)

    # pylint: disable=cell-var-from-loop
    output_dtypes = []

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        y1 = op(x1)
        outputs = [op(x), y1]
        if y1.dtype == dtypes.float32:
          loss = math_ops.reduce_sum(y1 * y1)
        else:
          loss = None
      if loss is not None:
        grad = g.gradient(loss, x1)
        if grad is not None:
          outputs.append(grad)
      del output_dtypes[:]
      output_dtypes.extend([t.dtype for t in outputs])
      return outputs
    # pylint: enable=cell-var-from-loop

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
Developer: aritratony, Project: tensorflow, Lines of code: 33, Source file: math_test.py
Example 7: random_normal
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
Developer: Joetz, Project: tensorflow, Lines of code: 32, Source file: linear_operator_test_util.py
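A small usage sketch for the helper above (an editorial addition; it assumes random_normal as defined above and the dtypes module from tensorflow.python.framework are in scope): requesting a complex dtype draws two independent real Gaussian tensors and combines them with math_ops.complex.

# Hypothetical call, assuming the random_normal helper above is importable:
samples = random_normal([2, 3], mean=0.0, stddev=1.0,
                        dtype=dtypes.complex64, seed=42)
# samples has shape [2, 3] and dtype complex64; its real and imaginary parts
# are independent N(0, 1) draws (the imaginary draw uses seed + 1234).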
Example 8: test_complex_tensor_with_imag_zero_doesnt_raise
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
  x = ops.convert_to_tensor([1., 0, 3])
  y = ops.convert_to_tensor([0., 0, 0])
  z = math_ops.complex(x, y)
  with self.cached_session():
    # Should not raise.
    linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Developer: AnishShah, Project: tensorflow, Lines of code: 7, Source file: linear_operator_util_test.py
Example 9: test_complex_tensor_with_nonzero_imag_raises
def test_complex_tensor_with_nonzero_imag_raises(self):
  x = ops.convert_to_tensor([1., 2, 0])
  y = ops.convert_to_tensor([1., 2, 0])
  z = math_ops.complex(x, y)
  with self.cached_session():
    with self.assertRaisesOpError("ABC123"):
      linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Developer: AnishShah, Project: tensorflow, Lines of code: 7, Source file: linear_operator_util_test.py
Example 10: _trace
def _trace(self):
  # The diagonal of the [[nested] block] circulant operator is the mean of
  # the spectrum.
  # Proof: For the [0,...,0] element, this follows from the IDFT formula.
  # Then the result follows since all diagonal elements are the same.
  # Therefore, the trace is the sum of the spectrum.

  # Get shape of diag along with the axis over which to reduce the spectrum.
  # We will reduce the spectrum over all block indices.
  if self.spectrum.get_shape().is_fully_defined():
    spec_rank = self.spectrum.get_shape().ndims
    axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32)
  else:
    spec_rank = array_ops.rank(self.spectrum)
    axis = math_ops.range(spec_rank - self.block_depth, spec_rank)

  # Real diag part "re_d".
  # Suppose spectrum.shape = [B1,...,Bb, N1, N2]
  # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N.
  # re_d_value.shape = [B1,...,Bb]
  re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis)

  if not self.dtype.is_complex:
    return math_ops.cast(re_d_value, self.dtype)

  # Imaginary part, "im_d".
  if self.is_self_adjoint:
    im_d_value = 0.
  else:
    im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis)

  return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype)
Developer: JonathanRaiman, Project: tensorflow, Lines of code: 33, Source file: linear_operator_circulant.py
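The comment's argument in compact form (an editorial summary): for a circulant operator with spectrum $H_0, \dots, H_{N-1}$, the inverse DFT gives the diagonal entry $d = \frac{1}{N}\sum_{k} H_k$, and every diagonal entry equals this same mean, so $\operatorname{trace} = N \cdot d = \sum_{k} H_k$. That is exactly the reduce_sum over the spectrum computed above, split into real and imaginary parts and reassembled with math_ops.complex.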
Example 11: test_zero_complex_tensor_raises
def test_zero_complex_tensor_raises(self):
  x = ops.convert_to_tensor([1., 2, 0])
  y = ops.convert_to_tensor([1., 2, 0])
  z = math_ops.complex(x, y)
  with self.test_session():
    with self.assertRaisesOpError("ABC123"):
      linear_operator_util.assert_no_entries_with_modulus_zero(
          z, message="ABC123").run()
Developer: LugarkPirog, Project: tensorflow, Lines of code: 8, Source file: linear_operator_util_test.py
Example 12: test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag
def test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag(self):
  with self.test_session():
    x = [1., 0.]
    y = [0., 0.]
    diag = math_ops.complex(x, y)
    operator = linalg.LinearOperatorDiag(diag)
    # Should not raise
    operator.assert_self_adjoint().run()
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 8, Source file: linear_operator_diag_test.py
Example 13: test_assert_self_adjoint_raises_if_diag_has_complex_part
def test_assert_self_adjoint_raises_if_diag_has_complex_part(self):
  with self.test_session():
    x = [1., 0.]
    y = [0., 1.]
    diag = math_ops.complex(x, y)
    operator = linalg.LinearOperatorDiag(diag)
    with self.assertRaisesOpError("imaginary.*not self-adjoint"):
      operator.assert_self_adjoint().run()
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 8, Source file: linear_operator_diag_test.py
Example 14: test_nonzero_complex_tensor_doesnt_raise
def test_nonzero_complex_tensor_doesnt_raise(self):
  x = ops.convert_to_tensor([1., 0, 3])
  y = ops.convert_to_tensor([1., 2, 0])
  z = math_ops.complex(x, y)
  with self.cached_session():
    # Should not raise.
    linear_operator_util.assert_no_entries_with_modulus_zero(
        z, message="ABC123").run()
Developer: AnishShah, Project: tensorflow, Lines of code: 8, Source file: linear_operator_util_test.py
Example 15: dct
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Currently only Type II is supported. Implemented using a length `2N` padded
  @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606

  @compatibility(scipy)
  Equivalent to scipy.fftpack.dct for the Type-II DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The DCT type to perform. Must be 2.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.

  Raises:
    ValueError: If `type` is not `2`, `n` is not `None`, `axis` is not `-1`,
      or `norm` is not `None` or `'ortho'`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
  _validate_dct_arguments(type, n, axis, norm)
  with _ops.name_scope(name, "dct", [input]):
    # We use the RFFT to compute the DCT and TensorFlow only supports float32
    # for FFTs at the moment.
    input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)

    axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]
    axis_dim_float = _math_ops.to_float(axis_dim)
    scale = 2.0 * _math_ops.exp(_math_ops.complex(
        0.0, -_math.pi * _math_ops.range(axis_dim_float) /
        (2.0 * axis_dim_float)))

    # TODO(rjryan): Benchmark performance and memory usage of the various
    # approaches to computing a DCT via the RFFT.
    dct2 = _math_ops.real(
        rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

    if norm == "ortho":
      n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
      n2 = n1 * _math_ops.sqrt(2.0)
      # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
      weights = _array_ops.pad(
          _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
          constant_values=n2)
      dct2 *= weights

    return dct2
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 57, Source file: spectral_ops.py
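The identity behind the scale factor (an editorial summary of the trick referenced in the docstring): zero-padding $x_0, \dots, x_{N-1}$ to length $2N$ gives the RFFT $Y_k = \sum_{n=0}^{N-1} x_n e^{-i\pi k n / N}$, and therefore

$$2\,\mathrm{Re}\!\left(e^{-i\pi k/(2N)}\, Y_k\right) = 2 \sum_{n=0}^{N-1} x_n \cos\frac{\pi k (2n+1)}{2N},$$

which is the unnormalized SciPy-convention Type-II DCT. The code realizes the factor $2 e^{-i\pi k/(2N)}$ as scale, building the purely imaginary exponent with math_ops.complex(0.0, ...).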
Example 16: _compareBroadcastGradient
def _compareBroadcastGradient(self, x):
  x_ = ops.convert_to_tensor(x)
  epsilon = 1e-3
  with self.cached_session():
    for args in [(x_, 0.), (0., x_)]:
      z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
      self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer: Wajih-O, Project: tensorflow, Lines of code: 9, Source file: cwise_ops_test.py
Example 17: _compareMake
def _compareMake(self, real, imag, use_gpu):
  np_ans = real + (1j) * imag
  with self.test_session(
      use_gpu=use_gpu,
      force_gpu=use_gpu and test_util.is_gpu_available()):
    real = ops.convert_to_tensor(real)
    imag = ops.convert_to_tensor(imag)
    tf_ans = math_ops.complex(real, imag)
    out = self.evaluate(tf_ans)
  self.assertAllEqual(np_ans, out)
  self.assertShapeEqual(np_ans, tf_ans)
Developer: bunbutter, Project: tensorflow, Lines of code: 10, Source file: cwise_ops_test.py
Example 18: Test
def Test(self):
  np.random.seed(1)
  n = shape_[-1]
  batch_shape = shape_[:-2]
  np_dtype = dtype_.as_numpy_dtype
  a = np.random.uniform(
      low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  if dtype_.is_complex:
    a += 1j * np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  a += np.conj(a.T)
  a = np.tile(a, batch_shape + (1, 1))
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(np_dtype).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  # Tolerance obtained by looking at actual differences using
  # np.linalg.norm(theoretical - numerical, np.inf) on a -mavx build.
  if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
    tol = 1e-2
  else:
    tol = 1e-7
  with self.session(use_gpu=True):
    tf_a = constant_op.constant(a)
    if compute_v_:
      tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)

      # (Complex) eigenvectors are only unique up to an arbitrary phase.
      # We normalize the vectors such that the first component has phase 0.
      top_rows = tf_v[..., 0:1, :]
      if tf_a.dtype.is_complex:
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
      else:
        phase = math_ops.sign(top_rows)
      tf_v *= phase
      outputs = [tf_e, tf_v]
    else:
      tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
      outputs = [tf_e]
    for b in outputs:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        x_init += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      x_init += np.conj(x_init.T)
      x_init = np.tile(x_init, batch_shape + (1, 1))
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer: bunbutter, Project: tensorflow, Lines of code: 54, Source file: self_adjoint_eig_op_test.py
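A brief note on the phase normalization above (an editorial gloss): if $v$ is a unit eigenvector then so is $e^{i\theta} v$ for any $\theta$, so eigenvector outputs are only comparable once a phase convention is fixed. Multiplying each eigenvector by $e^{-i\arg(v_0)}$, which the code assembles as math_ops.complex(cos, sin) of the negated angle of the top row, makes the first component real and non-negative, so the analytic and numeric Jacobians are evaluated against the same representative.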
Example 19: _compareMake
def _compareMake(self, real, imag, use_gpu):
  np_ans = real + (1j) * imag
  with test_util.device(use_gpu=use_gpu):
    real = ops.convert_to_tensor(real)
    imag = ops.convert_to_tensor(imag)
    tf_ans = math_ops.complex(real, imag)
    out = self.evaluate(tf_ans)
  self.assertAllEqual(np_ans, out)
  self.assertShapeEqual(np_ans, tf_ans)
Developer: Wajih-O, Project: tensorflow, Lines of code: 11, Source file: cwise_ops_test.py
Example 20: test_assert_positive_definite_raises_for_negative_real_eigvalues
def test_assert_positive_definite_raises_for_negative_real_eigvalues(self):
  with self.test_session():
    diag_x = [1.0, -2.0]
    diag_y = [0., 0.]  # Imaginary eigenvalues should not matter.
    diag = math_ops.complex(diag_x, diag_y)
    operator = linalg.LinearOperatorDiag(diag)
    # is_self_adjoint should not be auto-set for complex diag.
    self.assertTrue(operator.is_self_adjoint is None)
    with self.assertRaisesOpError("non-positive real.*not positive definite"):
      operator.assert_positive_definite().run()
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 11, Source file: linear_operator_diag_test.py
Note: The tensorflow.python.ops.math_ops.complex examples above were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.