This article collects typical usage examples of the Python function tensorflow.python.ops.linalg_ops.eye. If you have been wondering exactly what the Python eye function does, how to call it, or what real uses look like, the curated code examples below may help.
A total of 20 code examples of the eye function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
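Before the collected examples, here is a minimal sketch of the call signature they all exercise; the concrete sizes, batch shape, and dtype below are illustrative choices for this sketch, not values taken from any example on this page.

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import linalg_ops

# A plain 3x3 identity matrix (float32 by default).
identity = linalg_ops.eye(3)

# A rectangular 2x4 matrix with ones on the main diagonal.
rect_identity = linalg_ops.eye(2, num_columns=4)

# A batch of identity matrices with shape [5, 7, 3, 3] and dtype float64.
batched_identity = linalg_ops.eye(3, batch_shape=[5, 7], dtype=dtypes.float64)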
Example 1: Test
def Test(self):
  eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
  if batch_shape is not None:
    eye_np = np.tile(eye_np, batch_shape + [1, 1])
  for use_placeholder in False, True:
    if use_placeholder and (num_columns is None or batch_shape is None):
      return
    with self.test_session(use_gpu=True) as sess:
      if use_placeholder:
        num_rows_placeholder = array_ops.placeholder(
            dtypes.int32, name="num_rows")
        num_columns_placeholder = array_ops.placeholder(
            dtypes.int32, name="num_columns")
        batch_shape_placeholder = array_ops.placeholder(
            dtypes.int32, name="batch_shape")
        eye = linalg_ops.eye(
            num_rows_placeholder,
            num_columns=num_columns_placeholder,
            batch_shape=batch_shape_placeholder,
            dtype=dtype)
        eye_tf = sess.run(
            eye,
            feed_dict={
                num_rows_placeholder: num_rows,
                num_columns_placeholder: num_columns,
                batch_shape_placeholder: batch_shape
            })
      else:
        eye_tf = linalg_ops.eye(
            num_rows,
            num_columns=num_columns,
            batch_shape=batch_shape,
            dtype=dtype).eval()
      self.assertAllEqual(eye_np, eye_tf)
Developer: chdinh, Project: tensorflow, Lines: 34, Source: linalg_ops_test.py
Example 2: get_observation_model
def get_observation_model(self, times):
  """Construct observation model matrix from VARMA parameters.

  Args:
    times: A [batch size] vector indicating the times observation models are
        requested for. Unused.
  Returns:
    The observation model matrix. It has shape
    [self.num_features, self.state_dimension].
  """
  del times  # StateSpaceModel will broadcast along the batch dimension
  if self.ar_order > self.ma_order or self.state_num_blocks < 2:
    return array_ops.pad(
        linalg_ops.eye(self.num_features, dtype=self.dtype),
        [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
        name="observation_model")
  else:
    # Add a second observed component which "catches" the accumulated moving
    # average errors as they reach the end of the state. If ar_order >
    # ma_order, this is unnecessary, since accumulated errors cycle naturally.
    return array_ops.concat(
        [
            array_ops.pad(
                linalg_ops.eye(self.num_features, dtype=self.dtype),
                [[0, 0], [0,
                          self.num_features * (self.state_num_blocks - 2)]]),
            linalg_ops.eye(self.num_features, dtype=self.dtype)
        ],
        axis=1,
        name="observation_model")
Developer: 1000sprites, Project: tensorflow, Lines: 30, Source: varma.py
Example 3: testShapeInferenceStaticBatch
def testShapeInferenceStaticBatch(self):
  batch_shape = (2, 3)
  self.assertEqual(
      (2, 3, 2, 2),
      linalg_ops.eye(num_rows=2, batch_shape=batch_shape).shape)
  self.assertEqual(
      (2, 3, 2, 3),
      linalg_ops.eye(
          num_rows=2, num_columns=3, batch_shape=batch_shape).shape)
Developer: JonathanRaiman, Project: tensorflow, Lines: 9, Source: linalg_ops_test.py
Example 4: _operator_and_mat_and_feed_dict
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
  shape = list(shape)
  assert shape[-1] == shape[-2]
  batch_shape = shape[:-2]
  num_rows = shape[-1]

  # Uniform values that are at least length 1 from the origin. Allows the
  # operator to be well conditioned.
  # Shape batch_shape
  multiplier = linear_operator_test_util.random_sign_uniform(
      shape=batch_shape, minval=1., maxval=2., dtype=dtype)
  operator = linalg_lib.LinearOperatorScaledIdentity(num_rows, multiplier)

  # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
  if use_placeholder:
    multiplier_ph = array_ops.placeholder(dtype=dtype)
    multiplier = multiplier.eval()
    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows, multiplier_ph)
    feed_dict = {multiplier_ph: multiplier}
  else:
    feed_dict = None

  multiplier_matrix = array_ops.expand_dims(
      array_ops.expand_dims(multiplier, -1), -1)
  mat = multiplier_matrix * linalg_ops.eye(
      num_rows, batch_shape=batch_shape, dtype=dtype)
  return operator, mat, feed_dict
Developer: AliMiraftab, Project: tensorflow, Lines: 31, Source: linear_operator_identity_test.py
Example 5: _compute_power_svd
def _compute_power_svd(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name):
  """Computes mat_h = mat_g^alpha using svd. mat_g is a symmetric PSD matrix.

  Args:
    var: the variable we are updating.
    mat_g: the symmetric PSD matrix whose power is to be computed.
    mat_g_size: size of mat_g.
    alpha: a real number.
    mat_h_slot_name: name of slot to store the power, if needed.

  Returns:
    mat_h = mat_g^alpha

  Stores mat_h in the appropriate slot, if it exists.
  Note that mat_g is PSD. So we could use linalg_ops.self_adjoint_eig.
  """
  if mat_g_size == 1:
    mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
  else:
    damping = self._epsilon * linalg_ops.eye(math_ops.to_int32(mat_g_size))
    diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
    mat_h = math_ops.matmul(
        mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),
        array_ops.transpose(mat_u))
  if mat_h_slot_name is not None:
    return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
  return mat_h
Developer: ZhangXinNan, Project: tensorflow, Lines: 27, Source: shampoo.py
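The SVD-based matrix power in Example 5 is easy to check outside TensorFlow. The following NumPy sketch (an illustrative reconstruction, not part of shampoo.py) applies the same recipe to a small symmetric PSD matrix and compares it against an eigendecomposition-based power.

import numpy as np

# Build a small symmetric PSD matrix.
rng = np.random.RandomState(0)
z = rng.randn(4, 4)
mat_g = z @ z.T

alpha, epsilon = -0.25, 1e-6

# Same recipe as _compute_power_svd: damp, take the SVD, rescale singular values.
u, s, vt = np.linalg.svd(mat_g + epsilon * np.eye(4))
mat_h_svd = (vt.T * np.maximum(s, epsilon) ** alpha) @ u.T

# Reference: matrix power via the eigendecomposition of the damped PSD matrix.
w, q = np.linalg.eigh(mat_g + epsilon * np.eye(4))
mat_h_eig = (q * np.maximum(w, epsilon) ** alpha) @ q.T

print(np.allclose(mat_h_svd, mat_h_eig, atol=1e-5))  # True

Because mat_g is symmetric PSD, its singular vectors coincide with its eigenvectors, which is why the two constructions agree.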
Example 6: test_inv_update_thunks
def test_inv_update_thunks(self):
  """Ensures inverse update ops run once per global_step."""
  with self._graph.as_default(), self.test_session() as sess:
    fisher_estimator = estimator.FisherEstimator(
        damping_fn=lambda: 0.2,
        variables=[self.weights],
        layer_collection=self.layer_collection,
        cov_ema_decay=0.0)

    # Construct op that updates one inverse per global step.
    global_step = training_util.get_or_create_global_step()
    inv_matrices = [
        matrix
        for fisher_factor in self.layer_collection.get_factors()
        for matrix in fisher_factor._inverses_by_damping.values()
    ]
    inv_update_op_thunks = fisher_estimator.inv_update_thunks
    inv_update_op = control_flow_ops.case(
        [(math_ops.equal(global_step, i), thunk)
         for i, thunk in enumerate(inv_update_op_thunks)])
    increment_global_step = global_step.assign_add(1)

    sess.run(variables.global_variables_initializer())
    initial_inv_values = sess.run(inv_matrices)

    # Ensure there's one update per inverse matrix. This is true as long as
    # there's no fan-in/fan-out or parameter re-use.
    self.assertEqual(len(inv_matrices), len(inv_update_op_thunks))

    # Test is a no-op if there is only 1 inverse matrix.
    assert len(inv_matrices) > 1

    # Assign each covariance matrix a value other than the identity. This
    # ensures that the inverse matrices are updated to something different as
    # well.
    cov_matrices = [
        fisher_factor.get_cov()
        for fisher_factor in self.layer_collection.get_factors()
    ]
    sess.run([
        cov_matrix.assign(2 * linalg_ops.eye(int(cov_matrix.shape[0])))
        for cov_matrix in cov_matrices
    ])

    for i in range(len(inv_matrices)):
      # Compare new and old inverse values.
      new_inv_values = sess.run(inv_matrices)
      is_inv_equal = [
          np.allclose(initial_inv_value, new_inv_value)
          for (initial_inv_value,
               new_inv_value) in zip(initial_inv_values, new_inv_values)
      ]
      num_inv_equal = sum(is_inv_equal)

      # Ensure exactly one inverse matrix changes per step.
      self.assertEqual(num_inv_equal, len(inv_matrices) - i)

      # Run all inverse update ops.
      sess.run(inv_update_op)
      sess.run(increment_global_step)
Developer: QiangCai, Project: tensorflow, Lines: 60, Source: estimator_test.py
Example 7: transition_power_test_template
def transition_power_test_template(test_case, model, num_steps):
  """Tests the transition_to_powers function of a state space model."""
  transition_matrix = ops.convert_to_tensor(
      model.get_state_transition(), dtype=model.dtype)
  step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
  state_dimension = transition_matrix.get_shape()[0].value
  previous_matrix = array_ops.placeholder(
      shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)
  true_single_step_update = math_ops.matmul(previous_matrix,
                                            transition_matrix)
  model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
      [step_number, step_number]))
  with test_case.test_session():
    starting_matrix = linalg_ops.eye(
        state_dimension, batch_shape=array_ops.shape(num_steps)).eval()
    evaled_current_matrix = starting_matrix
    for iteration_number in range(num_steps):
      model_output = model_output_tensor.eval(
          feed_dict={step_number: iteration_number})
      test_case.assertAllClose(
          evaled_current_matrix,
          model_output[0],
          rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)
      evaled_current_matrix = true_single_step_update.eval(
          feed_dict={previous_matrix: evaled_current_matrix})
Developer: 1000sprites, Project: tensorflow, Lines: 25, Source: test_utils.py
Example 8: power_sums_tensor
def power_sums_tensor(array_size, power_matrix, multiplier):
  r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).

  Args:
    array_size: The number of non-trivial sums to pre-compute.
    power_matrix: The "A" matrix above.
    multiplier: The "B" matrix above.
  Returns:
    A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
      S[0] is the zero matrix
      S[1] is B
      S[2] is A B A^T + B
    ...and so on
  """
  array_size = math_ops.cast(array_size, dtypes.int32)
  power_matrix = ops.convert_to_tensor(power_matrix)
  identity_like_power_matrix = linalg_ops.eye(
      array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
  identity_like_power_matrix.set_shape(
      ops.convert_to_tensor(power_matrix).get_shape())
  transition_powers = functional_ops.scan(
      lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
      math_ops.range(array_size - 1),
      initializer=identity_like_power_matrix)
  summed = math_ops.cumsum(
      array_ops.concat([
          array_ops.expand_dims(multiplier, 0), math_ops.matmul(
              batch_times_matrix(transition_powers, multiplier),
              transition_powers,
              adjoint_b=True)
      ], 0))
  return array_ops.concat(
      [array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
Developer: AutumnQYN, Project: tensorflow, Lines: 33, Source: math_utils.py
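The sums returned by Example 8 satisfy the recurrence S[N+1] = A S[N] A^T + B with S[0] = 0, which gives a quick way to sanity-check the op. The NumPy sketch below (illustrative only, not part of math_utils.py) builds the same sums directly from that recurrence.

import numpy as np

def power_sums_reference(array_size, power_matrix, multiplier):
  """Reference S[N] = sum_{i=0}^{N-1} A^i B (A^i)^T, built via the recurrence."""
  sums = [np.zeros_like(multiplier)]  # S[0] is the zero matrix.
  for _ in range(array_size + 1):
    # S[N+1] = A S[N] A^T + B
    sums.append(power_matrix @ sums[-1] @ power_matrix.T + multiplier)
  return np.stack(sums)

a = np.array([[0.9, 0.1], [0.0, 0.8]])
b = np.eye(2)
s = power_sums_reference(3, a, b)
print(np.allclose(s[1], b))                  # True: S[1] == B
print(np.allclose(s[2], a @ b @ a.T + b))    # True: S[2] == A B A^T + B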
Example 9: _underdetermined
def _underdetermined(op, grad):
  """Gradients for the underdetermined case of MatrixSolveLs.

  This is the backprop for the solution to the normal equations of the second
  kind:
    X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
  which (for lambda=0) solve the least squares problem
  min ||X||_F subject to A*X = B.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
  a_shape = array_ops.shape(a)
  batch_shape = a_shape[:-2]
  m = a_shape[-2]

  identity = linalg_ops.eye(m, batch_shape=batch_shape, dtype=a.dtype)
  gramian = math_ops.matmul(a, a, adjoint_b=True) + l2_regularizer * identity
  chol = linalg_ops.cholesky(gramian)
  grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
  # Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
  tmp = linalg_ops.cholesky_solve(chol, b)
  a1 = math_ops.matmul(tmp, a, adjoint_a=True)
  a1 = -math_ops.matmul(grad_b, a1)
  a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
  a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
  grad_a = a1 + a2
  return (grad_a, grad_b, None)
Developer: AutumnQYN, Project: tensorflow, Lines: 28, Source: linalg_grad.py
Example 10: _overdetermined
def _overdetermined(op, grad):
  """Gradients for the overdetermined case of MatrixSolveLs.

  This is the backprop for the solution to the normal equations of the first
  kind:
    X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
  which solves the least squares problem
  min ||A * X - B||_F^2 + lambda ||X||_F^2.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
  x = op.outputs[0]
  a_shape = array_ops.shape(a)
  batch_shape = a_shape[:-2]
  n = a_shape[-1]

  identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
  gramian = math_ops.matmul(a, a, adjoint_a=True) + l2_regularizer * identity
  chol = linalg_ops.cholesky(gramian)
  # Temporary z = (A^T * A + lambda * I)^{-1} * grad.
  z = linalg_ops.cholesky_solve(chol, grad)
  xzt = math_ops.matmul(x, z, adjoint_b=True)
  zx_sym = xzt + array_ops.matrix_transpose(xzt)
  grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
  grad_b = math_ops.matmul(a, z)
  return (grad_a, grad_b, None)
Developer: AutumnQYN, Project: tensorflow, Lines: 27, Source: linalg_grad.py
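The normal-equation formulas quoted in the docstrings of Examples 9 and 10 can be verified numerically. The NumPy sketch below (illustrative only, unrelated to linalg_grad.py) checks that, for lambda = 0, the first-kind normal equations reproduce the ordinary least-squares solution.

import numpy as np

rng = np.random.RandomState(1)
a = rng.randn(6, 3)   # overdetermined: more rows than columns
b = rng.randn(6, 2)
lam = 0.0

# Normal equations of the first kind, as stated in the docstring above.
x_normal = np.linalg.solve(a.T @ a + lam * np.eye(3), a.T @ b)

# Reference least-squares solution.
x_lstsq, *_ = np.linalg.lstsq(a, b, rcond=None)
print(np.allclose(x_normal, x_lstsq))  # True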
Example 11: _operator_and_matrix
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
  shape = list(build_info.shape)
  assert shape[-1] == shape[-2]
  batch_shape = shape[:-2]
  num_rows = shape[-1]

  # Uniform values that are at least length 1 from the origin. Allows the
  # operator to be well conditioned.
  # Shape batch_shape
  multiplier = linear_operator_test_util.random_sign_uniform(
      shape=batch_shape, minval=1., maxval=2., dtype=dtype)

  # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
  lin_op_multiplier = multiplier
  if use_placeholder:
    lin_op_multiplier = array_ops.placeholder_with_default(
        multiplier, shape=None)

  operator = linalg_lib.LinearOperatorScaledIdentity(
      num_rows, lin_op_multiplier)

  multiplier_matrix = array_ops.expand_dims(
      array_ops.expand_dims(multiplier, -1), -1)
  matrix = multiplier_matrix * linalg_ops.eye(
      num_rows, batch_shape=batch_shape, dtype=dtype)
  return operator, matrix
Developer: AnishShah, Project: tensorflow, Lines: 30, Source: linear_operator_identity_test.py
Example 12: test_non_batch_2x2
def test_non_batch_2x2(self):
  num_rows = 2
  dtype = np.float32
  np_eye = np.eye(num_rows).astype(dtype)
  with self.test_session():
    eye = linalg_ops.eye(num_rows, dtype=dtype)
    self.assertAllEqual((num_rows, num_rows), eye.get_shape())
    self.assertAllEqual(np_eye, eye.eval())
Developer: AlbertXiebnu, Project: tensorflow, Lines: 8, Source: linalg_ops_test.py
Example 13: _create_slots
def _create_slots(self, var_list):
  for v in var_list:
    with ops.colocate_with(v):
      _ = self._zeros_slot(v, "gbar", self._name)
      shape = np.array(v.get_shape())
      for i, d in enumerate(shape):
        d_tensor = ops.convert_to_tensor(d)
        if d <= self._max_matrix_size:
          mat_g_init = array_ops.zeros_like(linalg_ops.eye(d_tensor))
          if self._svd_interval > 1:
            _ = self._get_or_make_slot(v, linalg_ops.eye(d_tensor),
                                       "H_" + str(i), self._name)
        else:
          mat_g_init = array_ops.zeros([d_tensor])
        _ = self._get_or_make_slot(v, mat_g_init, "Gbar_" + str(i),
                                   self._name)
Developer: ZhangXinNan, Project: tensorflow, Lines: 17, Source: shampoo.py
Example 14: test_non_batch_0x0
def test_non_batch_0x0(self):
  num_rows = 0
  dtype = np.int64
  np_eye = np.eye(num_rows).astype(dtype)
  with self.test_session(use_gpu=True):
    eye = linalg_ops.eye(num_rows, dtype=dtype)
    self.assertAllEqual((num_rows, num_rows), eye.get_shape())
    self.assertAllEqual(np_eye, eye.eval())
Developer: AutumnQYN, Project: tensorflow, Lines: 8, Source: linalg_ops_test.py
Example 15: _verifyLu
def _verifyLu(self, x, output_idx_type=dtypes.int64):
  # Verify that Px = LU.
  lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

  # Prepare the lower factor of shape num_rows x num_rows.
  lu_shape = np.array(lu.shape.as_list())
  batch_shape = lu_shape[:-2]
  num_rows = lu_shape[-2]
  num_cols = lu_shape[-1]

  lower = array_ops.matrix_band_part(lu, -1, 0)
  if num_rows > num_cols:
    eye = linalg_ops.eye(
        num_rows, batch_shape=batch_shape, dtype=lower.dtype)
    lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
  elif num_rows < num_cols:
    lower = lower[..., :num_rows]

  # Fill the diagonal with ones.
  ones_diag = array_ops.ones(
      np.append(batch_shape, num_rows), dtype=lower.dtype)
  lower = array_ops.matrix_set_diag(lower, ones_diag)

  # Prepare the upper factor.
  upper = array_ops.matrix_band_part(lu, 0, -1)

  verification = math_ops.matmul(lower, upper)

  # Permute the rows of the product of the triangular factors.
  if num_rows > 0:
    # Reshape the product of the triangular factors and permutation indices
    # to a single batch dimension. This makes it easy to apply
    # invert_permutation and gather_nd ops.
    perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
    verification_reshaped = array_ops.reshape(verification,
                                              [-1, num_rows, num_cols])

    # Invert the permutation in each batch.
    inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                      perm_reshaped)
    batch_size = perm_reshaped.shape.as_list()[0]
    # Prepare the batch indices with the same shape as the permutation.
    # The corresponding batch index is paired with each of the `num_rows`
    # permutation indices.
    batch_indices = math_ops.cast(
        array_ops.broadcast_to(
            math_ops.range(batch_size)[:, None], perm_reshaped.shape),
        dtype=output_idx_type)
    permuted_verification_reshaped = array_ops.gather_nd(
        verification_reshaped,
        array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

    # Reshape the verification matrix back to the original shape.
    verification = array_ops.reshape(permuted_verification_reshaped,
                                     lu_shape)

  self._verifyLuBase(x, lower, upper, perm, verification,
                     output_idx_type)
Developer: adit-chandra, Project: tensorflow, Lines: 58, Source: lu_op_test.py
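For reference, the factorization property that Example 15 verifies, P x = L U, can be demonstrated with SciPy's dense LU routine. This is an illustrative aside using scipy.linalg.lu rather than the TensorFlow op.

import numpy as np
from scipy.linalg import lu

a = np.array([[0., 2., 1.],
              [1., 1., 0.],
              [2., 0., 3.]])
# scipy.linalg.lu returns p, l, u with a == p @ l @ u,
# so the row-permuted input p.T @ a equals l @ u.
p, l, u = lu(a)
print(np.allclose(p.T @ a, l @ u))  # True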
Example 16: TriAngInvCompositeGrad
def TriAngInvCompositeGrad(l, grad):
  num_rows = array_ops.shape(l)[-1]
  batch_shape = array_ops.shape(l)[:-2]
  l_inverse = linalg_ops.matrix_triangular_solve(l,
                                                 linalg_ops.eye(
                                                     num_rows,
                                                     batch_shape=batch_shape,
                                                     dtype=l.dtype))
  return _GradWithInverseL(l, l_inverse, grad)
Developer: AbhinavJain13, Project: tensorflow, Lines: 9, Source: cholesky_op_test.py
Example 17: test_cholesky
def test_cholesky(self):
  z = random_ops.random_normal([2, 3, 3])
  x = (math_ops.matmul(z, array_ops.matrix_transpose(z))  # Ensure pos. def.
       + linalg_ops.eye(3))  # Ensure well-conditioned.

  def loop_fn(i):
    return linalg_ops.cholesky(array_ops.gather(x, i))

  self._test_loop_fn(loop_fn, 2)
Developer: aritratony, Project: tensorflow, Lines: 9, Source: math_test.py
Example 18: test_non_batch_0x2
def test_non_batch_0x2(self):
  num_rows = 0
  num_columns = 2
  dtype = np.int64
  np_eye = np.eye(num_rows, num_columns).astype(dtype)
  with self.test_session():
    eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
    self.assertAllEqual((num_rows, num_columns), eye.get_shape())
    self.assertAllEqual(np_eye, eye.eval())
Developer: AlbertXiebnu, Project: tensorflow, Lines: 9, Source: linalg_ops_test.py
Example 19: test_non_batch_2x3
def test_non_batch_2x3(self):
  num_rows = 2
  num_columns = 3
  dtype = np.float32
  np_eye = np.eye(num_rows, num_columns).astype(dtype)
  with self.test_session(use_gpu=True):
    eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
    self.assertAllEqual((num_rows, num_columns), eye.get_shape())
    self.assertAllEqual(np_eye, eye.eval())
Developer: AutumnQYN, Project: tensorflow, Lines: 9, Source: linalg_ops_test.py
Example 20: testLossFunctionWithoutName
def testLossFunctionWithoutName(self):
  """Ensure loss functions get unique names if 'name' not specified."""
  with ops.Graph().as_default():
    logits = linalg_ops.eye(2)
    lc = layer_collection.LayerCollection()

    # Create a new loss function with default names.
    lc.register_categorical_predictive_distribution(logits)
    lc.register_categorical_predictive_distribution(logits)

    self.assertEqual(2, len(lc.losses))
Developer: DILASSS, Project: tensorflow, Lines: 10, Source: layer_collection_test.py
Note: The tensorflow.python.ops.linalg_ops.eye examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.