This article collects typical usage examples of the Python function tensorflow.python.ops.random_ops.random_normal. If you have been wondering how exactly to use random_normal, or looking for working examples of it, the hand-picked code samples here should help.
Twenty code examples of random_normal are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
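Before the examples, here is a minimal sketch of calling random_normal directly. It assumes a TensorFlow 1.x graph-mode environment and uses the same internal module paths the examples below rely on:

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops

# Draw a 2x3 float32 tensor from N(0, 1); the op-level seed makes this op's
# values reproducible across runs of the same graph.
samples = random_ops.random_normal([2, 3], mean=0.0, stddev=1.0,
                                   dtype=dtypes.float32, seed=42)
with session.Session() as sess:
  print(sess.run(samples))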
Example 1: testOOM
def testOOM(self):
  if not test.is_gpu_available():
    return
  ops.reset_default_graph()
  with ops.device('/device:GPU:0'):
    a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
    b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
    c = a * b

  try:
    with session.Session() as sess:
      sess.run(c, options=config_pb2.RunOptions(
          report_tensor_allocations_upon_oom=True))
  except Exception as e:  # pylint: disable=broad-except
    exception_str = '%s' % e
    # This trace reports allocations for the two random tensors.
    self.assertTrue(
        'OOM when allocating tensor with shape[30000,10000,20000]' in
        exception_str)
    mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
                    exception_str)
    self.assertGreater(float(mat.group(1)), 0.0)
    mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
                    exception_str)
    self.assertGreater(float(mat.group(1)), 0.0)
Author: andrewharp, Project: tensorflow, Lines: 25, Source: model_analyzer_test.py
Example 2: random_normal
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype.
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
Author: Joetz, Project: tensorflow, Lines: 32, Source: linear_operator_test_util.py
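A brief usage note for Example 2 (a sketch, assuming the helper above and the `dtypes` module are in scope): passing a complex dtype draws two independent real normal tensors and combines them as X + iY.

z = random_normal([3, 3], dtype=dtypes.complex64, seed=7)
# z has dtype complex64; its real and imaginary parts are independent
# N(0, 1) draws (the imaginary part is sampled with seed + 1234).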
Example 3: testDistributedOOM
def testDistributedOOM(self):
  if not test.is_gpu_available():
    return
  ops.reset_default_graph()

  workers, _ = test_util.create_local_cluster(2, 0)

  with ops.device('/job:worker/replica:0/task:0/gpu:0'):
    a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
  with ops.device('/job:worker/replica:0/task:1/gpu:0'):
    b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
    c = a * b

  try:
    with session.Session(workers[1].target) as sess:
      sess.run(c, options=config_pb2.RunOptions(
          report_tensor_allocations_upon_oom=True))
  except Exception as e:  # pylint: disable=broad-except
    exception_str = '%s' % e
    # test_random2 is reported because it's allocated in worker 1.
    self.assertTrue('Current usage from device: '
                    '/job:worker/replica:0/task:1/device:GPU:0, '
                    'allocator: GPU_0_bfc' in exception_str)
    mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
                    exception_str)
    self.assertGreater(float(mat.group(1)), 0.0)
    # test_random1 is not reported because it's allocated in worker 0.
    mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
                    exception_str)
    self.assertTrue(mat is None)
Author: andrewharp, Project: tensorflow, Lines: 30, Source: model_analyzer_test.py
Example 4: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    all_ops = []
    for compute_uv_ in True, False:
      for full_matrices_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
        if compute_uv_:
          s1, u1, v1 = linalg_ops.svd(
              matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
          s2, u2, v2 = linalg_ops.svd(
              matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
          all_ops += [s1, u1, v1, s2, u2, v2]
        else:
          s1 = linalg_ops.svd(
              matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
          s2 = linalg_ops.svd(
              matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
          all_ops += [s1, s2]
    val = sess.run(all_ops)
    for i in range(2):
      s = 6 * i
      self.assertAllEqual(val[s], val[s + 3])      # s1 == s2
      self.assertAllEqual(val[s + 1], val[s + 4])  # u1 == u2
      self.assertAllEqual(val[s + 2], val[s + 5])  # v1 == v2
    for i in range(2):
      s = 12 + 2 * i
      self.assertAllEqual(val[s], val[s + 1])  # s1 == s2
Author: AnddyWang, Project: tensorflow, Lines: 28, Source: svd_op_test.py
Example 5: test_virtual_statistics
def test_virtual_statistics(self):
  """Check that `_virtual_statistics` gives same result as `nn.moments`."""
  random_seed.set_random_seed(1234)

  batch_axis = 0
  partial_batch = random_ops.random_normal([4, 5, 7, 3])
  single_example = random_ops.random_normal([1, 5, 7, 3])
  full_batch = array_ops.concat([partial_batch, single_example], axis=0)

  for reduction_axis in range(1, 4):
    # Get `nn.moments` on the full batch.
    reduction_axes = list(range(4))
    del reduction_axes[reduction_axis]
    mom_mean, mom_variance = nn.moments(full_batch, reduction_axes)

    # Get virtual batch statistics.
    vb_reduction_axes = list(range(4))
    del vb_reduction_axes[reduction_axis]
    del vb_reduction_axes[batch_axis]
    vbn = virtual_batchnorm.VBN(partial_batch, reduction_axis)
    vb_mean, mean_sq = vbn._virtual_statistics(
        single_example, vb_reduction_axes)
    vb_variance = mean_sq - math_ops.square(vb_mean)
    # Remove singleton batch dim for easy comparisons.
    vb_mean = array_ops.squeeze(vb_mean, batch_axis)
    vb_variance = array_ops.squeeze(vb_variance, batch_axis)

    with self.cached_session(use_gpu=True) as sess:
      vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
          vb_mean, vb_variance, mom_mean, mom_variance])

    self.assertAllClose(mom_mean_np, vb_mean_np)
    self.assertAllClose(mom_var_np, vb_var_np)
Author: Albert-Z-Guo, Project: tensorflow, Lines: 33, Source: virtual_batchnorm_test.py
Example 6: testMultiplyInverseTuple
def testMultiplyInverseTuple(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    params = random_ops.random_normal((2, 2, 2, 2))
    inputs = random_ops.random_normal((2, 2, 2, 2))
    outputs = random_ops.random_normal((2, 2, 2, 2))
    block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1),
                              'SAME')
    block.register_additional_minibatch(inputs, outputs)
    grads = outputs**2
    block.instantiate_factors(([grads],), 0.5)

    # Make sure our inverse is something other than the identity.
    sess.run(tf_variables.global_variables_initializer())
    sess.run(block._input_factor.make_inverse_update_ops())
    sess.run(block._output_factor.make_inverse_update_ops())

    vector = (np.arange(1, 15).reshape(7, 2).astype(np.float32),
              np.arange(2, 4).reshape(2, 1).astype(np.float32))
    output = block.multiply_inverse((array_ops.constant(vector[0]),
                                     array_ops.constant(vector[1])))
    output = sess.run(output)

    self.assertAllClose([0.136455, 0.27291], output[0][0])
    self.assertAllClose([0.27291, 0.409365], output[1])
Author: ChengYuXiang, Project: tensorflow, Lines: 25, Source: fisher_blocks_test.py
Example 7: approximate_hessian
def approximate_hessian(self, grads_and_vars, name=None,
                        gate_gradients=1, aggregation_method=None,
                        colocate_gradients_with_ops=False):
  """
  I haven't tested this yet so I have no idea if it works, but even if it
  does it's probably super slow, and either way nothing else has been modified
  to deal with it.
  """
  # The three trailing keyword arguments are not in the original signature;
  # they are supplied here with tf.train.Optimizer.minimize()-style defaults
  # so the references below resolve. `Optimizer` is assumed to be
  # tf.train.Optimizer, the base class of this optimizer.
  gv = 0
  var_refs = []
  for g_t, x_tm1 in grads_and_vars:
    var_refs.append(x_tm1.ref())
    if g_t is None:
      continue
    with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
      if isinstance(g_t, ops.Tensor):
        gv += math_ops.reduce_sum(g_t * random_ops.random_normal(g_t.get_shape()))
      else:
        idxs, idxs_ = array_ops.unique(g_t.indices)
        g_t_ = math_ops.unsorted_segment_sum(g_t.values, idxs_,
                                             array_ops.size(idxs))
        gv += math_ops.reduce_sum(g_t_ * random_ops.random_normal(g_t_.get_shape()))
  hesses = gradients.gradients(
      gv, var_refs,
      gate_gradients=(gate_gradients == Optimizer.GATE_OP),
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops)
  return zip([g_t for g_t, _ in grads_and_vars],
             [x_tm1 for _, x_tm1 in grads_and_vars], hesses)
Author: tdozat, Project: Optimization, Lines: 25, Source: optimizers.py
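Example 7 rests on the standard Hessian-vector-product identity: differentiating the scalar g·v with respect to the variables yields H·v without ever materializing H. Below is a minimal sketch of that identity in isolation; the toy variable `x` and `loss` are illustrative, not from the original project:

from tensorflow.python.ops import gradients_impl, math_ops, random_ops, variables

x = variables.Variable([1.0, 2.0])
loss = math_ops.reduce_sum(x ** 4)           # toy loss with a nontrivial Hessian
g = gradients_impl.gradients(loss, [x])[0]   # g = dloss/dx
v = random_ops.random_normal(g.get_shape())  # random probe vector, as in Example 7
gv = math_ops.reduce_sum(g * v)              # scalar g . v
hv = gradients_impl.gradients(gv, [x])[0]    # d(g . v)/dx = H . v, since v is
                                             # constant with respect to x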
Example 8: testMultiplyInverseNotTupleWithBias
def testMultiplyInverseNotTupleWithBias(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    params = [random_ops.random_normal((2, 2, 2, 2))]
    inputs = random_ops.random_normal((2, 2, 2, 2))
    outputs = random_ops.random_normal((2, 2, 2, 2))
    block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1),
                              'SAME')
    block.register_additional_minibatch(inputs, outputs)
    self.assertTrue(block._has_bias)
    grads = outputs**2
    block.instantiate_factors(((grads,),), 0.5)
    block._input_factor.instantiate_cov_variables()
    block._output_factor.instantiate_cov_variables()
    block.register_inverse()
    block._input_factor.instantiate_inv_variables()
    block._output_factor.instantiate_inv_variables()

    # Make sure our inverse is something other than the identity.
    sess.run(tf_variables.global_variables_initializer())
    sess.run(block._input_factor.make_inverse_update_ops())
    sess.run(block._output_factor.make_inverse_update_ops())

    vector = np.arange(1, 19).reshape(9, 2).astype(np.float32)
    output = block.multiply_inverse(array_ops.constant(vector))

    self.assertAllClose([0.136455, 0.27291], sess.run(output)[0])
Author: DILASSS, Project: tensorflow, Lines: 27, Source: fisher_blocks_test.py
Example 9: _run_model
def _run_model():
  x = random_ops.random_normal(shape=[1, SIZE])
  w = random_ops.random_normal(shape=[SIZE, 2 * SIZE])
  y = math_ops.matmul(x, w)

  config = config_pb2.ConfigProto()
  config.graph_options.rewrite_options.arithmetic_optimization = (
      rewriter_config_pb2.RewriterConfig.OFF)
  with session.Session(config=config) as sess:
    run_metadata = config_pb2.RunMetadata()
    opts = builder.time_and_memory()
    opts['min_micros'] = 0
    opts['min_bytes'] = 0
    opts['order_by'] = 'name'
    opts['output'] = 'none'
    _ = sess.run(y,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_metadata)
    tfprof_node = model_analyzer.profile(
        sess.graph,
        run_meta=run_metadata,
        options=opts)

    return tfprof_node, run_metadata
Author: aeverall, Project: tensorflow, Lines: 25, Source: run_metadata_test.py
Example 10: testMultiplyInverse
def testMultiplyInverse(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)

    params = random_ops.random_normal((3, 3, 8, 2))
    inputs = random_ops.random_normal((32, 5, 5, 8))
    outputs = random_ops.random_normal((32, 5, 5, 16))
    layer_collection = lc.LayerCollection()
    block = fb.DepthwiseConvKFCBasicFB(
        layer_collection, params=params, strides=[1, 1, 1, 1], padding='SAME')
    block.register_additional_tower(inputs, outputs)
    grads = outputs**2
    block.instantiate_factors(([grads],), 0.5)
    block._input_factor.instantiate_cov_variables()
    block._output_factor.instantiate_cov_variables()
    block.register_inverse()
    block._input_factor.instantiate_inv_variables()
    block._output_factor.instantiate_inv_variables()

    # Ensure inverse update op doesn't crash.
    sess.run(tf_variables.global_variables_initializer())
    sess.run([
        factor.make_inverse_update_ops()
        for factor in layer_collection.get_factors()
    ])

    # Ensure inverse-vector multiply doesn't crash.
    output = block.multiply_inverse(params)
    sess.run(output)

    # Ensure same shape.
    self.assertAllEqual(output.shape, params.shape)
Author: bikong2, Project: tensorflow, Lines: 31, Source: fisher_blocks_test.py
Example 11: test_optimize
def test_optimize(self):
  scalar = variables.Variable(random_ops.random_normal([]), 'scalar')
  vector = variables.Variable(random_ops.random_normal([2]), 'vector')
  matrix = variables.Variable(random_ops.random_normal([2, 3]), 'matrix')
  minimum_location = constant_op.constant(np.arange(9), dtype=dtypes.float32)

  loss = math_ops.reduce_sum(
      math_ops.square(vector - minimum_location[:2])) / 2.
  loss += math_ops.reduce_sum(
      math_ops.square(scalar - minimum_location[2])) / 2.
  loss += math_ops.reduce_sum(
      math_ops.square(
          matrix - array_ops.reshape(minimum_location[3:], [2, 3]))) / 2.

  optimizer = MockOptimizerInterface(loss)

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    optimizer.minimize(sess)

    self.assertAllClose(np.arange(2), sess.run(vector))
    self.assertAllClose(np.arange(1) + 2, sess.run(scalar))
    self.assertAllClose(np.arange(6).reshape(2, 3) + 3, sess.run(matrix))
Author: Dr4KK, Project: tensorflow, Lines: 25, Source: external_optimizer_test.py
Example 12: _testSimpleModel
def _testSimpleModel(self, use_forward_func, use_resource=False):

  def _Model(x):
    w = variable_scope.get_variable(
        "w", (64, 64),
        initializer=init_ops.random_uniform_initializer(seed=312),
        use_resource=use_resource)
    b = variable_scope.get_variable(
        "b", (64),
        initializer=init_ops.zeros_initializer(),
        use_resource=use_resource)
    return math_ops.sigmoid(math_ops.matmul(x, w) + b)

  @function.Defun()
  def Model(x):
    return _Model(x)

  cvars = []

  @function.Defun()
  def Grad(x, y0):
    if use_forward_func:
      y = Model(x)
    else:
      y = _Model(x)
    loss = math_ops.reduce_mean(
        math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
    arg_w, arg_b = function.get_extra_args()
    self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
    self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
    dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
    cvars.extend(function.get_extra_vars())
    return loss, dw, db

  g = ops.Graph()
  with g.as_default():
    x = random_ops.random_normal([64, 64], seed=100)
    y0 = random_ops.random_normal([64, 64], seed=200)
    with variable_scope.variable_scope("Foo"):
      loss, dw, db = Grad(x, y0)

    self.assertEqual(2, len(cvars))
    w, b = cvars[:2]
    self.assertEqual("Foo/w", w.op.name)
    self.assertEqual("Foo/b", b.op.name)

    with self.test_session(graph=g) as sess:
      sess.run(variables.global_variables_initializer())
      w, b, x, y0, loss, dw, db = sess.run([w, b, x, y0, loss, dw, db])

    self.assertAllEqual(w.shape, (64, 64))
    self.assertAllClose(np.sum(w), 2050.44)
    self.assertAllEqual(b.shape, (64,))
    self.assertAllClose(np.sum(b), 0.0)
    self.assertAllClose(loss, -2.27, rtol=1e-2)
    self.assertAllEqual(dw.shape, (64, 64))
    self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
    self.assertAllEqual(db.shape, (64,))
    self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)
Author: AbhinavJain13, Project: tensorflow, Lines: 59, Source: function_test.py
Example 13: testNoCSE
def testNoCSE(self):
  for use_gpu in [False, True]:
    with self.test_session(use_gpu=use_gpu):
      shape = [2, 3, 4]
      rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
      rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
      diff = rnd2 - rnd1
      self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
Author: 1000sprites, Project: tensorflow, Lines: 8, Source: random_ops_test.py
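Example 13 above and Examples 14-20 below show the two sides of determinism in random_normal: unseeded ops with identical arguments must still produce different values (so they are not merged by common-subexpression elimination), while ops that share the same op-level seed in the same graph produce identical values. A quick sketch of the contrast (hedged; TF 1.x graph mode assumed, with `random_ops` imported as above):

unseeded_a = random_ops.random_normal([2, 2])
unseeded_b = random_ops.random_normal([2, 2])         # differs from unseeded_a
seeded_a = random_ops.random_normal([2, 2], seed=42)
seeded_b = random_ops.random_normal([2, 2], seed=42)  # identical to seeded_a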
Example 14: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  matrix1 = random_ops.random_normal([5, 5], seed=42)
  matrix2 = random_ops.random_normal([5, 5], seed=42)
  lu1, p1 = linalg_ops.lu(matrix1)
  lu2, p2 = linalg_ops.lu(matrix2)
  lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
  self.assertAllEqual(lu1_val, lu2_val)
  self.assertAllEqual(p1_val, p2_val)
Author: adit-chandra, Project: tensorflow, Lines: 8, Source: lu_op_test.py
Example 15: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    expm1 = linalg_impl.matrix_exponential(matrix1)
    expm2 = linalg_impl.matrix_exponential(matrix2)
    expm = sess.run([expm1, expm2])
    self.assertAllEqual(expm[0], expm[1])
Author: AnishShah, Project: tensorflow, Lines: 8, Source: matrix_exponential_op_test.py
Example 16: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    det1 = linalg_ops.matrix_determinant(matrix1)
    det2 = linalg_ops.matrix_determinant(matrix2)
    det1_val, det2_val = sess.run([det1, det2])
    self.assertEqual(det1_val, det2_val)
Author: abhinav-upadhyay, Project: tensorflow, Lines: 8, Source: determinant_op_test.py
Example 17: testTransposedStatistics
def testTransposedStatistics(self):
  a = variables.Variable(random_ops.random_normal([16, 25]))
  b = variables.Variable(random_ops.random_normal([16, 9]))
  math_ops.matmul(a, b, transpose_a=True)
  g = ops.get_default_graph()
  for op in g.get_operations():
    flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
    if op.name == "MatMul":
      self.assertEqual(7200, flops)
Author: aeverall, Project: tensorflow, Lines: 9, Source: matmul_op_test.py
Example 18: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    sqrt1 = gen_linalg_ops.matrix_square_root(matrix1)
    sqrt2 = gen_linalg_ops.matrix_square_root(matrix2)
    all_ops = [sqrt1, sqrt2]
    sqrt = sess.run(all_ops)
    self.assertAllEqual(sqrt[0], sqrt[1])
Author: abhinav-upadhyay, Project: tensorflow, Lines: 9, Source: matrix_square_root_op_test.py
Example 19: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.session(use_gpu=True) as sess:
    matrix1 = math_ops.cast(
        random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
    matrix2 = math_ops.cast(
        random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
    logm1 = gen_linalg_ops.matrix_logarithm(matrix1)
    logm2 = gen_linalg_ops.matrix_logarithm(matrix2)
    logm = sess.run([logm1, logm2])
    self.assertAllEqual(logm[0], logm[1])
Author: abhinav-upadhyay, Project: tensorflow, Lines: 10, Source: matrix_logarithm_op_test.py
Example 20: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    matrix1 = math_ops.matmul(matrix1, matrix1, adjoint_a=True)
    matrix2 = math_ops.matmul(matrix2, matrix2, adjoint_a=True)
    c1 = linalg_ops.cholesky(matrix1)
    c2 = linalg_ops.cholesky(matrix2)
    c1_val, c2_val = sess.run([c1, c2])
    self.assertAllEqual(c1_val, c2_val)
Author: AbhinavJain13, Project: tensorflow, Lines: 10, Source: cholesky_op_test.py
Note: The tensorflow.python.ops.random_ops.random_normal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use is subject to each project's License. Do not reproduce without permission.