This article collects typical usage examples of the Python function tensorflow.python.ops.random_ops.random_uniform. If you are wondering what exactly random_uniform does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty code examples of random_uniform are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
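Before the examples, here is a minimal, self-contained sketch of the call signature, assuming the TensorFlow 1.x graph-mode API that these examples use (the import paths mirror the ones that appear below; the specific shapes and values are illustrative only):

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops

# Floats are drawn from [minval, maxval); the default range is [0, 1).
x = random_ops.random_uniform([2, 3])
y = random_ops.random_uniform([2, 3], minval=-1.0, maxval=1.0)
# Integer dtypes require an explicit maxval; a seed makes the draw reproducible.
z = random_ops.random_uniform([4], minval=0, maxval=10,
                              dtype=dtypes.int32, seed=42)

with session.Session() as sess:
  print(sess.run([x, y, z]))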
Example 1: benchmarkBatchSelect
def benchmarkBatchSelect(self):
  for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
                                           [10, 100, 1000], [False, True]):
    name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
    device = "/%s:0" % ("gpu" if use_gpu else "cpu")
    with ops.Graph().as_default():
      with ops.device(device):
        x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
        y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
        c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
        x = resource_variable_ops.ResourceVariable(x_gen)
        y = resource_variable_ops.ResourceVariable(y_gen)
        c = resource_variable_ops.ResourceVariable(c_gen)
        op = array_ops.where(c, x, y)
      with session.Session(config=benchmark.benchmark_config()) as sess:
        x.initializer.run()
        y.initializer.run()
        c.initializer.run()
        r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
        # approximate size of output: m*n*2 floats for each axis.
        gb_processed = m * n * 8 / 1.0e9
        throughput = gb_processed / r["wall_time"]
        print("Benchmark: %s \t wall_time: %0.03g s \t "
              "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
        sys.stdout.flush()
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 25, Source: where_op_test.py
Example 2: _test_unary_cwise_ops
def _test_unary_cwise_ops(self, ops, is_complex):
  for op in ops:
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 5])
      g.watch(x)
      if is_complex:
        y = random_ops.random_uniform([3, 5])
        g.watch(y)
        x = math_ops.complex(x, y)
    # pylint: disable=cell-var-from-loop
    output_dtypes = []

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        y1 = op(x1)
        outputs = [op(x), y1]
        if y1.dtype == dtypes.float32:
          loss = math_ops.reduce_sum(y1 * y1)
        else:
          loss = None
      if loss is not None:
        grad = g.gradient(loss, x1)
        if grad is not None:
          outputs.append(grad)
      del output_dtypes[:]
      output_dtypes.extend([t.dtype for t in outputs])
      return outputs
    # pylint: enable=cell-var-from-loop

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
Developer: aritratony, Project: tensorflow, Lines: 33, Source: math_test.py
Example 3: testHasBias
def testHasBias(self):
  with tf_ops.Graph().as_default():
    inputs = random_ops.random_uniform(
        [self.batch_size, self.height, self.width, self.in_channels])
    outputs_grads = [
        random_ops.random_uniform([
            self.batch_size, self.height // self.strides[1],
            self.width // self.strides[2], self.out_channels
        ]) for _ in range(3)
    ]
    factor = ff.ConvDiagonalFactor(
        inputs,
        outputs_grads,
        self.kernel_shape,
        self.strides,
        self.padding,
        data_format=self.data_format,
        has_bias=True)
    factor.instantiate_cov_variables()

    # Ensure shape accounts for bias.
    self.assertEqual(
        [
            self.kernel_height * self.kernel_width * self.in_channels + 1,
            self.out_channels
        ],
        factor.get_cov_var().shape.as_list())

    # Ensure update op doesn't crash.
    cov_update_op = factor.make_covariance_update_op(0.0)
    with self.test_session() as sess:
      sess.run(tf_variables.global_variables_initializer())
      sess.run(cov_update_op)
Developer: AndrewTwinz, Project: tensorflow, Lines: 33, Source: fisher_factors_test.py
Example 4: test_while_jacobian
def test_while_jacobian(self):
  x = random_ops.random_uniform([1, 3])
  y = random_ops.random_uniform([3, 3])
  # out = x @ y @ y @ y @ y, where @ is the matmul operator.
  _, out = control_flow_ops.while_loop(
      lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
      [0, x])

  def loop_fn(i):
    out_i = array_ops.gather(out, i, axis=1)
    return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])

  out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
  # The above code does not work with tf.while_loop instead of pfor, so we
  # manually compute the expected output here.
  # Note that the gradient of the output w.r.t. x is (y @ y @ y @ y)^T.
  expected_output = y
  for _ in range(3):
    expected_output = math_ops.matmul(expected_output, y)
  expected_output = array_ops.transpose(expected_output, [1, 0])

  with session.Session() as sess:
    out, expected = sess.run([out, expected_output])
    self.assertAllClose(expected, out)
Developer: aritratony, Project: tensorflow, Lines: 26, Source: control_flow_ops_test.py
Example 5: testImportGraphWithFunctionTwice
def testImportGraphWithFunctionTwice(self):
  g = ops.Graph()
  with g.as_default():

    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)

    x = array_ops.placeholder(dtype=dtypes.float32, name="x")
    y = array_ops.placeholder(dtype=dtypes.float32, name="y")
    _ = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg

  gdef = g.as_graph_def()

  x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  input_map = {"x:0": x, "y:0": y}

  with ops.name_scope("first"):
    z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]

  with ops.name_scope("second"):
    z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]

  with self.test_session() as sess:
    z1_val, z2_val = sess.run((z1, z2))
    self.assertAllEqual(z1_val, z2_val)
Developer: clsung, Project: tensorflow, Lines: 29, Source: importer_test.py
Example 6: testConstraints
def testConstraints(self):
  # Conv1D
  k_constraint = lambda x: x / math_ops.reduce_sum(x)
  b_constraint = lambda x: x / math_ops.reduce_max(x)
  conv1d = conv_layers.Conv1D(2, 3,
                              kernel_constraint=k_constraint,
                              bias_constraint=b_constraint)
  inputs = random_ops.random_uniform((5, 3, 5), seed=1)
  conv1d(inputs)
  self.assertEqual(conv1d.kernel_constraint, k_constraint)
  self.assertEqual(conv1d.bias_constraint, b_constraint)

  # Conv2D
  k_constraint = lambda x: x / math_ops.reduce_sum(x)
  b_constraint = lambda x: x / math_ops.reduce_max(x)
  conv2d = conv_layers.Conv2D(2, 3,
                              kernel_constraint=k_constraint,
                              bias_constraint=b_constraint)
  inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
  conv2d(inputs)
  self.assertEqual(conv2d.kernel_constraint, k_constraint)
  self.assertEqual(conv2d.bias_constraint, b_constraint)

  # Conv3D
  k_constraint = lambda x: x / math_ops.reduce_sum(x)
  b_constraint = lambda x: x / math_ops.reduce_max(x)
  conv3d = conv_layers.Conv3D(2, 3,
                              kernel_constraint=k_constraint,
                              bias_constraint=b_constraint)
  inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
  conv3d(inputs)
  self.assertEqual(conv3d.kernel_constraint, k_constraint)
  self.assertEqual(conv3d.bias_constraint, b_constraint)
Developer: AndrewTwinz, Project: tensorflow, Lines: 33, Source: convolutional_test.py
Example 7: test_good_kernel_approximation_multiple_inputs
def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
                                                   exact_kernel_fn):
  # Parameters.
  input_dim = 5
  output_dim = 5000
  x_rows = 20
  y_rows = 30

  random_seed.set_random_seed(1234)
  x = random_ops.random_uniform(shape=(x_rows, input_dim), maxval=1.0)
  y = random_ops.random_uniform(shape=(y_rows, input_dim), maxval=1.0)

  rff_layer = kernel_layers.RandomFourierFeatures(
      output_dim=output_dim,
      kernel_initializer=initializer,
      scale=scale,
      name='random_fourier_features')

  # The shapes of output_x and output_y are (x_rows, output_dim) and
  # (y_rows, output_dim) respectively.
  output_x = math.sqrt(2.0 / output_dim) * rff_layer.apply(x)
  output_y = math.sqrt(2.0 / output_dim) * rff_layer.apply(y)

  approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y)
  exact_kernel_matrix = exact_kernel_fn(x, y)
  self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.1)
Developer: rmlarsen, Project: tensorflow, Lines: 26, Source: kernelized_test.py
Example 8: input_fn
def input_fn():
  start = random_ops.random_uniform(
      (), minval=0, maxval=sequence_length, dtype=dtypes.int32, seed=seed)
  # Concatenate lyrics_list so inputs and labels wrap when start > 0.
  lyrics_list_concat = lyrics_list + lyrics_list
  inputs_dense = array_ops.slice(lyrics_list_concat, [start],
                                 [sequence_length])
  indices = array_ops.constant(
      [[i, 0] for i in range(sequence_length)], dtype=dtypes.int64)
  dense_shape = [sequence_length, 1]
  inputs = sparse_tensor.SparseTensor(
      indices=indices, values=inputs_dense, dense_shape=dense_shape)
  table = lookup.string_to_index_table_from_tensor(
      mapping=list(vocab), default_value=-1, name='lookup')
  labels = table.lookup(
      array_ops.slice(lyrics_list_concat, [start + 1], [sequence_length]))
  input_key = string_ops.string_join([
      'key_', string_ops.as_string(
          random_ops.random_uniform(
              (),
              minval=0,
              maxval=10000000,
              dtype=dtypes.int32,
              seed=seed))
  ])
  return {'lyrics': inputs, input_key_column_name: input_key}, labels
Developer: Jackhuang945, Project: tensorflow, Lines: 26, Source: state_saving_rnn_estimator_test.py
Example 9: _testKLPenaltyBoth
def _testKLPenaltyBoth(self, layer_class):
  def _make_normal(dtype, *args):  # pylint: disable=unused-argument
    return normal_lib.Normal(
        loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.))

  with self.test_session():
    layer = layer_class(
        filters=2,
        kernel_size=3,
        bias_posterior_fn=prob_layers_util.default_mean_field_normal_fn(),
        bias_prior_fn=_make_normal)
    if layer_class == prob_layers_lib.Conv1DVariational:
      inputs = random_ops.random_uniform([2, 3, 1], seed=1)
    elif layer_class == prob_layers_lib.Conv2DVariational:
      inputs = random_ops.random_uniform([2, 3, 3, 1], seed=1)
    elif layer_class == prob_layers_lib.Conv3DVariational:
      inputs = random_ops.random_uniform([2, 3, 3, 3, 1], seed=1)

    # No keys.
    losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(losses), 0)
    self.assertListEqual(layer.losses, losses)

    _ = layer(inputs)

    # Yes keys.
    losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(losses), 2)
    self.assertListEqual(layer.losses, losses)
Developer: AnddyWang, Project: tensorflow, Lines: 28, Source: layers_conv_variational_test.py
Example 10: model_fn
def model_fn():
  """Mnist model with synthetic input."""
  data_format = 'channels_last'
  input_shape = [28, 28, 1]
  l = keras.layers
  max_pool = l.MaxPooling2D((2, 2), (2, 2),
                            padding='same',
                            data_format=data_format)
  model = keras.Sequential([
      l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
      l.Conv2D(
          32,
          5,
          padding='same',
          data_format=data_format,
          activation=nn.relu), max_pool,
      l.Conv2D(
          64,
          5,
          padding='same',
          data_format=data_format,
          activation=nn.relu), max_pool,
      l.Flatten(),
      l.Dense(1024, activation=nn.relu),
      l.Dropout(0.4),
      l.Dense(10)
  ])
  image = random_ops.random_uniform([2, 28, 28])
  label = random_ops.random_uniform([2, 1], maxval=10, dtype=dtypes.int32)
  logits = model(image, training=True)
  loss = losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
  optimizer = adam.AdamOptimizer(learning_rate=1e-4)
  train_op = optimizer.minimize(loss,
                                training_util.get_or_create_global_step())
  return train_op
Developer: zhaoyongke, Project: tensorflow, Lines: 35, Source: collective_all_reduce_strategy_test.py
Example 11: test_binary_cwise_ops
def test_binary_cwise_ops(self):
  logical_ops = [
      math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor
  ]
  bool_ops = [
      math_ops.less, math_ops.less_equal, math_ops.greater,
      math_ops.greater_equal, math_ops.equal, math_ops.not_equal
  ]
  float_ops = [
      math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.divide,
      math_ops.maximum, math_ops.minimum
  ]
  for op in logical_ops + bool_ops + float_ops:
    x = random_ops.random_uniform([7, 3, 5])
    y = random_ops.random_uniform([3, 5])
    if op in logical_ops:
      x = x > 0
      y = y > 0

    # pylint: disable=cell-var-from-loop
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      y1 = array_ops.gather(y, i)
      return op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)
    # pylint: enable=cell-var-from-loop

    dtype = dtypes.float32 if op in float_ops else dtypes.bool
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtype] * 5)
Developer: LongJun123456, Project: tensorflow, Lines: 29, Source: control_flow_ops_test.py
Example 12: testLargeCase
def testLargeCase(self):
  shape = [32, 512, 256, 1]
  predictions = random_ops.random_uniform(
      shape, 0.0, 1.0, dtype=dtypes_lib.float32)
  labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)
  result, update_op = metric_ops.precision_recall_at_equal_thresholds(
      labels=labels, predictions=predictions, num_thresholds=201)
  # Run many updates, enough to cause highly inaccurate values if the
  # code used float32 for accumulation.
  num_updates = 71

  with self.test_session() as sess:
    sess.run(variables.local_variables_initializer())
    for _ in xrange(num_updates):
      sess.run(update_op)

    prdata = sess.run(result)

    # Since we use random values, we won't know the tp/fp/tn/fn values, but
    # tp and fp at threshold 0 should be the total number of positive and
    # negative labels, hence their sum should be the total number of pixels.
    expected_value = 1.0 * np.product(shape) * num_updates
    got_value = prdata.tp[0] + prdata.fp[0]
    # They should be at least within 1.
    self.assertNear(got_value, expected_value, 1.0)
Developer: BhaskarNallani, Project: tensorflow, Lines: 26, Source: metric_ops_large_test.py
Example 13: testGradientFloat16
def testGradientFloat16(self):
  with self.test_session(use_gpu=True) as sess:
    # Randomly construct a 1D shape from [1, 40).
    shape = random_ops.random_uniform(
        [1], minval=1, maxval=40, dtype=dtypes.int32)

    # Construct the fp32 graph and its gradient.
    x = random_ops.random_uniform(shape, minval=-1, maxval=1, name="x")
    y1 = nn_ops.relu(x, name="relu_fp32")
    l1 = nn_ops.l2_loss(y1)
    dx_f32 = gradients_impl.gradients(l1, x)

    # Construct the fp16 graph and its gradient.
    # It starts with the same x, in fp32. But before it reaches Relu, it is
    # cast into fp16, so during backprop the gradient computation is in fp16.
    x2 = math_ops.cast(x, dtype=dtypes.float16, name="cast")
    y2 = nn_ops.relu(x2, name="relu_fp16")
    l2 = nn_ops.l2_loss(y2)
    dx_f16 = gradients_impl.gradients(l2, x)

    # Repeat the experiment 100 times. All tensor shapes and their tensor
    # values are randomly generated for each run.
    for _ in xrange(100):
      dx_f32_v, dx_f16_v = sess.run([dx_f32, dx_f16])
      self.assertAllClose(dx_f32_v, dx_f16_v, atol=3e-4)
Developer: HughKu, Project: tensorflow, Lines: 25, Source: relu_op_test.py
Example 14: testVirtualCluster
def testVirtualCluster(self):
  with ops.Graph().as_default() as g:
    a = random_ops.random_uniform(shape=())
    b = random_ops.random_uniform(shape=())
    c = a + b
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(c)
    mg = meta_graph.create_meta_graph_def(graph=g)
    grappler_item = item.Item(mg)

    device_properties = device_properties_pb2.DeviceProperties(
        type='GPU',
        frequency=1000,
        num_cores=60,
        environment={
            'architecture': '7'
        })
    named_device = device_properties_pb2.NamedDevice(
        properties=device_properties, name='/GPU:0')
    grappler_cluster = cluster.Cluster(devices=[named_device])

    op_perfs, run_time, _ = grappler_cluster.MeasureCosts(grappler_item)
    self.assertGreater(run_time, 0)
    self.assertEqual(len(op_perfs), 15)

    estimated_perf = grappler_cluster.EstimatePerformance(named_device)
    self.assertEqual(7680.0, estimated_perf)
Developer: ChengYuXiang, Project: tensorflow, Lines: 25, Source: cluster_test.py
Example 15: testCustomGrad
def testCustomGrad(self):
  def fn(a, b, c):
    return core_layers.dense(a, 10, use_bias=False) + math_ops.matmul(b, c)

  def grad_fn(inputs, trainable_variables, unused_outputs,
              unused_grad_outputs):
    grad_inputs = [
        array_ops.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)
    ]
    grad_vars = [
        array_ops.ones_like(t) * (i + len(inputs) + 1.)
        for i, t in enumerate(trainable_variables)
    ]
    return grad_inputs, grad_vars

  a = random_ops.random_uniform([11, 6])
  b = random_ops.random_uniform([11, 7])
  c = random_ops.random_uniform([7, 10])
  w = random_ops.random_uniform([6, 10])
  out = rev_block_lib._fn_with_custom_grad(grad_fn)(fn)(a, b, c)
  loss = math_ops.reduce_mean(out)
  grads = gradients_impl.gradients(
      loss, [a, b, c, variables.trainable_variables()[0]])
  expected_grads = [
      array_ops.ones_like(t) * (i + 1.) for i, t in enumerate([a, b, c, w])
  ]

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    g_val, eg_val = sess.run([grads, expected_grads])
    for g1, g2 in zip(g_val, eg_val):
      self.assertAllClose(g1, g2)
Developer: bikong2, Project: tensorflow, Lines: 32, Source: rev_block_lib_test.py
Example 16: make_relaxed_categorical
def make_relaxed_categorical(batch_shape, num_classes, dtype=dtypes.float32):
  logits = random_ops.random_uniform(
      list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.
  temperatures = random_ops.random_uniform(
      list(batch_shape), 0.1, 10, dtype=dtypes.float32)
  return relaxed_onehot_categorical.RelaxedOneHotCategorical(
      temperatures, logits, dtype=dtype)
Developer: 1000sprites, Project: tensorflow, Lines: 7, Source: relaxed_onehot_categorical_test.py
Example 17: testInit
def testInit(self):
  with tf_ops.Graph().as_default():
    inputs = random_ops.random_uniform(
        [self.batch_size, self.height, self.width, self.in_channels])
    outputs_grads = [
        random_ops.random_uniform([
            self.batch_size, self.height // self.strides[1],
            self.width // self.strides[2], self.out_channels
        ]) for _ in range(3)
    ]
    factor = ff.ConvDiagonalFactor(
        inputs,
        outputs_grads,
        self.kernel_shape,
        self.strides,
        self.padding,
        data_format=self.data_format)
    factor.instantiate_cov_variables()

    # Ensure the covariance matrix's shape makes sense.
    self.assertEqual(
        [
            self.kernel_height * self.kernel_width * self.in_channels,
            self.out_channels
        ],
        factor.get_cov_var().shape.as_list())
Developer: AndrewTwinz, Project: tensorflow, Lines: 26, Source: fisher_factors_test.py
Example 18: build_graph
def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):
  """Build a graph containing a sequence of matmul operations.

  Args:
    device: String, the device to run on.
    n: tensor A's first dimension size.
    m: tensor A's second dimension size.
    k: tensor B's second dimension size.
    transpose_a: boolean value to show if tensor A is transposed.
    transpose_b: boolean value to show if tensor B is transposed.
    dtype: numpy data type of the input tensor.

  Returns:
    A matmul operation to run()
  """
  with ops.device('%s' % device):
    if not transpose_a:
      x = variables.VariableV1(random_ops.random_uniform([n, m], dtype=dtype),
                               use_resource=False)
    else:
      x = variables.VariableV1(random_ops.random_uniform([m, n], dtype=dtype),
                               use_resource=False)
    if not transpose_b:
      y = variables.VariableV1(random_ops.random_uniform([m, k], dtype=dtype),
                               use_resource=False)
    else:
      y = variables.VariableV1(random_ops.random_uniform([k, m], dtype=dtype),
                               use_resource=False)
    z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)
    return control_flow_ops.group(z)
Developer: adit-chandra, Project: tensorflow, Lines: 31, Source: matmul_benchmark.py
Example 19: testExtractPointwiseConv2dPatches
def testExtractPointwiseConv2dPatches(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    batch_size = 10
    image_height = image_width = 8
    in_channels = out_channels = 3
    kernel_height = kernel_width = 1
    strides = [1, 1, 1, 1]
    padding = 'VALID'

    images = random_ops.random_uniform(
        [batch_size, image_height, image_width, in_channels], seed=0)
    kernel_shape = [kernel_height, kernel_width, in_channels, out_channels]
    kernel = random_ops.random_uniform(kernel_shape, seed=1)

    # Ensure shape matches expectation.
    patches = utils.extract_pointwise_conv2d_patches(images, kernel_shape)
    self.assertEqual(patches.shape.as_list(), [
        batch_size, image_height, image_width, kernel_height, kernel_width,
        in_channels
    ])

    # Ensure the extract...patches() + matmul() and conv2d() implementations
    # give the same answer.
    outputs = nn_ops.conv2d(images, kernel, strides, padding)

    patches_flat = array_ops.reshape(
        patches, [-1, kernel_height * kernel_width * in_channels])
    kernel_flat = array_ops.reshape(kernel, [-1, out_channels])
    outputs_flat = math_ops.matmul(patches_flat, kernel_flat)

    outputs_, outputs_flat_ = sess.run([outputs, outputs_flat])
    self.assertAllClose(outputs_.flatten(), outputs_flat_.flatten())
Developer: AndrewTwinz, Project: tensorflow, Lines: 32, Source: utils_test.py
Example 20: testAttentionCellWrapperCorrectResult
def testAttentionCellWrapperCorrectResult(self):
  num_units = 4
  attn_length = 6
  batch_size = 2
  expected_output = np.array(
      [[1.068372, 0.45496, -0.678277, 0.340538],
       [1.018088, 0.378983, -0.572179, 0.268591]],
      dtype=np.float32)
  expected_state = np.array(
      [[0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
        0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
        0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
        0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
        0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
        0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
        0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
        0.51843399],
       [0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
        0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
        0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
        0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
        0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
        0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
        0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
        0.70582712]],
      dtype=np.float32)
  seed = 12345
  random_seed.set_random_seed(seed)
  for state_is_tuple in [False, True]:
    with session.Session() as sess:
      with variable_scope.variable_scope(
          "state_is_tuple", reuse=state_is_tuple,
          initializer=init_ops.glorot_uniform_initializer()):
        lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
            num_units, state_is_tuple=state_is_tuple)
        cell = rnn_cell.AttentionCellWrapper(
            lstm_cell, attn_length, state_is_tuple=state_is_tuple)
        zeros1 = random_ops.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
        zeros2 = random_ops.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
        zeros3 = random_ops.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
        attn_state_zeros = random_ops.random_uniform(
            (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
        zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
        if not state_is_tuple:
          zero_state = array_ops.concat([
              zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
          ], 1)
        inputs = random_ops.random_uniform(
            (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
        output, state = cell(inputs, zero_state)
        if state_is_tuple:
          state = array_ops.concat(
              [state[0][0], state[0][1], state[1], state[2]], 1)
        sess.run(variables.global_variables_initializer())
        self.assertAllClose(sess.run(output), expected_output)
        self.assertAllClose(sess.run(state), expected_state)
Developer: Jackhuang945, Project: tensorflow, Lines: 59, Source: rnn_cell_test.py
Note: the tensorflow.python.ops.random_ops.random_uniform examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not repost without permission.