This article collects representative usage examples of Python's tensorflow.global_variables_initializer function. If you are wondering how global_variables_initializer is used in practice, what it is for, or what real code that calls it looks like, the curated examples below should help.
Twenty code examples of global_variables_initializer are presented, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
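Before the examples, here is a minimal sketch of the typical pattern in TensorFlow 1.x graph mode: tf.global_variables_initializer() returns a single op that bundles the initializers of all global variables, and it must be run in a session before any variable is read. The variable names below are illustrative only and do not come from the examples that follow.

import tensorflow as tf

# Two uninitialized global variables in the default graph.
weights = tf.Variable(tf.random_normal([2, 2]), name="weights")
bias = tf.Variable(tf.zeros([2]), name="bias")

init_op = tf.global_variables_initializer()  # groups all variable initializers

with tf.Session() as sess:
    sess.run(init_op)  # same effect as init_op.run() inside the session
    print(sess.run([weights, bias]))  # reading them before init_op would fail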
Example 1: testAdagradDAWithL1
def testAdagradDAWithL1(self):
    for dtype in [tf.float64, tf.float32]:
        with self.test_session() as sess:
            global_step = tf.Variable(0, dtype=tf.int64)
            var0 = tf.Variable([1.0, 2.0], dtype=dtype)
            var1 = tf.Variable([4.0, 3.0], dtype=dtype)
            grads0 = tf.constant([0.1, 0.2], dtype=dtype)
            grads1 = tf.constant([0.01, 0.02], dtype=dtype)
            opt = tf.train.AdagradDAOptimizer(
                3.0,
                global_step,
                initial_gradient_squared_accumulator_value=0.1,
                l1_regularization_strength=0.001,
                l2_regularization_strength=0.0)
            update = opt.apply_gradients(
                zip([grads0, grads1], [var0, var1]), global_step=global_step)
            tf.global_variables_initializer().run()

            v0_val, v1_val = sess.run([var0, var1])
            self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
            self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

            # Run a step of AdagradDA
            update.run()

            v0_val, v1_val = sess.run([var0, var1])
            self.assertAllCloseAccordingToType(
                np.array([-0.895489, -1.59555]), v0_val)
            self.assertAllCloseAccordingToType(
                np.array([-0.085339, -0.17989]), v1_val)
Author: ComeOnGetMe | Project: tensorflow | Lines: 31 | Source: adagrad_da_test.py
Example 2: testYesShuffle
def testYesShuffle(self):
    id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
                                work_units=self.work_units,
                                batch_size=1,
                                shuffle=True,
                                num_threads=10,
                                seed=1234)
    index_column, value_column = id_source()
    cache = {}
    index_tensor = index_column.build(cache)
    value_tensor = value_column.build(cache)
    self.assertEqual([1], index_tensor.get_shape().as_list())
    self.assertEqual([1], value_tensor.get_shape().as_list())
    seen = set([])
    with self.test_session() as sess:
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(500):
            index, value = sess.run([index_tensor, value_tensor])
            self.assertEqual(index, value)
            self.assertNotIn(int(value[0]), seen)
            seen.add(int(value[0]))
        coord.request_stop()
        coord.join(threads)
Author: ComeOnGetMe | Project: tensorflow | Lines: 25 | Source: reader_source_test.py
Example 3: init_training_graph
def init_training_graph(self):
    with tf.name_scope('Evaluation'):
        # self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1, 1, 1, 1], scope_name="logits/")
        with tf.name_scope("logits/"):
            self.logits2 = tf.nn.conv2d(self.last, self.logits_weight, strides=[1, 1, 1, 1], padding="VALID")
            self.logits = tf.nn.bias_add(self.logits2, self.logits_biases)
        self.predictions = self.logits
        # self.predictions = tf.squeeze(self.logits, [3])
        # softmax = tf.nn.softmax(self.logits)
        # print softmax.get_shape()
        # self.predictions = tf.slice(softmax, [0, 0, 0, 0], [-1, -1, -1, 1])
        with tf.name_scope('Loss'):
            self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.logits, self.train_labels_node))
            # self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.predictions, self.train_labels_node))
            tf.summary.scalar("mean_squared_error", self.loss)
        self.predictions = tf.squeeze(self.predictions, [3])
        self.train_prediction = self.predictions
        self.test_prediction = self.predictions
    tf.global_variables_initializer().run()
    print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 25 | Source: UNet_Normalized.py
Example 4: testRasterScanKernel
def testRasterScanKernel(self):
    kernel_size = 5
    input_depth = 1
    output_depth = 1
    kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
    # pylint: disable=bad-whitespace
    kernel_feed = [[ 1.0,  2.0,  3.0,  4.0,  5.0],
                   [ 6.0,  7.0,  8.0,  9.0, 10.0],
                   [11.0, 12.0, 13.0, 14.0, 15.0],
                   [16.0, 17.0, 18.0, 19.0, 20.0],
                   [21.0, 22.0, 23.0, 24.0, 25.0]]
    kernel_feed = np.reshape(kernel_feed, kernel_shape)
    kernel_expected = [[ 1.0,  2.0,  3.0,  4.0,  5.0],
                       [ 6.0,  7.0,  8.0,  9.0, 10.0],
                       [11.0, 12.0,  0.0,  0.0,  0.0],
                       [ 0.0,  0.0,  0.0,  0.0,  0.0],
                       [ 0.0,  0.0,  0.0,  0.0,  0.0]]
    kernel_expected = np.reshape(kernel_expected, kernel_shape)
    # pylint: enable=bad-whitespace
    init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
    masked_conv2d = blocks_masked_conv2d.RasterScanConv2D(
        output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
        initializer=init_kernel)
    x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])
    _ = masked_conv2d(x)
    with self.test_session():
        tf.global_variables_initializer().run()
        kernel_value = masked_conv2d._kernel.eval()
        self.assertAllEqual(kernel_expected, kernel_value)
Author: Hukongtao | Project: models | Lines: 33 | Source: blocks_masked_conv2d_test.py
Example 5: basic_operation
def basic_operation():
    v1 = tf.Variable(10)
    v2 = tf.Variable(5)
    addv = v1 + v2
    print(addv)
    print(type(addv))
    print(type(v1))

    c1 = tf.constant(10)
    c2 = tf.constant(5)
    addc = c1 + c2
    print(addc)
    print(type(addc))
    print(type(c1))

    # The object/instance used to run the computation graph.
    # A session is a runtime.
    sess = tf.Session()

    # Variable -> initialization -> a Tensor holding a value.
    tf.global_variables_initializer().run(session=sess)

    print('Variables must be initialized')
    print('add(v1, v2) = ', addv.eval(session=sess))
    print('add(v1, v2) = ', sess.run(addv))
    print('add(c1, c2) = ', addc.eval(session=sess))
Author: benjaminhuanghuang | Project: ml_playground | Lines: 26 | Source: basic.py
Example 6: testMultipleDequeue
def testMultipleDequeue(self):
    with self.test_session() as sess:
        batch_size = 10
        image_size = 32
        num_batches = 4
        zero64 = tf.constant(0, dtype=tf.int64)
        examples = tf.Variable(zero64)
        counter = examples.count_up_to(num_batches * batch_size)
        image = tf.random_normal([image_size, image_size, 3], dtype=tf.float32, name="images")
        label = tf.random_uniform([1], 0, 10, dtype=tf.int32, name="labels")
        batches = tf.train.batch([counter, image, label], batch_size=batch_size, num_threads=4)
        batcher = slim.prefetch_queue.prefetch_queue(batches)
        batches_list = [batcher.dequeue() for _ in range(2)]
        tf.global_variables_initializer().run()
        threads = tf.train.start_queue_runners()

        value_counter = []
        for _ in range(int(num_batches / 2)):
            for batches in batches_list:
                results = sess.run(batches)
                value_counter.append(results[0])
                self.assertEquals(results[1].shape, (batch_size, image_size, image_size, 3))
                self.assertEquals(results[2].shape, (batch_size, 1))
        self.assertAllEqual(np.sort(np.concatenate(value_counter)), np.arange(0, num_batches * batch_size))

        # Reached the limit.
        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run(batches)
        for thread in threads:
            thread.join()
Author: brchiu | Project: tensorflow | Lines: 35 | Source: prefetch_queue_test.py
Example 7: train
def train(data_dir, checkpoint_path, config):
    """Trains the model with the given data.

    Args:
        data_dir: path to the data for the model (see data_utils for data
            format)
        checkpoint_path: the path to save the trained model checkpoints
        config: one of the above configs that specify the model and how it
            should be run and trained

    Returns:
        None
    """
    # Prepare Name data.
    print("Reading Name data in %s" % data_dir)
    names, counts = data_utils.read_names(data_dir)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = NamignizerModel(is_training=True, config=config)

        tf.global_variables_initializer().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

            m.saver.save(session, checkpoint_path, global_step=i)
Author: ALISCIFP | Project: models | Lines: 35 | Source: names.py
Example 8: testDenseFeaturesSeparableWithinMargins
def testDenseFeaturesSeparableWithinMargins(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
            weights=[1.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        # (1.0, 0.5) and (1.0, -0.5) are separable by the x-axis but the
        # datapoints are within the margins, so there is unregularized loss
        # (1/2 per example). For these datapoints, the optimal weights are
        # w_1~=0.0 and w_2~=1.0, which gives an L2 loss of ~0.25.
        self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Author: curtiszimmerman | Project: tensorflow | Lines: 29 | Source: sdca_ops_test.py
Example 9: testDenseFeaturesWeightedExamples
def testDenseFeaturesWeightedExamples(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
            weights=[3.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        # Point (1.0, 0.5) has higher weight than (1.0, -0.5), so the model
        # will try to increase the margin from (1.0, 0.5). Due to
        # regularization, (1.0, -0.5) will be within the margin. For these
        # points and example weights, the optimal weights are w_1~=0.4 and
        # w_2~=1.2, which give an L2 loss of 0.5 * 0.25 * (0.16 + 1.44) = 0.2
        # (after normalizing by the sum of example weights, 4). The binary
        # predictions will be correct, but the boundary will be much closer to
        # the 2nd point than the first one.
        self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Author: curtiszimmerman | Project: tensorflow | Lines: 31 | Source: sdca_ops_test.py
Example 10: testDenseFeaturesWithArbitraryWeights
def testDenseFeaturesWithArbitraryWeights(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
            weights=[20.0, 10.0],
            labels=[10.0, -5.0])
        options = dict(symmetric_l2_regularization=5.0,
                       symmetric_l1_regularization=0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # The loss function for these particular features is given by:
        #   1/2 s_1 (label_1 - w_1)^2 + 1/2 s_2 (label_2 - w_2)^2
        #   + \lambda/2 (w_1^2 + w_2^2),
        # where s_1, s_2 are the example weights. It turns out that the
        # optimal (variable) weights are given by:
        #   w_1* = label_1 * s_1 / (\lambda + s_1) = 8.0 and
        #   w_2* = label_2 * s_2 / (\lambda + s_2) = -10/3.
        # In this case the (unnormalized regularized) loss will be:
        #   s_1/2 (8 - 10)^2 + s_2/2 (5 - 10/3)^2 + 5.0/2 (8^2 + (10/3)^2)
        #   = 2175.0/9.
        # The actual loss should be further normalized by the sum of example
        # weights.
        self.assertAllClose([8.0, -10.0 / 3],
                            predictions.eval(),
                            rtol=0.01)
        loss = lr.regularized_loss(examples)
        self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
Author: curtiszimmerman | Project: tensorflow | Lines: 32 | Source: sdca_ops_test.py
Example 11: testDenseFeaturesPerfectlySeparable
def testDenseFeaturesPerfectlySeparable(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
            weights=[1.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(
            symmetric_l2_regularization=1.0,
            symmetric_l1_regularization=0,
            loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())

        # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by the x-axis
        # (that is, the SVM's functional margin >= 1), so the unregularized
        # loss is ~0.0. There is only loss due to l2-regularization. For these
        # datapoints, it turns out that w_1~=0.0 and w_2~=1.0, which means the
        # l2 loss is ~0.25.
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
Author: curtiszimmerman | Project: tensorflow | Lines: 31 | Source: sdca_ops_test.py
Example 12: testDenseFeaturesWithDefaultWeights
def testDenseFeaturesWithDefaultWeights(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
            weights=[1.0, 1.0],
            labels=[10.0, -5.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # The loss function for these particular features is given by:
        #   1/2 (label_1 - w_1)^2 + 1/2 (label_2 - w_2)^2
        #   + \lambda/2 (w_1^2 + w_2^2).
        # So, differentiating wrt w_1, w_2 yields the following optimal values:
        #   w_1* = label_1 / (\lambda + 1) = 10/2,
        #   w_2* = label_2 / (\lambda + 1) = -5/2.
        # In this case the (unnormalized regularized) loss will be:
        #   1/2 (10 - 5)^2 + 1/2 (5 - 5/2)^2 + 1/2 (5^2 + (5/2)^2) = 125.0/4.
        # The actual loss should be further normalized by the sum of example
        # weights.
        self.assertAllClose([5.0, -2.5],
                            predictions.eval(),
                            rtol=0.01)
        loss = lr.regularized_loss(examples)
        self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
Author: curtiszimmerman | Project: tensorflow | Lines: 30 | Source: sdca_ops_test.py
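As a quick sanity check on the arithmetic in the comment of Example 12, the closed-form optimum and losses can be reproduced in a few lines of plain Python. This is a standalone sketch of the math, not part of the test; lam, labels, and w are hypothetical names.

lam = 1.0                                   # symmetric_l2_regularization
labels = [10.0, -5.0]
# Optimal weights from the comment: w* = label / (lambda + 1).
w = [label / (lam + 1) for label in labels]
unnormalized = sum(0.5 * (l - wi) ** 2 for l, wi in zip(labels, w)) \
             + 0.5 * lam * sum(wi ** 2 for wi in w)
print(w)                 # [5.0, -2.5], the asserted predictions
print(unnormalized)      # 31.25 == 125.0 / 4
print(unnormalized / 2)  # 15.625 == 125.0 / 8, the asserted normalized loss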
Example 13: testL1Regularization
def testL1Regularization(self):
    # Setup test data.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1)
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=4.0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        prediction = lr.predictions(examples)
        loss = lr.regularized_loss(examples)

        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # Predictions should be -4.0 and 20/3, from minimizing the regularized
        # loss per example, where w is the common value of the example's two
        # weights:
        #   (label - 2w)^2 / 2 + L2 * w^2 + 2 * L1 * |w|
        self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
        # Loss should be the sum of the regularized loss value from above per
        # example after plugging in the optimal weights.
        self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
Author: curtiszimmerman | Project: tensorflow | Lines: 34 | Source: sdca_ops_test.py
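The expectation in the comment of Example 13 (-4.0 and 20/3, rather than the stale 48/5 that appeared in the upstream comment) can be checked numerically. This sketch assumes the per-example objective written in that comment, with both of an example's weights tied to a common value w by symmetry; it is a verification of the arithmetic, not the SDCA implementation itself, and f, ws, and w_star are hypothetical names.

import numpy as np

L1, L2 = 4.0, 1.0

def f(w, label):
    # (label - 2w)^2 / 2 + L2 * w^2 + 2 * L1 * |w|
    return (label - 2 * w) ** 2 / 2 + L2 * w ** 2 + 2 * L1 * abs(w)

ws = np.linspace(-10.0, 10.0, 2000001)  # dense grid search over w
for label in (-10.0, 14.0):
    w_star = ws[np.argmin(f(ws, label))]
    print(label, 2 * w_star)  # predictions: -4.0 and ~6.6667 (= 20/3)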
Example 14: testFractionalExampleLabel
def testFractionalExampleLabel(self):
    # Setup test data with 1 positive, and 1 mostly-negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0.1),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           num_table_shards=num_shards,
                           loss_type='logistic_loss')
            lr = SdcaModel(examples, variables, options)
            tf.global_variables_initializer().run()
            with self.assertRaisesOpError(
                    'Only labels of 0.0 or 1.0 are supported right now.'):
                lr.minimize().run()
Author: curtiszimmerman | Project: tensorflow | Lines: 25 | Source: sdca_ops_test.py
Example 15: testMultiLabelWithCenteredBias
def testMultiLabelWithCenteredBias(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with tf.Graph().as_default(), tf.Session():
        logits = tf.constant([[1., 0., 0.]])
        labels = tf.constant([[0, 0, 1]])
        model_fn_ops = head.head_ops({}, labels,
                                     tf.contrib.learn.ModeKeys.TRAIN,
                                     _noop_train_op, logits=logits)
        _assert_variables(self, expected_global=(
            "centered_bias_weight:0",
            "centered_bias_weight/Adagrad:0",
        ), expected_trainable=(
            "centered_bias_weight:0",
        ))
        tf.global_variables_initializer().run()
        _assert_summary_tags(self, ["loss",
                                    "centered_bias/bias_0",
                                    "centered_bias/bias_1",
                                    "centered_bias/bias_2"])
        expected_loss = .89985204
        _assert_metrics(
            self, expected_loss, self._expected_eval_metrics(expected_loss),
            model_fn_ops)
Author: kdavis-mozilla | Project: tensorflow | Lines: 26 | Source: head_test.py
Example 16: testGradientsAsVariables
def testGradientsAsVariables(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
        with self.test_session() as sess:
            var0 = tf.Variable([1.0, 2.0], dtype=dtype)
            var1 = tf.Variable([3.0, 4.0], dtype=dtype)
            cost = 5 * var0 + 3 * var1
            global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
            sgd_op = tf.train.GradientDescentOptimizer(3.0)
            grads_and_vars = sgd_op.compute_gradients(cost, [var0, var1])
            # Convert gradients to tf.Variables.
            converted_grads = [
                tf.Variable(tf.zeros([2], dtype)) for _ in grads_and_vars
            ]
            convert_ops = [
                tf.assign(converted_grads[i], gv[0])
                for i, gv in enumerate(grads_and_vars)
            ]
            converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
            opt_op = sgd_op.apply_gradients(converted_grads_and_vars, global_step)

            tf.global_variables_initializer().run()
            # Run convert_ops to perform the gradient conversion.
            sess.run(convert_ops)
            # Fetch params to validate initial values.
            self.assertAllClose([1.0, 2.0], var0.eval())
            self.assertAllClose([3.0, 4.0], var1.eval())
            # Run 1 step of sgd through the optimizer.
            opt_op.run()
            # Validate updated params.
            self.assertAllClose([-14., -13.], var0.eval())
            self.assertAllClose([-6., -5.], var1.eval())
Author: curtiszimmerman | Project: tensorflow | Lines: 32 | Source: optimizer_test.py
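The expected values in Example 16 follow from a single SGD step: the gradient of cost = 5 * var0 + 3 * var1 is 5 for every element of var0 and 3 for every element of var1, and the learning rate is 3.0. A standalone check of that arithmetic (the names below are illustrative, not from the test):

lr = 3.0
var0, var1 = [1.0, 2.0], [3.0, 4.0]
grad0, grad1 = 5.0, 3.0  # d(cost)/d(var0) and d(cost)/d(var1), per element
print([v - lr * grad0 for v in var0])  # [-14.0, -13.0]
print([v - lr * grad1 for v in var1])  # [-6.0, -5.0]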
Example 17: testOneThread
def testOneThread(self):
    with self.test_session() as sess:
        batch_size = 10
        image_size = 32
        num_batches = 5
        zero64 = tf.constant(0, dtype=tf.int64)
        examples = tf.Variable(zero64)
        counter = examples.count_up_to(num_batches * batch_size)
        image = tf.random_normal([image_size, image_size, 3], dtype=tf.float32, name="images")
        label = tf.random_uniform([1], 0, 10, dtype=tf.int32, name="labels")
        batches = tf.train.batch([counter, image, label], batch_size=batch_size, num_threads=1)
        batches = slim.prefetch_queue.prefetch_queue(batches).dequeue()
        tf.global_variables_initializer().run()
        threads = tf.train.start_queue_runners()

        for i in range(num_batches):
            results = sess.run(batches)
            self.assertAllEqual(results[0], np.arange(i * batch_size, (i + 1) * batch_size))
            self.assertEquals(results[1].shape, (batch_size, image_size, image_size, 3))
            self.assertEquals(results[2].shape, (batch_size, 1))

        # Reached the limit.
        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run(batches)
        for thread in threads:
            thread.join()
Author: brchiu | Project: tensorflow | Lines: 31 | Source: prefetch_queue_test.py
Example 18: testBasicLSTMCell
def testBasicLSTMCell(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m = tf.zeros([1, 8])
            g, out_m = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)] * 2,
                state_is_tuple=False)(x, m)
            sess.run([tf.global_variables_initializer()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 8])})
            self.assertEqual(len(res), 2)
            # The numbers in results were not calculated; this is just a smoke test.
            self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
            expected_mem = np.array([[0.68967271, 0.68967271,
                                      0.44848421, 0.44848421,
                                      0.39897051, 0.39897051,
                                      0.24024698, 0.24024698]])
            self.assertAllClose(res[1], expected_mem)
        with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
            m = tf.zeros([1, 4])
            g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)(x, m)
            sess.run([tf.global_variables_initializer()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 4])})
            self.assertEqual(len(res), 2)
Author: brchiu | Project: tensorflow | Lines: 27 | Source: rnn_cell_test.py
Example 19: benchmark_create_1000_partitions_with_100_parameter_servers
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
    workers, _ = create_local_cluster(num_workers=1, num_ps=100)
    worker_sessions = [tf.Session(w.target) for w in workers]
    worker = worker_sessions[0]
    partition_sizes = (1, 512, 1024 * 32, 1024 * 128)

    partitioned = []
    for partition_size in partition_sizes:
        # max_shard_bytes is 4, shape is 1000 * partition_size float32s, which
        # should partition into 1000 shards, each containing partition_size
        # float32s.
        print("Building partitioned variable with %d floats per partition"
              % partition_size)
        with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
            partitioned_ix = tf.get_variable(
                "partitioned_%d" % partition_size,
                shape=[1000 * partition_size],
                dtype=tf.float32,
                # Each partition to have exactly N float32s.
                partitioner=tf.variable_axis_size_partitioner(
                    max_shard_bytes=4 * partition_size))
            # Concatenates along axis 0.
            partitioned.append(tf.convert_to_tensor(partitioned_ix))

    tf.global_variables_initializer().run(session=worker)

    for ix, partition_size in enumerate(partition_sizes):
        print("Running benchmark having partitions with %d floats"
              % partition_size)
        self.run_op_benchmark(
            worker,
            partitioned[ix],
            name=("read_concat_1000_partitions_from_"
                  "100_parameter_servers_partsize_%d_floats" % partition_size))
Author: ComeOnGetMe | Project: tensorflow | Lines: 34 | Source: localhost_cluster_performance_test.py
Example 20: testVariableCopy
def testVariableCopy(self):
    # graph1 and graph2 are assumed to be module-level tf.Graph() instances
    # defined elsewhere in the test file.
    with graph1.as_default():
        # Define a Variable in graph1.
        some_var = tf.Variable(2)
        # Initialize the session.
        sess1 = tf.Session()
        # Initialize the Variable.
        tf.global_variables_initializer().run(session=sess1)

    # Make a copy of some_var in the default scope in graph2.
    copy1 = tf.contrib.copy_graph.copy_variable_to_graph(
        some_var, graph2)

    # Make another copy with a different scope.
    copy2 = tf.contrib.copy_graph.copy_variable_to_graph(
        some_var, graph2, "test_scope")

    # Initialize both copies.
    with graph2.as_default():
        # Initialize the session.
        sess2 = tf.Session()
        # Initialize the Variables.
        tf.global_variables_initializer().run(session=sess2)

    # Ensure the values in all three variables are the same.
    v1 = some_var.eval(session=sess1)
    v2 = copy1.eval(session=sess2)
    v3 = copy2.eval(session=sess2)

    assert isinstance(copy1, tf.Variable)
    assert isinstance(copy2, tf.Variable)
    assert v1 == v2 == v3 == 2
Author: moolighty | Project: tensorflow | Lines: 33 | Source: copy_test.py
Note: The tensorflow.global_variables_initializer examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.