This article collects typical usage examples of Python's tensorflow.zeros function. If you have been wondering how exactly to use tf.zeros, what it is for, or how to call it in practice, the hand-picked code examples below should help.
Twenty code examples of the zeros function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
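Before diving into the examples, here is a minimal orientation sketch (not taken from any of the projects below): tf.zeros(shape, dtype) builds a tensor of the given shape filled with zeros, and the most common pattern in the examples that follow is using it to zero-initialize bias variables or RNN states. The snippet assumes the TensorFlow 1.x graph/session API that the examples below also use; the variable names are illustrative only.

import tensorflow as tf

# A constant tensor of zeros with shape [2, 3]; dtype defaults to float32.
z = tf.zeros([2, 3], dtype=tf.float32)

# The recurring pattern in the examples below: a zero-initialized bias variable.
bias = tf.Variable(tf.zeros([8]), name='bias')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # older snippets below use tf.initialize_all_variables()
    print(sess.run(z))     # [[0. 0. 0.] [0. 0. 0.]]
    print(sess.run(bias))  # [0. 0. 0. 0. 0. 0. 0. 0.]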
Example 1: make_variable_dict
def make_variable_dict(max_age, max_gender):
    # TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
    # examples_dict.
    age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32))
    gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32))
    return dict(sparse_features_weights=[age_weights, gender_weights],
                dense_features_weights=[])
Developer ID: curtiszimmerman, Project: tensorflow, Lines of code: 7, Source file: sdca_ops_test.py
Example 2: __init__
def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps, drop_out_rate, bias_init_vector=None):
    self.dim_image = dim_image
    self.n_words = n_words
    self.dim_hidden = dim_hidden
    self.batch_size = batch_size
    self.n_lstm_steps = n_lstm_steps
    self.drop_out_rate = drop_out_rate

    with tf.device("/cpu:0"):
        self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb')
    # self.Wemb_W = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb_W')
    # self.Wemb_b = tf.Variable(tf.random_uniform([dim_hidden], -0.1, 0.1), name='Wemb_b')

    # self.lstm3 = rnn_cell.BasicLSTMCell(dim_hidden)
    self.lstm3 = rnn_cell.LSTMCell(self.dim_hidden, 2 * self.dim_hidden, use_peepholes=True)
    self.lstm3_dropout = rnn_cell.DropoutWrapper(self.lstm3, output_keep_prob=1 - self.drop_out_rate)

    self.encode_image_W = tf.Variable(tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1), name='encode_image_W')
    self.encode_image_b = tf.Variable(tf.zeros([dim_hidden]), name='encode_image_b')

    self.embed_att_w = tf.Variable(tf.random_uniform([dim_hidden, 1], -0.1, 0.1), name='embed_att_w')
    self.embed_att_Wa = tf.Variable(tf.random_uniform([dim_hidden, dim_hidden], -0.1, 0.1), name='embed_att_Wa')
    self.embed_att_Ua = tf.Variable(tf.random_uniform([dim_hidden, dim_hidden], -0.1, 0.1), name='embed_att_Ua')
    self.embed_att_ba = tf.Variable(tf.zeros([dim_hidden]), name='embed_att_ba')

    self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1, 0.1), name='embed_word_W')
    if bias_init_vector is not None:
        self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')
    else:
        self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')

    self.embed_nn_Wp = tf.Variable(tf.random_uniform([3 * dim_hidden, dim_hidden], -0.1, 0.1), name='embed_nn_Wp')
    self.embed_nn_bp = tf.Variable(tf.zeros([dim_hidden]), name='embed_nn_bp')
Developer ID: KuoHaoZeng, Project: VH, Lines of code: 32, Source file: Att.py
Example 3: testBlockGRUToGRUCellSingleStep
def testBlockGRUToGRUCellSingleStep(self):
    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
        batch_size = 4
        cell_size = 5
        input_size = 6
        seed = 1994

        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)

        # Inputs
        x = tf.zeros([batch_size, input_size])
        h = tf.zeros([batch_size, cell_size])

        # Values for the inputs.
        x_value = np.random.rand(batch_size, input_size)
        h_value = np.random.rand(batch_size, cell_size)

        # Output from the basic GRU cell implementation.
        with tf.variable_scope("basic", initializer=initializer):
            output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            basic_res = sess.run([output], {x: x_value, h: h_value})

        # Output from the block GRU cell implementation.
        with tf.variable_scope("block", initializer=initializer):
            output = gru_ops.GRUBlockCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            block_res = sess.run([output], {x: x_value, h: h_value})

        self.assertEqual(len(block_res), len(basic_res))
        for block, basic in zip(block_res, basic_res):
            self.assertAllClose(block, basic)
Developer ID: damienmg, Project: tensorflow, Lines of code: 32, Source file: gru_ops_test.py
Example 4: __init__
def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps, drop_out_rate, bias_init_vector=None):
    self.dim_image = dim_image
    self.n_words = n_words
    self.dim_hidden = dim_hidden
    self.batch_size = batch_size
    self.n_lstm_steps = n_lstm_steps
    self.drop_out_rate = drop_out_rate

    with tf.device("/gpu:2"):
        self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb')

    # self.lstm1 = rnn_cell.BasicLSTMCell(dim_hidden)
    # self.lstm2 = rnn_cell.BasicLSTMCell(dim_hidden)
    self.lstm1 = rnn_cell.LSTMCell(self.dim_hidden, self.dim_hidden, use_peepholes=True)
    self.lstm1_dropout = rnn_cell.DropoutWrapper(self.lstm1, output_keep_prob=1 - self.drop_out_rate)
    self.lstm2 = rnn_cell.LSTMCell(self.dim_hidden, self.dim_hidden, use_peepholes=True)
    self.lstm2_dropout = rnn_cell.DropoutWrapper(self.lstm2, output_keep_prob=1 - self.drop_out_rate)

    # W is Weight, b is Bias
    self.encode_image_W = tf.Variable(tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1), name='encode_image_W')
    self.encode_image_b = tf.Variable(tf.zeros([dim_hidden]), name='encode_image_b')

    self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1, 0.1), name='embed_word_W')
    if bias_init_vector is not None:
        self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')
    else:
        self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')
Developer ID: meteora9479, Project: video_to_sequence, Lines of code: 30, Source file: msrvtt_model.py
Example 5: get_idx_map
def get_idx_map(shape):
    """Get index map for an image.

    Args:
        shape: [B, T, H, W] or [B, H, W]

    Returns:
        idx: [B, T, H, W, 2] or [B, H, W, 2]
    """
    s = shape
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])
    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')
    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    idx = tf.concat(ndims[0], [idx_y, idx_x])
    return idx
Developer ID: renmengye, Project: deep-tracker, Lines of code: 27, Source file: build_deep_tracker.py
Example 6: main
def main():
    sess = tf.Session()

    # Map 3-bit binary input to a decimal digit (one-hot over 8 classes)
    x = tf.placeholder(tf.float32, [None, 3])
    w = tf.Variable(tf.zeros([3, 8]))
    b = tf.Variable(tf.zeros([8]))
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    y_ = tf.placeholder(tf.float32, [None, 8])

    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run({x: [[0, 0, 0]], y_: [[1, 0, 0, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 0, 0]], y_: [[0, 1, 0, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 1, 0]], y_: [[0, 0, 1, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 1, 0]], y_: [[0, 0, 0, 1, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 0, 1]], y_: [[0, 0, 0, 0, 1, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 0, 1]], y_: [[0, 0, 0, 0, 0, 1, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 1, 1]], y_: [[0, 0, 0, 0, 0, 0, 1, 0]]}, session=sess)
        train_step.run({x: [[1, 1, 1]], y_: [[0, 0, 0, 0, 0, 0, 0, 1]]}, session=sess)

    # Check whether the prediction closest to 1 is correct (mean accuracy)
    # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(sess.run(y, feed_dict={x: [[0, 0, 0]]}))
    print(sess.run(y, feed_dict={x: [[1, 0, 0]]}))
    print(sess.run(y, feed_dict={x: [[0, 1, 0]]}))
    print(sess.run(y, feed_dict={x: [[1, 1, 0]]}))
    print(sess.run(y, feed_dict={x: [[0, 0, 1]]}))
    return 0
Developer ID: octaltree, Project: tensorFlowTest, Lines of code: 35, Source file: binary.py
Example 7: testDiscretizedMixLogisticLoss
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)
Developer ID: qixiuai, Project: tensor2tensor, Lines of code: 35, Source file: common_layers_test.py
Example 8: inference
def inference(images, hidden1_units, hidden2_units):
    """Build the feed-forward neural network model.

    Args:
        images: input image data.
        hidden1_units: number of neurons in the first hidden layer.
        hidden2_units: number of neurons in the second hidden layer.

    Returns:
        softmax_linear: output tensor holding the computed logits.
    """
    # Hidden layer 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Hidden layer 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Linear output layer
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(tf.truncated_normal([hidden2_units, NUM_CLASSES]),
                              name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
Developer ID: rickyall, Project: tensorflow, Lines of code: 26, Source file: MNIST_FFNN.py
Example 9: testBasicLSTMCellWithStateTuple
def testBasicLSTMCellWithStateTuple(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m0 = tf.zeros([1, 4])
            m1 = tf.zeros([1, 4])
            cell = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2, state_is_tuple=True)
            g, (out_m0, out_m1) = cell(x, (m0, m1))
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m0, out_m1],
                           {x.name: np.array([[1., 1.]]),
                            m0.name: 0.1 * np.ones([1, 4]),
                            m1.name: 0.1 * np.ones([1, 4])})
            self.assertEqual(len(res), 3)
            # The numbers in results were not calculated, this is just a smoke test.
            # Note, however, these values should match the original
            # version having state_is_tuple=False.
            self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
            expected_mem0 = np.array([[0.68967271, 0.68967271,
                                       0.44848421, 0.44848421]])
            expected_mem1 = np.array([[0.39897051, 0.39897051,
                                       0.24024698, 0.24024698]])
            self.assertAllClose(res[1], expected_mem0)
            self.assertAllClose(res[2], expected_mem1)
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 25, Source file: rnn_cell_test.py
Example 10: testBasicLSTMCellStateTupleType
def testBasicLSTMCellStateTupleType(self):
    with self.test_session():
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m0 = (tf.zeros([1, 2]),) * 2
            m1 = (tf.zeros([1, 2]),) * 2
            cell = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2,
                state_is_tuple=True)
            self.assertTrue(isinstance(cell.state_size, tuple))
            self.assertTrue(isinstance(cell.state_size[0],
                                       tf.nn.rnn_cell.LSTMStateTuple))
            self.assertTrue(isinstance(cell.state_size[1],
                                       tf.nn.rnn_cell.LSTMStateTuple))

            # Pass in regular tuples
            _, (out_m0, out_m1) = cell(x, (m0, m1))
            self.assertTrue(isinstance(out_m0,
                                       tf.nn.rnn_cell.LSTMStateTuple))
            self.assertTrue(isinstance(out_m1,
                                       tf.nn.rnn_cell.LSTMStateTuple))

            # Pass in LSTMStateTuples
            tf.get_variable_scope().reuse_variables()
            zero_state = cell.zero_state(1, tf.float32)
            self.assertTrue(isinstance(zero_state, tuple))
            self.assertTrue(isinstance(zero_state[0],
                                       tf.nn.rnn_cell.LSTMStateTuple))
            self.assertTrue(isinstance(zero_state[1],
                                       tf.nn.rnn_cell.LSTMStateTuple))
            _, (out_m0, out_m1) = cell(x, zero_state)
            self.assertTrue(
                isinstance(out_m0, tf.nn.rnn_cell.LSTMStateTuple))
            self.assertTrue(
                isinstance(out_m1, tf.nn.rnn_cell.LSTMStateTuple))
Developer ID: brchiu, Project: tensorflow, Lines of code: 35, Source file: rnn_cell_test.py
Example 11: __init__
def __init__(self, name, input_size, output_size):
    with tf.name_scope("rbm_" + name):
        self.weights = tf.Variable(
            tf.truncated_normal([input_size, output_size],
                                stddev=1.0 / math.sqrt(float(input_size))),
            name="weights")
        self.v_bias = tf.Variable(tf.zeros([input_size]), name="v_bias")
        self.h_bias = tf.Variable(tf.zeros([output_size]), name="h_bias")
Developer ID: btpeter, Project: DeepLearning4Medical, Lines of code: 7, Source file: RBM_tensorflow.py
Example 12: testGradientsAsVariables
def testGradientsAsVariables(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
        with self.test_session() as sess:
            var0 = tf.Variable([1.0, 2.0], dtype=dtype)
            var1 = tf.Variable([3.0, 4.0], dtype=dtype)
            cost = 5 * var0 + 3 * var1
            global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
            sgd_op = tf.train.GradientDescentOptimizer(3.0)
            grads_and_vars = sgd_op.compute_gradients(cost, [var0, var1])

            # Convert gradients to tf.Variables
            converted_grads = [
                tf.Variable(tf.zeros([2], dtype)) for i in grads_and_vars
            ]
            convert_ops = [
                tf.assign(converted_grads[i], gv[0])
                for i, gv in enumerate(grads_and_vars)
            ]

            converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
            opt_op = sgd_op.apply_gradients(converted_grads_and_vars, global_step)

            tf.global_variables_initializer().run()
            # Run convert_ops to copy the computed gradients into the variables.
            sess.run(convert_ops)
            # Fetch params to validate initial values
            self.assertAllClose([1.0, 2.0], var0.eval())
            self.assertAllClose([3.0, 4.0], var1.eval())
            # Run 1 step of sgd through optimizer
            opt_op.run()
            # Validate updated params
            self.assertAllClose([-14., -13.], var0.eval())
            self.assertAllClose([-6., -5.], var1.eval())
Developer ID: curtiszimmerman, Project: tensorflow, Lines of code: 32, Source file: optimizer_test.py
Example 13: model
def model(images, inits, num_iterations=4, num_patches=68, patch_shape=(24, 24), num_channels=3):
    batch_size = images.get_shape().as_list()[0]
    hidden_state = tf.zeros((batch_size, 512))
    dx = tf.zeros((batch_size, num_patches, 2))
    endpoints = {}
    dxs = []

    for step in range(num_iterations):
        with tf.device('/cpu:0'):
            patches = tf.image.extract_patches(images, tf.constant(patch_shape), inits + dx)
            patches = tf.reshape(patches, (batch_size * num_patches, patch_shape[0], patch_shape[1], num_channels))
        endpoints['patches'] = patches

        with tf.variable_scope('convnet', reuse=step > 0):
            net = conv_model(patches)
            ims = net['concat']

        ims = tf.reshape(ims, (batch_size, -1))

        with tf.variable_scope('rnn', reuse=step > 0) as scope:
            hidden_state = slim.ops.fc(tf.concat(1, [ims, hidden_state]), 512, activation=tf.tanh)
            prediction = slim.ops.fc(hidden_state, num_patches * 2, scope='pred', activation=None)
            endpoints['prediction'] = prediction
        prediction = tf.reshape(prediction, (batch_size, num_patches, 2))
        dx += prediction
        dxs.append(dx)

    return inits + dx, dxs, endpoints
Developer ID: tdeboissiere, Project: mdm, Lines of code: 28, Source file: mdm_model.py
Example 14: testCompatibleNames
def testCompatibleNames(self):
    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
        cell = tf.nn.rnn_cell.LSTMCell(10)
        pcell = tf.nn.rnn_cell.LSTMCell(10, use_peepholes=True)
        inputs = [tf.zeros([4, 5])] * 6
        tf.nn.rnn(cell, inputs, dtype=tf.float32, scope="basic")
        tf.nn.rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
        basic_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
        cell = tf.contrib.rnn.LSTMBlockCell(10, use_compatible_names=True)
        pcell = tf.contrib.rnn.LSTMBlockCell(
            10, use_peephole=True, use_compatible_names=True)
        inputs = [tf.zeros([4, 5])] * 6
        tf.nn.rnn(cell, inputs, dtype=tf.float32, scope="basic")
        tf.nn.rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
        block_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
        cell = tf.contrib.rnn.LSTMBlockFusedCell(10)
        pcell = tf.contrib.rnn.LSTMBlockFusedCell(10, use_peephole=True)
        inputs = [tf.zeros([4, 5])] * 6
        cell(inputs, dtype=tf.float32, scope="basic/LSTMCell")
        pcell(inputs, dtype=tf.float32, scope="peephole/LSTMCell")
        fused_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    self.assertEqual(basic_names, block_names)
    self.assertEqual(basic_names, fused_names)
Developer ID: brchiu, Project: tensorflow, Lines of code: 28, Source file: lstm_ops_test.py
Example 15: autoencoder_contd
def autoencoder_contd(input_dim, representation):
    x = tf.placeholder(tf.float32, [None, input_dim])
    high_decW = tf.Variable(
        initial_value=tf.random_normal(
            [representation, input_dim],
            -math.sqrt(6.0 / (input_dim + representation)),
            math.sqrt(6.0 / (input_dim + representation))),
        dtype=tf.float32,
        name='high_decW')
    # high_encW = tf.transpose(high_decW)
    high_encW = tf.Variable(
        initial_value=tf.random_normal(
            [input_dim, representation],
            -math.sqrt(6.0 / (input_dim + representation)),
            math.sqrt(6.0 / (input_dim + representation))),
        name='high_encW')
    high_encb = tf.Variable(tf.zeros([representation]),
                            name='high_encb')
    z = tf.nn.sigmoid(tf.matmul(x, high_encW) + high_encb)
    hidden_weights = high_encW
    high_decb = tf.Variable(
        tf.zeros([input_dim]),
        name='high_decb')
    y = tf.nn.sigmoid(tf.matmul(z, high_decW) + high_decb)
    cost = tf.nn.l2_loss(x - y)
    loss_per_pixel = tf.reduce_mean(tf.abs(x - y))
    return {'x': x, 'z': z, 'y': y, 'cost': cost,
            'weights': hidden_weights,
            'encW': high_encW, 'decW': high_decW,
            'encb': high_encb, 'decb': high_decb,
            'ppx': loss_per_pixel}
Developer ID: manic-milos, Project: Autoencoders, Lines of code: 33, Source file: upscaling_ae_def.py
Example 16: testLSTMCell
def testLSTMCell(self):
    with self.test_session() as sess:
        num_units = 8
        num_proj = 6
        state_size = num_units + num_proj
        batch_size = 3
        input_size = 2
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([batch_size, input_size])
            m = tf.zeros([batch_size, state_size])
            output, state = tf.nn.rnn_cell.LSTMCell(
                num_units=num_units, input_size=input_size,
                num_proj=num_proj, forget_bias=1.0)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([output, state],
                           {x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
                            m.name: 0.1 * np.ones((batch_size, state_size))})
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_proj))
        self.assertEqual(res[1].shape, (batch_size, state_size))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
            self.assertTrue(
                float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
            self.assertTrue(
                float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 28, Source file: rnn_cell_test.py
Example 17: recover_feeling
def recover_feeling(checkpoint):
    # Set up the variables
    in_sentence = tf.placeholder(tf.float32, [None, 140])

    weight = tf.Variable(tf.zeros([140, 6]))
    biases = tf.Variable(tf.zeros([6]))
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # y = softmax(Wx + b)
    y = tf.nn.softmax(tf.matmul(in_sentence, weight) + biases)
    y_ = tf.placeholder(tf.float32, [None, 6])
    sess = tf.InteractiveSession()

    # Restore the model.
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)

    # Model restored; load the vocabulary
    vocab, tmp_vocab = read_vocabulary('data/emotion/vocabulary.txt')

    # Convert a few sentences into arrays of vectors
    vec_list1 = convert_sentence_to_vec_list('你今天感觉怎么样', vocab, 140)
    vec_list2 = convert_sentence_to_vec_list('高兴啊', vocab, 140)
    vec_list_final = [np.array(vec_list1), np.array(vec_list2)]
    print(vec_list_final)

    # Feed the data to the session for inference
    data = np.array(vec_list_final)
    result = sess.run(y, feed_dict={in_sentence: data})
    print(result)
Developer ID: chibimiku, Project: InoueAoi, Lines of code: 30, Source file: train_emotion.py
Example 18: testMultiRNNCellWithStateTuple
def testMultiRNNCellWithStateTuple(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m_bad = tf.zeros([1, 4])
            m_good = (tf.zeros([1, 2]), tf.zeros([1, 2]))

            # Test incorrectness of state
            with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
                tf.nn.rnn_cell.MultiRNNCell(
                    [tf.nn.rnn_cell.GRUCell(2)] * 2, state_is_tuple=True)(x, m_bad)

            _, ml = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.GRUCell(2)] * 2, state_is_tuple=True)(x, m_good)

            sess.run([tf.initialize_all_variables()])
            res = sess.run(ml, {x.name: np.array([[1., 1.]]),
                                m_good[0].name: np.array([[0.1, 0.1]]),
                                m_good[1].name: np.array([[0.1, 0.1]])})

            # The numbers in results were not calculated, this is just a
            # smoke test. However, these numbers should match those of
            # the test testMultiRNNCell.
            self.assertAllClose(res[0], [[0.175991, 0.175991]])
            self.assertAllClose(res[1], [[0.13248, 0.13248]])
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 25, Source file: rnn_cell_test.py
Example 19: __init__
def __init__(self, n_input, n_latent, n_hidden_enc):
    # initialize network
    self.prng = numpy.random.RandomState()
    sigma_init = 0.01

    x = tf.placeholder(tf.float32, [None, n_input], name='input')

    # encoder
    # x -> hidden layer
    W_xh = tf.Variable(tf.random_normal([n_input, n_hidden_enc],
                                        mean=0., stddev=sigma_init, dtype=tf.float32))
    b_xh = tf.Variable(tf.zeros([n_hidden_enc], dtype=tf.float32))
    # hidden layer -> latent variables (mu & log sigma^2)
    W_hmu = tf.Variable(tf.random_normal([n_hidden_enc, n_latent],
                                         mean=0., stddev=sigma_init, dtype=tf.float32))
    b_hsigma = tf.Variable(tf.zeros([n_latent], dtype=tf.float32))

    # decoder
    W_zx = tf.Variable(tf.random_normal([n_latent, n_input],
                                        mean=0., stddev=sigma_init, dtype=tf.float32))
    b_zx = tf.Variable(tf.zeros([n_input], dtype=tf.float32))

    # create functions
    h_encoder = tf.nn.relu(tf.matmul(x, W_xh) + b_xh)
    pass
Developer ID: zhaocq-nlp, Project: NeuralDocModel, Lines of code: 31, Source file: NVDM.py
Example 20: testBasicLSTMCell
def testBasicLSTMCell(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m = tf.zeros([1, 8])
            g, out_m = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 8])})
            self.assertEqual(len(res), 2)
            # The numbers in results were not calculated, this is just a smoke test.
            self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
            expected_mem = np.array([[0.68967271, 0.68967271,
                                      0.44848421, 0.44848421,
                                      0.39897051, 0.39897051,
                                      0.24024698, 0.24024698]])
            self.assertAllClose(res[1], expected_mem)
        with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
            m = tf.zeros([1, 4])
            g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2, input_size=3)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 4])})
            self.assertEqual(len(res), 2)
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 26, Source file: rnn_cell_test.py
Note: The tensorflow.zeros examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the code; consult each project's License before reusing or redistributing it, and do not republish without permission.