This article compiles typical usage examples of the variable_scope function from tensorflow.python.ops.variable_scope in Python. If you have been wondering what the variable_scope function does, how to call it, or what real-world usage looks like, the curated code examples below may help.
The following presents 20 code examples of the variable_scope function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
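Before working through the examples, here is a minimal sketch, not drawn from any of the projects below and assuming the TF 1.x graph-mode API, of the basic pattern most of them build on: variable_scope prefixes the names of variables created with get_variable, and reopening the same scope with reuse=True makes get_variable return the existing variables instead of creating new ones. The helper dense_layer and the scope names "model" and "layer" are purely illustrative.

import tensorflow as tf  # TF 1.x-style graph API assumed
from tensorflow.python.ops import variable_scope

def dense_layer(x, out_dim, scope_name="layer"):
  # Variables created via get_variable inside this scope get names like
  # "<outer>/layer/w:0" and "<outer>/layer/b:0".
  with variable_scope.variable_scope(scope_name):
    in_dim = x.get_shape().as_list()[1]
    w = variable_scope.get_variable("w", [in_dim, out_dim])
    b = variable_scope.get_variable("b", [out_dim],
                                    initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

x = tf.placeholder(tf.float32, [None, 4])
with variable_scope.variable_scope("model"):
  y1 = dense_layer(x, 8)   # creates model/layer/w and model/layer/b
with variable_scope.variable_scope("model", reuse=True):
  y2 = dense_layer(x, 8)   # reuses the same two variables

The second "model" scope is opened with reuse=True, so the nested "layer" scope inherits the reuse flag and both calls share one set of weights; the reuse-based examples below (e.g. testReuse and testInitFromCheckpoint) rely on the same mechanism.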
Example 1: __call__
def __call__(self, inputs, state, scope=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._dropMaskInput.get_shape()[1:] != inputs.get_shape()[1:]:
      print("error: "+str(self._dropMaskInput.get_shape()[1:])+" != "+str(inputs.get_shape()[1:]))
      assert(False)
    if self._dropMaskState.get_shape()[1:] != state.get_shape()[1:]:
      print("error: "+str(self._dropMaskState.get_shape()[1:])+" != "+str(state.get_shape()[1:]))
      assert(False)
    dropin = tf.mul(self._dropMaskInput, inputs)
    dropst = tf.mul(self._dropMaskState, state)
    with vs.variable_scope("Gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      concat = rnn_cell._linear([dropin, dropst], 2 * self._num_units, True, 1.0)
      r, u = tf.split(1, 2, concat)
      r, u = tf.sigmoid(r), tf.sigmoid(u)
    with vs.variable_scope("Candidate"):
      htilda = self._activation(rnn_cell._linear([dropin, r * dropst], self._num_units, True))
    new_h = u * dropst + (1 - u) * htilda
  return new_h, new_h
Author: jasonbunk, Project: char-rnn-tensorflow, Lines: 25, Source file: dropgru.py
Example 2: __call__
def __call__(self, inputs, state, scope=None):
  """Long short-term memory cell with attention (LSTMA)."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(1, _unpacked_state(new_state))
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("AttnOutputProjection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(1, [new_attn_states,
                                           array_ops.expand_dims(output, 1)])
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(1, list(new_state))
    return output, new_state
Author: Assassin0028, Project: tensorflow, Lines: 35, Source file: rnn_cell.py
Example 3: testInitFromCheckpoint
def testInitFromCheckpoint(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable("my1", [1, 10])
        with variable_scope.variable_scope("some_other_scope"):
          my2 = variable_scope.get_variable("my2", [10, 10])
          with variable_scope.variable_scope("other_useful_scope"):
            my4 = variable_scope.get_variable("var4", [9, 9])
      my3 = variable_scope.get_variable("my3", [100, 100])

      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "var1": "some_scope/my1",
          "useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
      })
      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "var2": "some_scope/some_other_scope/my2",
          "var3": my3,
      })

      session.run(variables.global_variables_initializer())
      self.assertAllEqual(my1.eval(session), v1)
      self.assertAllEqual(my2.eval(session), v2)
      self.assertAllEqual(my3.eval(session), v3)
      self.assertAllEqual(my4.eval(session), v4)

      # Check that tensors are not explicitly in the graph.
      self.assertLess(len(str(session.graph.as_graph_def())), 29000)
Author: QiangCai, Project: tensorflow, Lines: 33, Source file: checkpoint_utils_test.py
Example 4: call
def call(self, inputs, state, att_score=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  if self._gate_linear is None:
    bias_ones = self._bias_initializer
    if self._bias_initializer is None:
      bias_ones = init_ops.constant_initializer(
          1.0, dtype=inputs.dtype)
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      self._gate_linear = _Linear(
          [inputs, state],
          2 * self._num_units,
          True,
          bias_initializer=bias_ones,
          kernel_initializer=self._kernel_initializer)

  value = math_ops.sigmoid(self._gate_linear([inputs, state]))
  r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

  r_state = r * state
  if self._candidate_linear is None:
    with vs.variable_scope("candidate"):
      self._candidate_linear = _Linear(
          [inputs, r_state],
          self._num_units,
          True,
          bias_initializer=self._bias_initializer,
          kernel_initializer=self._kernel_initializer)
  c = self._activation(self._candidate_linear([inputs, r_state]))
  u = (1.0 - att_score) * u
  new_h = u * state + (1 - u) * c
  return new_h, new_h
Author: q64545, Project: x-deeplearning, Lines: 31, Source file: utils.py
Example 5: testReuse
def testReuse(self):

  def f(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  def g(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  x = random_ops.random_uniform(
      [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
  x1, x2 = array_ops.split(x, 2, axis=-1)

  with variable_scope.variable_scope("test"):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
  num_vars_before = len(variables.global_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)

  loss = math_ops.reduce_mean(y1 + y2)
  _ = gradients_impl.gradients(loss,
                               [x] + variables.trainable_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)
Author: clsung, Project: tensorflow, Lines: 32, Source file: rev_block_lib_test.py
Example 6: __call__
def __call__(self, *args, **kwargs):
  if self._variable_scope:
    if self._variables_created:
      # This is not the first visit to __call__, so variables have already
      # been created, and we want to reuse them.
      with variable_scope.variable_scope(self._variable_scope,
                                         reuse=variable_scope.AUTO_REUSE):
        with self._eager_variable_store.as_default():
          return self._call_func(args, kwargs, check_for_new_variables=True)
    else:
      # This is the first visit to __call__, but the scope has already been
      # created in the constructor. Set _variables_created after the inner
      # function is successfully called so that subsequent calls take the if
      # branch above.
      with variable_scope.variable_scope(self._variable_scope,
                                         reuse=variable_scope.AUTO_REUSE):
        with self._eager_variable_store.as_default():
          result = self._call_func(args, kwargs,
                                   check_for_new_variables=False)
      self._variables_created = True
      return result
  else:
    # The scope was not created at construction time, so create it here.
    # Subsequent calls should reuse variables.
    with variable_scope.variable_scope(
        self._unique_name, self._name,
        custom_getter=self._custom_getter) as vs:
      self._variable_scope = vs
      with self._eager_variable_store.as_default():
        result = self._call_func(args, kwargs,
                                 check_for_new_variables=False)
    self._variables_created = True
    return result
Author: Lin-jipeng, Project: tensorflow, Lines: 33, Source file: template.py
Example 7: separable_lstm
def separable_lstm(images, num_filters_out,
                   kernel_size=None, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the pooling. Can be an int if both values are the same. Set to None
      for not using blocks
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height/kernel_height, width/kernel_width,
    num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    if kernel_size is not None:
      images = get_blocks(images, kernel_size)
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output
Author: 1000sprites, Project: tensorflow, Lines: 28, Source file: lstm2d.py
Example 8: __call__
def __call__(self, *args, **kwargs):
  # In both branches below, the template store is installed as default after
  # the variable scope is opened in order to ensure that templates nested at
  # the same level correctly uniquify lower variable scope names.
  if self._variable_scope:
    # Create a cache for the variable scope context manager the first time
    # around so that we don't have to keep recreating it.
    if not self._variable_scope_context_manager:
      self._variable_scope_context_manager = variable_scope.variable_scope(
          self._variable_scope, reuse=variable_scope.AUTO_REUSE)
    with self._variable_scope_context_manager:
      with self._template_store.as_default():
        result = self._call_func(args, kwargs)
    return result
  else:
    # The scope was not created at construction time, so create it here.
    # Subsequent calls should reuse variables.
    with variable_scope.variable_scope(
        self._unique_name, self._name,
        custom_getter=self._custom_getter) as vs:
      self._variable_scope = vs
      # Because the scope was not created at construction time, the template
      # store's variable scope name is unset; set it here.
      self._template_store.set_variable_scope_name(vs.name)
      with self._template_store.as_default():
        result = self._call_func(args, kwargs)
      return result
Author: AndrewTwinz, Project: tensorflow, Lines: 27, Source file: template.py
Example 9: sequence_softmax
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
Author: AlbertXiebnu, Project: tensorflow, Lines: 32, Source file: lstm1d.py
Example 10: dnn_logits_fn
def dnn_logits_fn():
  """Builds the logits from the input layer."""
  previous_layer = input_layer
  for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
    with variable_scope.variable_scope(
        "hiddenlayer_%d" % layer_id,
        values=(previous_layer,)) as hidden_layer_scope:
      net = layers.fully_connected(
          previous_layer,
          num_hidden_units,
          activation_fn=dnn_activation_fn,
          variables_collections=[dnn_parent_scope],
          scope=hidden_layer_scope)
      if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
        net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
    _add_hidden_layer_summary(net, hidden_layer_scope.name)
    previous_layer = net
  with variable_scope.variable_scope(
      "logits", values=(previous_layer,)) as logits_scope:
    dnn_logits = layers.fully_connected(
        previous_layer,
        head.logits_dimension,
        activation_fn=None,
        variables_collections=[dnn_parent_scope],
        scope=logits_scope)
  _add_hidden_layer_summary(dnn_logits, logits_scope.name)
  return dnn_logits
Author: Ajaycs99, Project: tensorflow, Lines: 27, Source file: dnn_tree_combined_estimator.py
Example 11: call
def call(self, inputs, state):
  """
  """
  (c_prev, m_prev) = state
  self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]

  scope = vs.get_variable_scope()
  with vs.variable_scope(scope, initializer=self._initializer):
    x = array_ops.concat([inputs, m_prev], axis=1)
    with vs.variable_scope("first_gemm"):
      if self._linear1 is None:
        # no bias for bottleneck
        self._linear1 = _Linear(x, self._fact_size, False)
      R_fact = self._linear1(x)
    with vs.variable_scope("second_gemm"):
      if self._linear2 is None:
        self._linear2 = _Linear(R_fact, 4*self._num_units, True)
      R = self._linear2(R_fact)
    i, j, f, o = array_ops.split(R, 4, 1)

    c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
         math_ops.sigmoid(i) * math_ops.tanh(j))
    m = math_ops.sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      with vs.variable_scope("projection"):
        if self._linear3 is None:
          self._linear3 = _Linear(m, self._num_proj, False)
        m = self._linear3(m)

  new_state = rnn_cell_impl.LSTMStateTuple(c, m)
  return m, new_state
Author: fotwo, Project: OpenSeq2Seq, Lines: 31, Source file: flstm.py
Example 12: __call__
def __call__(self, inputs, state, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or "multi_rnn_cell"):
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    outputs = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("cell_%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s"
                % (len(self.state_size), state))
          cur_state = state[i]
        else:
          cur_state = array_ops.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, cur_state)
        outputs.append(cur_inp)
        new_states.append(new_state)
  new_states = (tuple(new_states) if self._state_is_tuple else
                array_ops.concat_v2(new_states, 1))
  return tuple(outputs), new_states
Author: alexeyche, Project: alexeyche-junk, Lines: 25, Source file: multi_rnn_cell.py
Example 13: testIndyGRUCell
def testIndyGRUCell(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([1, 2])
      m = array_ops.zeros([1, 2])
      g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
      sess.run([variables_lib.global_variables_initializer()])
      res = sess.run([g], {
          x.name: np.array([[1., 1.]]),
          m.name: np.array([[0.1, 0.1]])
      })
      # Smoke test
      self.assertAllClose(res[0], [[0.185265, 0.17704]])
    with variable_scope.variable_scope(
        "other", initializer=init_ops.constant_initializer(0.5)):
      # Test IndyGRUCell with input_size != num_units.
      x = array_ops.zeros([1, 3])
      m = array_ops.zeros([1, 2])
      g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
      sess.run([variables_lib.global_variables_initializer()])
      res = sess.run([g], {
          x.name: np.array([[1., 1., 1.]]),
          m.name: np.array([[0.1, 0.1]])
      })
      # Smoke test
      self.assertAllClose(res[0], [[0.155127, 0.157328]])
Author: Eagle732, Project: tensorflow, Lines: 27, Source file: core_rnn_cell_test.py
Example 14: dnn
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
  """Creates fully connected deep neural network subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function between layers. Can be None.
    dropout: if not None, will add a dropout layer with given probability.

  Returns:
    A tensor which would be a deep neural network.
  """
  with vs.variable_scope('dnn'):
    for i, n_units in enumerate(hidden_units):
      with vs.variable_scope('layer%d' % i):
        # Weight initializer was set to None to replicate the behavior of
        # rnn_cell.linear. Using fully_connected's default initializer gets
        # slightly worse quality results on unit tests.
        tensor_in = layers.legacy_fully_connected(
            tensor_in,
            n_units,
            weight_init=None,
            weight_collections=['dnn_weights'],
            bias_collections=['dnn_biases'])
        if activation is not None:
          tensor_in = activation(tensor_in)
        if dropout is not None:
          is_training = array_ops_.squeeze(ops.get_collection('IS_TRAINING'))
          tensor_in = control_flow_ops.cond(
              is_training,
              lambda: dropout_ops.dropout(tensor_in, prob=(1.0 - dropout)),
              lambda: tensor_in)
    return tensor_in
Author: 01-, Project: tensorflow, Lines: 33, Source file: dnn_ops.py
Example 15: _serving_ops
def _serving_ops(self, features):
  """Add ops for serving to the graph."""
  with variable_scope.variable_scope("model", use_resource=True):
    prediction_outputs = self.model.predict(features=features)
  with variable_scope.variable_scope("model", reuse=True):
    filtering_outputs = self.create_loss(
        features, estimator_lib.ModeKeys.EVAL)
  with variable_scope.variable_scope("model", reuse=True):
    no_state_features = {
        k: v for k, v in features.items()
        if not k.startswith(feature_keys.State.STATE_PREFIX)}
    # Ignore any state management when cold-starting. The model's default
    # start state is replicated across the batch.
    cold_filtering_outputs = self.model.define_loss(
        features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
  return estimator_lib.EstimatorSpec(
      mode=estimator_lib.ModeKeys.PREDICT,
      export_outputs={
          feature_keys.SavedModelLabels.PREDICT:
              export_lib.PredictOutput(prediction_outputs),
          feature_keys.SavedModelLabels.FILTER:
              export_lib.PredictOutput(
                  state_to_dictionary(filtering_outputs.end_state)),
          feature_keys.SavedModelLabels.COLD_START_FILTER:
              _NoStatePredictOutput(
                  state_to_dictionary(cold_filtering_outputs.end_state))
      },
      # Likely unused, but it is necessary to return `predictions` to satisfy
      # the Estimator's error checking.
      predictions={})
Author: Ajaycs99, Project: tensorflow, Lines: 30, Source file: head.py
Example 16: testCustomGradientErrorsWithNonResourceVariables
def testCustomGradientErrorsWithNonResourceVariables(self):

  def F(x, use_resource=False):
    with variable_scope.variable_scope("f", use_resource=use_resource):
      out = core_layers.dense(x, 4, use_bias=False)

    def Grad(out_grad, variables=None):  # pylint: disable=redefined-outer-name
      del out_grad
      self.assertEqual(1, len(variables))
      return (array_ops.ones((3, 2)), [array_ops.ones((2, 4))])

    return out, Grad

  @custom_gradient.custom_gradient
  def FResource(x):
    return F(x, use_resource=True)

  @custom_gradient.custom_gradient
  def FNonResource(x):
    return F(x, use_resource=False)

  x = array_ops.ones((3, 2)) + 2.

  # Wrapping scope has use_resource=True but inner scope sets to False. Fails.
  with variable_scope.variable_scope("vs1", use_resource=True):
    with self.assertRaisesWithPredicateMatch(TypeError,
                                             "must be `ResourceVariable`s"):
      FNonResource(x)

  # Wrapping scope has use_resource=False but inner scope sets to True.
  # Passes.
  with variable_scope.variable_scope("vs2", use_resource=False):
    FResource(x)
Author: didukhle, Project: tensorflow, Lines: 33, Source file: gradients_test.py
Example 17: dnn_autoencoder
def dnn_autoencoder(
    tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
    scope=None):
  """Creates fully connected autoencoder subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function used to map inner latent layer onto
      reconstruction layer.
    add_noise: a function that adds noise to tensor_in,
      e.g. def add_noise(x):
               return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
    dropout: if not None, will add a dropout layer with given
      probability.
    scope: the variable scope for this op.

  Returns:
    Tensors for encoder and decoder.
  """
  with vs.variable_op_scope([tensor_in], scope, "autoencoder"):
    if add_noise is not None:
      tensor_in = add_noise(tensor_in)
    with vs.variable_scope("encoder"):
      # build DNN encoder
      encoder = dnn_ops.dnn(
          tensor_in, hidden_units, activation=activation, dropout=dropout)
    with vs.variable_scope("decoder"):
      # reverse hidden_units and build DNN decoder
      decoder = dnn_ops.dnn(
          encoder, hidden_units[::-1], activation=activation, dropout=dropout)
    return encoder, decoder
Author: 0ruben, Project: tensorflow, Lines: 32, Source file: autoencoder_ops.py
Example 18: _set_scope_for_nonnetwork_sublayer
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
  if sublayer._scope is None:
    if sublayer._first_parent is None:
      constituent_first_parent = None
    else:
      constituent_first_parent = sublayer._first_parent()
    if constituent_first_parent:
      constituent_first_parent._set_scope()
      parent_scope = constituent_first_parent._scope
    else:
      self._finalize_name(False)
      raise ValueError(
          ("The parent of a Layer added to Network %s was garbage collected "
           "before the Layer was built. If this limitation bothers you "
           "please file a feature request.") %
          (self.name,))
    with variable_scope.variable_scope(parent_scope):
      # Horrid hack to make Layer variable names which are direct
      # sub-layers of Networks conform to the Network variable naming
      # conventions.
      with variable_scope.variable_scope(
          None, use_resource=True,
          default_name=sublayer.name) as sub_scope:
        sublayer._scope = sub_scope
        # Also switch op naming for this Layer to match Network conventions,
        # i.e. op naming matching variable naming.
        sublayer._name_scope_name = _network_name_scope_naming
Author: Ajaycs99, Project: tensorflow, Lines: 27, Source file: network.py
Example 19: testBasicLSTMCell
def testBasicLSTMCell(self):
  for dtype in [dtypes.float16, dtypes.float32]:
    np_dtype = dtype.as_numpy_dtype
    with self.test_session(graph=ops.Graph()) as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2], dtype=dtype)
        m = array_ops.zeros([1, 8], dtype=dtype)
        cell = rnn_cell_impl.MultiRNNCell(
            [
                rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
                for _ in range(2)
            ],
            state_is_tuple=False)
        self.assertEqual(cell.dtype, None)
        g, out_m = cell(x, m)
        # Layer infers the input type.
        self.assertEqual(cell.dtype, dtype.name)
        expected_variable_names = [
            "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
            rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
            "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
            rnn_cell_impl._BIAS_VARIABLE_NAME,
            "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
            rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
            "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
            rnn_cell_impl._BIAS_VARIABLE_NAME
        ]
        self.assertEqual(expected_variable_names,
                         [v.name for v in cell.trainable_variables])
        self.assertFalse(cell.non_trainable_variables)
        sess.run([variables_lib.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1.]]),
            m.name: 0.1 * np.ones([1, 8])
        })
        self.assertEqual(len(res), 2)
        variables = variables_lib.global_variables()
        self.assertEqual(expected_variable_names, [v.name for v in variables])
        # The numbers in results were not calculated, this is just a
        # smoke test.
        self.assertAllClose(res[0], np.array(
            [[0.240, 0.240]], dtype=np_dtype), 1e-2)
        expected_mem = np.array(
            [[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
            dtype=np_dtype)
        self.assertAllClose(res[1], expected_mem, 1e-2)
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(0.5)):
        # Test BasicLSTMCell with input_size != num_units.
        x = array_ops.zeros([1, 3], dtype=dtype)
        m = array_ops.zeros([1, 4], dtype=dtype)
        g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
        sess.run([variables_lib.global_variables_initializer()])
        res = sess.run(
            [g, out_m], {
                x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
                m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
            })
        self.assertEqual(len(res), 2)
Author: ChengYuXiang, Project: tensorflow, Lines: 60, Source file: core_rnn_cell_test.py
Example 20: reduce_to_final
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
  """Reduce an image to a final state by running two LSTMs.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth (defaults to num_filters_out)
    scope: optional scope name

  Returns:
    A (num_images, num_filters_out) batch.
  """
  with variable_scope.variable_scope(scope, "ReduceToFinal", [images]):
    nhidden = nhidden or num_filters_out
    batch_size, height, width, depth = _shape(images)
    transposed = array_ops.transpose(images, [1, 0, 2, 3])
    reshaped = array_ops.reshape(transposed,
                                 [height, batch_size * width, depth])
    with variable_scope.variable_scope("reduce1"):
      reduced = lstm1d.sequence_to_final(reshaped, nhidden)
      transposed_hidden = array_ops.reshape(reduced,
                                            [batch_size, width, nhidden])
      hidden = array_ops.transpose(transposed_hidden, [1, 0, 2])
    with variable_scope.variable_scope("reduce2"):
      output = lstm1d.sequence_to_final(hidden, num_filters_out)
    return output
Author: 1000sprites, Project: tensorflow, Lines: 26, Source file: lstm2d.py
Note: The tensorflow.python.ops.variable_scope.variable_scope examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not republish without permission.