This article collects and summarizes typical usage examples of the Python function tensorflow.variable_op_scope. If you have been wondering what exactly variable_op_scope does, how to use it, and what real code that calls it looks like, the curated examples below may help.
The following shows 20 code examples of variable_op_scope, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python examples.
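Before the examples, here is a minimal orientation sketch of the calling convention (written against the pre-1.0 TensorFlow API, where tf.variable_op_scope still exists; the linear helper below is hypothetical and not drawn from any of the examples):

import tensorflow as tf

def linear(x, output_dim, scope=None):
    # variable_op_scope(values, name_or_scope, default_name) opens the scope
    # named by `scope` when one is passed, and otherwise a uniquified scope
    # derived from the default name "Linear".
    with tf.variable_op_scope([x], scope, "Linear"):
        w = tf.get_variable("w", [x.get_shape()[1], output_dim])
        b = tf.get_variable("b", [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

In TensorFlow 1.0 and later the same pattern is written as tf.variable_scope(scope, default_name="Linear", values=[x]), which is why variable_op_scope no longer appears in current code.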
Example 1: testVarOpScopeReuseParam
def testVarOpScopeReuseParam(self):
  with self.test_session():
    with tf.variable_scope("outer") as outer:
      with tf.variable_op_scope([], "tower", "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/tower/scope2/")
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")

    with tf.variable_scope(outer) as outer:
      with tf.variable_op_scope([], "tower", "default", reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/tower/scope2/")
      outer.reuse_variables()
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
Author: 285219011 | Project: hello-world | Lines: 26 | Source: variable_scope_test.py
Example 2: testVarOpScopeOuterScope
def testVarOpScopeOuterScope(self):
  with self.test_session():
    with tf.variable_scope("outer") as outer:
      pass
    with tf.variable_op_scope([], outer, "default"):
      self.assertEqual(tf.get_variable("w", []).name,
                       "outer/w:0")
      with tf.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_1/scope2/")
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")

    with tf.variable_op_scope([], outer, "default", reuse=True):
      self.assertEqual(tf.get_variable("w", []).name,
                       "outer/w:0")
      with tf.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_2/scope2/")
      outer.reuse_variables()
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/default/scope2/")
Author: 285219011 | Project: hello-world | Lines: 26 | Source: variable_scope_test.py
Example 3: testVarOpScope
def testVarOpScope(self):
  with self.test_session():
    with tf.name_scope("scope1"):
      with tf.variable_op_scope([], "tower", "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower/scope2/")
      with tf.variable_op_scope([], "tower", "default"):
        with self.assertRaises(ValueError):
          tf.get_variable("w", [])
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower_1/scope2/")

    with tf.name_scope("scope2"):
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default/scope2/")
      with tf.variable_op_scope([], None, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "default_1/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default_1/scope2/")
Author: 285219011 | Project: hello-world | Lines: 25 | Source: variable_scope_test.py
Example 4: _rnn_template
def _rnn_template(incoming, cell, dropout=None, return_seq=False,
                  return_state=False, initial_state=None, dynamic=False,
                  scope=None, name="LSTM"):
    """ RNN Layer Template. """
    sequence_length = None
    if dynamic:
        sequence_length = retrieve_seq_length_op(
            incoming if isinstance(incoming, tf.Tensor) else tf.pack(incoming))

    input_shape = utils.get_incoming_shape(incoming)

    with tf.variable_op_scope([incoming], scope, name) as scope:
        name = scope.name

        _cell = cell
        # Apply dropout
        if dropout:
            if type(dropout) in [tuple, list]:
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a float or a "
                                "2-element tuple/list of floats)")
            cell = DropoutWrapper(cell, in_keep_prob, out_keep_prob)

        inference = incoming
        # If a tensor is given, convert it to a per-timestep list
        # (np.ndarray, not np.array, which is a function rather than a type).
        if type(inference) not in [list, np.ndarray]:
            ndim = len(input_shape)
            assert ndim >= 3, "Input dim should be at least 3."
            axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, axes)
            inference = tf.unpack(inference)

        outputs, state = _rnn(cell, inference, dtype=tf.float32,
                              initial_state=initial_state, scope=name,
                              sequence_length=sequence_length)

        # Retrieve RNN Variables
        c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
        for v in [_cell.W, _cell.b]:
            if hasattr(v, "__len__"):
                for var in v:
                    tf.add_to_collection(c, var)
            else:
                tf.add_to_collection(c, v)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])

    if dynamic:
        outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])
        o = advanced_indexing_op(outputs, sequence_length)
    else:
        o = outputs if return_seq else outputs[-1]

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)

    return (o, state) if return_state else o
Author: CharlesShang | Project: tflearn | Lines: 60 | Source: recurrent.py
Example 5: drop_path
def drop_path(columns,
              coin):
    with tf.variable_op_scope([columns], None, "DropPath"):
        out = tf.cond(coin,
                      lambda: drop_some(columns),
                      lambda: random_column(columns))
    return out
Author: edgelord | Project: FractalNet | Lines: 7 | Source: fractal_block.py
Example 6: batch_norm
def batch_norm(input, is_train, scope=None, reuse=None, decay=0.9):
    shape = input.get_shape()
    num_out = shape[-1]
    with tf.variable_op_scope([input], scope, 'BN', reuse=reuse):
        beta = tf.get_variable('beta', [num_out],
                               initializer=tf.constant_initializer(0.0),
                               trainable=True)
        gamma = tf.get_variable('gamma', [num_out],
                                initializer=tf.constant_initializer(1.0),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments') \
            if len(shape) == 4 else tf.nn.moments(input, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(is_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
Author: juho-lee | Project: tf_practice | Lines: 25 | Source: nn.py
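Because is_train is consumed by tf.cond, it is meant to be a scalar boolean tensor rather than a Python bool; a hypothetical call might look like this (the placeholder and the images tensor are illustrative, not from the source project):

is_train = tf.placeholder(tf.bool, [], name='is_train')
net = batch_norm(images, is_train, scope='bn1', decay=0.99)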
Example 7: hidden_layer
def hidden_layer(data, input_size, layer_size, keep_prob_prior, name=None):
    # `initial_a`, `keep_prob` and `BatchNormalizer` are defined elsewhere
    # in the source file.
    with tf.variable_op_scope([data, input_size, layer_size], name, "hidden_layer") as scope:
        ewma = tf.train.ExponentialMovingAverage(decay=0.99, name='ema_' + name)
        bn = BatchNormalizer(layer_size, 0.001, ewma, True, keep_prob_prior, 'bn_' + name)
        weights = tf.get_variable(
            'weights',
            [input_size, layer_size],
            initializer=tf.truncated_normal_initializer(
                0, stddev=math.sqrt(2.0 / ((1.0 + initial_a ** 2.0) * float(input_size)))))
        #weights = clip_weight_norm(weights, max_norm, name='clipped_weights')
        if not scope.reuse:
            tf.histogram_summary(weights.name, weights)
        x = bn.normalize(tf.matmul(data, weights), train=keep_prob < 1.0)
        mean, variance = tf.nn.moments(x, [0])
        c = tf.div(tf.matmul(x - mean, x - mean, transpose_a=True), tf.to_float(tf.shape(x)[0]))
        weight_decay = 0.0
        if keep_prob < 1.0:
            weight_decay = tf.nn.l2_loss(c) - tf.nn.l2_loss(variance)
        tf.add_to_collection('losses', weight_decay)
        hidden = tf.nn.elu(x)
        #tf.scalar_summary('sparsity_'+hidden.name, tf.nn.zero_fraction(hidden))
        hidden_dropout = tf.nn.dropout(hidden, keep_prob)
        return hidden_dropout, bn
Author: mikowals | Project: mnist | Lines: 28 | Source: mnist.py
Example 8: repeat_op
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    """Builds a sequential tower starting from inputs by applying an op repeatedly.

    It creates new scopes for each operation by increasing a counter.
    Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

    Args:
      repetitions: number of repetitions.
      inputs: a tensor of size [batch_size, height, width, channels].
      op: an operation.
      *args: args for the op.
      **kwargs: kwargs for the op.

    Returns:
      a tensor resulting from applying the operation op, repetitions times.

    Raises:
      ValueError: if the op is unknown or wrong.
    """
    scope = kwargs.pop('scope', None)
    with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
        tower = inputs
        for _ in range(repetitions):
            tower = op(tower, *args, **kwargs)
        return tower
Author: paengs | Project: Net2Net | Lines: 28 | Source: ops.py
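A hypothetical call, following the docstring's own example (this assumes an ops.conv2d with the signature used in the same project, and a 4-D images tensor; both are illustrative):

net = repeat_op(3, images, ops.conv2d, 64, [3, 3], scope='conv1')
# builds conv1/Conv, conv1/Conv_1 and conv1/Conv_2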
Example 9: __call__
def __call__(self, flow=None):
    """Constructs the layer in the TensorFlow graph.

    Args:
      flow: This argument is ignored. (Default value = None)

    Returns:
      Output of this layer.
    """
    with tf.variable_op_scope([flow], self.name, 'Embedding', reuse=self.reuse):
        if not self.reuse:
            self._table_loader = tf.placeholder(tf.float32, shape=self._init_values.shape, name='loader')
            self._lookup_table = tf.get_variable(
                'lookup_table',
                initializer=self._table_loader,
                trainable=self.trainable)
            self.params.append(self._lookup_table)
            tf.initialize_variables(self.params).run(feed_dict={self._table_loader: self._init_values})
            self.reuse = True
        flow = tf.placeholder(tf.int64, [None] + self._input_shape, 'input')
        tf.add_to_collection(GraphKeys.MODEL_INPUTS, flow)
        flow = tf.nn.embedding_lookup(self._lookup_table, flow)
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, flow)
        return flow
Author: Anna-Jiang | Project: first-test | Lines: 28 | Source: layers.py
Example 10: __call__
def __call__(self, flow=None):
    """Constructs the Parallel piece and its inner pieces.

    Args:
      flow: Input `Tensor` object. (Default value = None)

    Returns:
      Output of this `Parallel`.
    """
    # build inner pieces.
    with tf.variable_op_scope([], self.name, 'Parallel', reuse=self.reuse):
        if not self.reuse:
            self.reuse = True
        outputs = []
        for i, piece in enumerate(self.child_pieces):
            outputs.append(piece(flow))
        if self.mode == 'concat':
            return tf.concat(self.along_dim, outputs)
        elif self.mode == 'mean':
            return tf.add_n(outputs) / len(self.child_pieces)
        elif self.mode == 'sum':
            return tf.add_n(outputs)
Author: Anna-Jiang | Project: first-test | Lines: 26 | Source: pieces.py
Example 11: l2_normalize
def l2_normalize(incoming, dim, epsilon=1e-12, name="l2_normalize"):
    """ L2 Normalization.

    Normalizes along dimension `dim` using an L2 norm.

    For a 1-D tensor with `dim = 0`, computes

    ```
    output = x / sqrt(max(sum(x**2), epsilon))
    ```

    For `x` with more dimensions, independently normalizes each 1-D slice along
    dimension `dim`.

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        dim: `int`. Dimension along which to normalize.
        epsilon: `float`. A lower bound value for the norm. Will use
            `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`.
        name: `str`. A name for this layer (optional).

    Returns:
        A `Tensor` with the same shape as `x`.
    """
    # Bind the scope to a separate variable: the op's `name` argument expects
    # a string, not the VariableScope the context manager yields.
    with tf.variable_op_scope([incoming], name) as scope:
        x = tf.convert_to_tensor(incoming, name="x")
        square_sum = tf.reduce_sum(tf.square(x), [dim], keep_dims=True)
        x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))
        return tf.mul(x, x_inv_norm, name=scope.name)
Author: MLDL | Project: tflearn | Lines: 29 | Source: normalization.py
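A quick sanity check of the docstring's formula on a 1-D tensor (a sketch, assuming a TF 0.x session):

x = tf.constant([3.0, 4.0])
y = l2_normalize(x, dim=0)
with tf.Session() as sess:
    print(sess.run(y))  # ~[0.6, 0.8], since the L2 norm of [3, 4] is 5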
Example 12: embedding
def embedding(incoming, input_dim, output_dim, validate_indices=False,
              weights_init='truncated_normal', trainable=True, restore=True,
              reuse=False, scope=None, name="Embedding"):
    """ Embedding.

    Embedding layer for a sequence of ids.

    Input:
        2-D Tensor [samples, ids].

    Output:
        3-D Tensor [samples, embedded_ids, features].

    Arguments:
        incoming: Incoming 2-D Tensor.
        input_dim: `int`. Vocabulary size (number of ids).
        output_dim: `int`. Embedding size.
        validate_indices: `bool`. Whether or not to validate gather indices.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer's variables
            will be reused (shared).
        scope: `str`. Define this layer's scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Embedding'.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 2, "Incoming Tensor shape must be 2-D"

    W_init = weights_init
    if isinstance(weights_init, str):
        W_init = initializations.get(weights_init)()

    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name
        with tf.device('/cpu:0'):
            W = vs.variable("W", shape=[input_dim, output_dim],
                            initializer=W_init, trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
        inference = tf.cast(incoming, tf.int32)
        inference = tf.nn.embedding_lookup(W, inference,
                                           validate_indices=validate_indices)

    inference.W = W
    inference.scope = scope

    # Embedding doesn't support masking, so we save sequence length prior
    # to the lookup. Expand dim to 3d.
    shape = [-1] + inference.get_shape().as_list()[1:3] + [1]
    inference.seq_length = retrieve_seq_length_op(tf.reshape(incoming, shape))

    return inference
Author: Aeefire | Project: tflearn | Lines: 59 | Source: embedding_ops.py
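A hypothetical usage sketch matching the documented shapes (the placeholder and the sizes are illustrative, not from the source project):

ids = tf.placeholder(tf.int32, [None, 20])              # 2-D [samples, ids]
emb = embedding(ids, input_dim=10000, output_dim=128)   # 3-D [samples, 20, 128]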
Example 13: policy_network
def policy_network(state, theta, name='policy'):
    with tf.variable_op_scope([state], name, name):
        h0 = tf.identity(state, name='h0-state')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
Author: songrotek | Project: DDPG-tensorflow | Lines: 8 | Source: networks.py
Example 14: create_policy_network
def create_policy_network(self, state, theta, name="policy_network"):
    with tf.variable_op_scope([state], name, name):
        h0 = tf.identity(state, "state")
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='action')
        return action
Author: witwolf | Project: RL-DDPG | Lines: 8 | Source: ddpg.py
Example 15: sin_bank
def sin_bank(x, bank_size, length, scope=None):
    with tf.variable_op_scope([x], scope, "SinBank") as scope:
        bank = tf.get_variable("bank", dtype=tf.float32, shape=[bank_size, ],
                               initializer=tf.random_uniform_initializer(0.0, length))
        shift = tf.get_variable("shift", dtype=tf.float32, shape=[bank_size, ],
                                initializer=tf.random_uniform_initializer(0.0, length))
        if not tf.get_variable_scope().reuse:
            tf.histogram_summary(bank.name, bank)
        return tf.sin(x * bank + shift)
Author: lukemetz | Project: cppn | Lines: 9 | Source: adv_cppn_model.py
Example 16: model
def model(x, is_training=True):
    # `moduleList`, `dim` and `reuse` come from the enclosing scope
    # in the source project.
    outputs = []
    for i, m in enumerate(moduleList):
        name = 'layer_' + str(i)
        with tf.variable_op_scope([x], name, 'Layer', reuse=reuse):
            # append, rather than index into the empty list,
            # which would raise IndexError
            outputs.append(m(x, is_training=is_training))
    output = tf.concat(dim, outputs)
    return output
Author: SlideLucask | Project: BinaryNet.tf | Lines: 9 | Source: nnUtils.py
Example 17: dropout_layer
def dropout_layer(x, is_training=True):
    # `name` and `p` come from the enclosing scope in the source project.
    with tf.variable_op_scope([x], None, name):
        # def drop(): return tf.nn.dropout(x, p)
        # def no_drop(): return x
        # return tf.cond(is_training, drop, no_drop)
        if is_training:
            return tf.nn.dropout(x, p)
        else:
            return x
Author: SlideLucask | Project: BinaryNet.tf | Lines: 9 | Source: nnUtils.py
Example 18: policy
def policy(obs, theta, name='policy'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        summary = hist_summaries(h0, h1, h2, h3, action)
        return action, summary
Author: amoliu | Project: ddpg | Lines: 9 | Source: ddpg_nets_dm.py
Example 19: vgg_a
def vgg_a(inputs,
          num_classes=1000,
          dropout_keep_prob=0.5,
          is_training=True,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    is_training: whether or not the model is being trained.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_op_scope([inputs], scope, 'vgg_a') as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = dict(tf.get_collection(end_points_collection))
      if spatial_squeeze:
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
Author: 10imaging | Project: tensorflow | Lines: 56 | Source: vgg.py
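A hypothetical invocation in classification mode, following the docstring's 224x224 note (the placeholder is illustrative):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)
# logits has shape [None, 1000] after the spatial squeeze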
Example 20: create_q_network
def create_q_network(self, state, action, theta, name='q_network'):
    with tf.variable_op_scope([state, action], name, name):
        h0 = tf.identity(state, name='state')
        h1_state = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1])
        # h1 = concat(h1_state, action)
        h1 = tf.concat(1, [h1_state, action], name="h1")
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
        h3 = tf.add(tf.matmul(h2, theta[4]), theta[5], name='h3')
        q = tf.squeeze(h3, [1], name='q')
        return q
Author: witwolf | Project: RL-DDPG | Lines: 10 | Source: ddpg.py
Note: The tensorflow.variable_op_scope examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs and similar source-code and documentation platforms. The snippets were selected from projects contributed by many developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.