This article collects typical usage examples of the Python function tensorflow.python.ops.variable_scope.variable_op_scope. If you have been wondering exactly what variable_op_scope does, how to call it, and what real usage looks like, the curated code examples below should help.
20 code examples of variable_op_scope are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python samples.
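Before the examples, here is a minimal sketch of the call pattern. This is an illustration under assumptions: it targets a pre-1.0 TensorFlow release where `variable_op_scope(values, name_or_scope, default_name)` still exists (later releases folded this behavior into `tf.variable_scope`), and the `linear` helper is hypothetical:

```python
import tensorflow as tf
from tensorflow.python.ops import variable_scope

def linear(x, output_dim, scope=None):
  # variable_op_scope opens a variable scope named `scope`, falling back to
  # a uniquified "linear" (linear, linear_1, ...) when `scope` is None.
  with variable_scope.variable_op_scope([x], scope, "linear"):
    w = variable_scope.get_variable(
        "w", [x.get_shape()[1], output_dim],
        initializer=tf.random_normal_initializer())
    b = variable_scope.get_variable(
        "b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, w) + b

x = tf.placeholder(tf.float32, [None, 4])
y1 = linear(x, 2)  # variables created under "linear/"
y2 = linear(x, 2)  # no reuse requested, so a fresh "linear_1/" scope
```

This op-library wrapper pattern, a values list, an optional caller-supplied scope, and a default name, is exactly what the examples below use.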
Example 1: testVarOpScope
def testVarOpScope(self):
  with self.test_session():
    with tf.name_scope("scope1"):
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower/scope2/")
      with variable_scope.variable_op_scope([], "tower", "default"):
        with self.assertRaises(ValueError):
          variable_scope.get_variable("w", [])
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower_1/scope2/")
    with tf.name_scope("scope2"):
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "default_1/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default_1/scope2/")
Developer: CdricGmd, Project: tensorflow, Lines: 25, Source: variable_scope_test.py
Example 2: testVarOpScopeReuse
def testVarOpScopeReuse(self):
  with self.test_session():
    with tf.variable_scope("outer") as outer:
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/tower/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")
    with tf.variable_scope(outer, reuse=True) as outer:
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/tower/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
Developer: CdricGmd, Project: tensorflow, Lines: 25, Source: variable_scope_test.py
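The reuse mechanics this test exercises reduce to a small sketch (same pre-1.0 API assumption as in the intro; graph-mode semantics assumed):

```python
with tf.variable_scope("outer") as outer:
  with variable_scope.variable_op_scope([], "tower", "default"):
    w = variable_scope.get_variable("w", [])   # creates outer/tower/w

# Re-entering the captured scope with reuse=True makes the same
# variable_op_scope fetch the existing variable instead of raising.
with tf.variable_scope(outer, reuse=True):
  with variable_scope.variable_op_scope([], "tower", "default"):
    w2 = variable_scope.get_variable("w", [])  # returns the same Variable

assert w is w2
```

Note that only variable scopes are deduplicated on reuse; the surrounding name scope still gets a fresh `outer_1/` suffix, which is what the `sc2` assertions above check.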
Example 3: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  self._feature_columns = feature_columns
  input_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas,
          min_slice_size=64 << 20))
  with variable_scope.variable_op_scope(
      features.values(),
      "input_from_feature_columns",
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
        features,
        self._get_feature_columns(),
        weight_collections=[self._weight_collection_name],
        scope=scope)
  hidden_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas))
  for layer_id, num_hidden_units in enumerate(self._hidden_units):
    with variable_scope.variable_op_scope(
        [net], "hiddenlayer_%d" % layer_id,
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._activation_fn,
          variables_collections=[self._weight_collection_name],
          scope=scope)
      if self._dropout is not None and is_training:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - self._dropout))
    self._add_hidden_layer_summary(net, scope.name)
  with variable_scope.variable_op_scope(
      [net], "dnn_logits",
      partitioner=hidden_layer_partitioner) as scope:
    logits = layers.fully_connected(
        net,
        self._num_label_columns,
        activation_fn=None,
        variables_collections=[self._weight_collection_name],
        scope=scope)
  self._add_hidden_layer_summary(logits, "dnn_logits")
  return logits
Developer: 31H0B1eV, Project: tensorflow, Lines: 48, Source: composable_model.py
Example 4: stack
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  `stack` allows you to repeatedly apply the same operation with different
  arguments `stack_args[i]`. For each application of the layer, `stack` creates
  a new scope appended with an increasing number. For example:

  ```python
  stack(x, fully_connected, [32, 64, 128], scope='fc')
  # It is equivalent to:

  x = fully_connected(x, 32, scope='fc/fc_1')
  x = fully_connected(x, 64, scope='fc/fc_2')
  x = fully_connected(x, 128, scope='fc/fc_3')
  ```

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer(inputs, *args, **kwargs)
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
    outputs = inputs
    scope = scope or layer.__name__
    for i in range(len(stack_args)):
      kwargs['scope'] = scope + '_' + str(i+1)
      layer_args = stack_args[i]
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
Developer: brando90, Project: tensor_flow_experiments, Lines: 35, Source: bn_official_excerp.py
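A usage sketch for this `stack` helper, assuming the old `tf.contrib.layers.fully_connected(inputs, num_outputs, ...)` signature (not part of the excerpt):

```python
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 16])
# Builds three fully connected layers scoped fc/fc_1, fc/fc_2, fc/fc_3,
# all inside a single "fc" variable_op_scope.
y = stack(x, layers.fully_connected, [32, 64, 128], scope='fc')
```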
Example 5: __init__
def __init__(self, name, func, create_scope_now=False):
  """Creates a template for the given function.

  Args:
    name: A name for the scope created by this template. The
      name will be made unique by appending `_N` to it (see how
      `tf.variable_op_scope` treats the `default_name` for details).
    func: The function to apply each time.
    create_scope_now: Whether to create the scope at Template construction
      time, rather than at first call. Defaults to False. Creating the scope
      at construction time may be more convenient if the template is to be
      passed through much lower level code, and you want to be sure of the
      scope name without knowing exactly where it will be first called. If
      set to True, the scope will be created in the constructor, and all
      subsequent times in __call__, leading to a trailing numeral being
      added to the names of all created Tensors. If set to False, the scope
      will be created at the first call location.

  Raises:
    ValueError: if the name is None.
  """
  self._func = func
  self._stacktrace = traceback.format_stack()[:-2]
  self._name = name
  if name is None:
    raise ValueError("name cannot be None.")
  if create_scope_now:
    with variable_scope.variable_op_scope([], None, self._name) as vs:
      self._var_scope = vs
  else:
    self._var_scope = None
  # This variable keeps track of whether the template has been called yet,
  # which is not the same as whether the scope has been created.
  self._variables_created = False
Developer: 01-, Project: tensorflow, Lines: 34, Source: template.py
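In public TensorFlow this constructor is normally reached through `tf.make_template`. A sketch of the variable-sharing behavior it enables (the `scale_by_y` function is illustrative):

```python
import tensorflow as tf

def scale_by_y(x):
  # get_variable runs inside the template's variable_op_scope.
  y = tf.get_variable("y", [], initializer=tf.constant_initializer(2.0))
  return x * y

scale = tf.make_template("scale_by_y", scale_by_y)
a = scale(tf.constant(3.0))  # first call creates scale_by_y/y
b = scale(tf.constant(5.0))  # second call reuses the same variable
```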
Example 6: dnn_autoencoder
def dnn_autoencoder(
    tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
    scope=None):
  """Creates fully connected autoencoder subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function used to map inner latent layer onto
      reconstruction layer.
    add_noise: a function that adds noise to tensor_in,
      e.g. def add_noise(x):
               return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
    dropout: if not None, will add a dropout layer with given
      probability.
    scope: the variable scope for this op.

  Returns:
    Tensors for encoder and decoder.
  """
  with vs.variable_op_scope([tensor_in], scope, "autoencoder"):
    if add_noise is not None:
      tensor_in = add_noise(tensor_in)
    with vs.variable_scope("encoder"):
      # build DNN encoder
      encoder = dnn_ops.dnn(
          tensor_in, hidden_units, activation=activation, dropout=dropout)
    with vs.variable_scope("decoder"):
      # reverse hidden_units and build DNN decoder
      decoder = dnn_ops.dnn(
          encoder, hidden_units[::-1], activation=activation, dropout=dropout)
    return encoder, decoder
Developer: 0ruben, Project: tensorflow, Lines: 32, Source: autoencoder_ops.py
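A usage sketch for this autoencoder builder. The TF-native noise function and the loss are assumptions, not part of the excerpt (the docstring's own `np.random.normal` example only works on eager arrays, so a graph-friendly variant is used here):

```python
def add_gaussian_noise(x):
  # Corrupt the input slightly so the decoder must learn to denoise it.
  return x + tf.random_normal(tf.shape(x), stddev=0.1)

features = tf.placeholder(tf.float32, [None, 64])
encoder, decoder = dnn_autoencoder(
    features, hidden_units=[32, 8],
    add_noise=add_gaussian_noise, dropout=0.1, scope="my_autoencoder")
reconstruction_loss = tf.reduce_mean(tf.square(decoder - features))
```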
Example 7: _auc_hist_accumulate
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_op_scope(
      [hist_true, hist_false], None, 'hist_accumulate'):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)
    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')
    return hist_true_acc, hist_false_acc, update_op
Developer: 285219011, Project: hello-world, Lines: 27, Source: histogram_ops.py
Example 8: weighted_moving_average
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value. The moving average decay.
    weight: `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division. If `False`, use division implied by dtypes.
    collections: List of graph collections keys to add the internal variables
      `value * weight` and `weight` to. Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which
  # are moving averages of the updates. Thus, the signature of this function
  # is quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.VARIABLES]
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, "WeightedMovingAvg") as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        initializer=init_ops.zeros_initializer(value.get_shape(),
                                               dtype=value.dtype),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        initializer=init_ops.zeros_initializer(weight.get_shape(),
                                               dtype=weight.dtype),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(value_x_weight_var, value * weight, decay)
    denominator = assign_moving_average(weight_var, weight, decay)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
Developer: AriaAsuka, Project: tensorflow, Lines: 57, Source: moving_averages.py
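A usage sketch: a running mean of per-batch losses weighted by batch size, so larger batches count for more. The placeholder names and the era-appropriate initializer are assumptions:

```python
batch_loss = tf.placeholder(tf.float32, [])
batch_size = tf.placeholder(tf.float32, [])

# Each evaluation updates both internal moving averages and returns
# moving_average(loss * size) / moving_average(size).
avg_loss = weighted_moving_average(batch_loss, decay=0.99, weight=batch_size)

with tf.Session() as sess:
  sess.run(tf.initialize_all_variables())  # pre-1.0 global initializer
  print(sess.run(avg_loss, {batch_loss: 0.5, batch_size: 32.0}))
```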
Example 9: auc_using_histogram
def auc_using_histogram(boolean_labels,
                        scores,
                        score_range,
                        nbins=100,
                        collections=None,
                        check_shape=True,
                        name=None):
  """AUC computed by maintaining histograms.

  Rather than computing AUC directly, this Op maintains Variables containing
  histograms of the scores associated with `True` and `False` labels. By
  comparing these the AUC is generated, with some discretization error.
  See: "Efficient AUC Learning Curve Calculation" by Bouckaert.

  This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance. The accuracy is limited by discretization error due
  to the finite number of bins. If scores are concentrated in fewer bins,
  accuracy is lower. If this is a concern, we recommend trying different
  numbers of bins and comparing results.

  Args:
    boolean_labels: 1-D boolean `Tensor`. Entry is `True` if the corresponding
      record is in class.
    scores: 1-D numeric `Tensor`, same shape as boolean_labels.
    score_range: `Tensor` of shape `[2]`, same dtype as `scores`. The min/max
      values of score that we expect. Scores outside range will be clipped.
    nbins: Integer number of bins to use. Accuracy strictly increases as the
      number of bins increases.
    collections: List of graph collections keys. Internal histogram Variables
      are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
    check_shape: Boolean. If `True`, do a runtime shape check on the scores
      and labels.
    name: A name for this Op. Defaults to "auc_using_histogram".

  Returns:
    auc: `float32` scalar `Tensor`. Fetching this converts internal histograms
      to auc value.
    update_op: `Op`, when run, updates internal histograms.
  """
  if collections is None:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, score_range], name, 'auc_using_histogram'):
    scores, boolean_labels = metric_ops.remove_squeezable_dimensions(
        scores, boolean_labels)
    score_range = ops.convert_to_tensor(score_range, name='score_range')
    boolean_labels, scores = _check_labels_and_scores(
        boolean_labels, scores, check_shape)
    hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
                                                 score_range, nbins)
    hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
                                                                    hist_false,
                                                                    nbins,
                                                                    collections)
    auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
    return auc, update_op
Developer: 285219011, Project: hello-world, Lines: 56, Source: histogram_ops.py
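The streaming pattern this returns (run `update_op` once per batch, fetch `auc` at the end) looks roughly like the sketch below; `eval_batches` is a hypothetical iterable of numpy arrays:

```python
labels = tf.placeholder(tf.bool, [None])
scores = tf.placeholder(tf.float32, [None])
auc, update_op = auc_using_histogram(
    labels, scores, score_range=[0.0, 1.0], nbins=100)

with tf.Session() as sess:
  # The accumulator histograms live in LOCAL_VARIABLES, so both
  # pre-1.0 initializers are run here.
  sess.run(tf.initialize_all_variables())
  sess.run(tf.initialize_local_variables())
  for batch_labels, batch_scores in eval_batches:  # hypothetical data source
    sess.run(update_op, {labels: batch_labels, scores: batch_scores})
  print("AUC:", sess.run(auc))
```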
Example 10: __call__
def __call__(self, *args, **kwargs):
  # Capture the name of the variable_scope here because if we capture at
  # construction, then name_scopes would have a '_N+1' suffix.
  if self._var_scope:
    with variable_scope.variable_scope(self._var_scope, reuse=True):
      return self._call_func(args, kwargs, check_for_new_variables=True)
  else:
    with variable_scope.variable_op_scope([], None, self._name) as vs:
      self._var_scope = vs
      return self._call_func(args, kwargs, check_for_new_variables=False)
Developer: natalya-patrikeeva, Project: tensorflow, Lines: 10, Source: template.py
Example 11: stack
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  `stack` allows you to repeatedly apply the same operation with different
  arguments `stack_args[i]`. For each application of the layer, `stack` creates
  a new scope appended with an increasing number. For example:

  ```python
  y = stack(x, fully_connected, [32, 64, 128], scope='fc')
  # It is equivalent to:

  x = fully_connected(x, 32, scope='fc/fc_1')
  x = fully_connected(x, 64, scope='fc/fc_2')
  y = fully_connected(x, 128, scope='fc/fc_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='stack'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
    inputs = ops.convert_to_tensor(inputs)
    if scope is None:
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'stack'
    outputs = inputs
    for i in range(len(stack_args)):
      kwargs['scope'] = scope + '_' + str(i+1)
      layer_args = stack_args[i]
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
Developer: 31H0B1eV, Project: tensorflow, Lines: 53, Source: layers.py
Example 12: bias_add
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with at least rank 2 and a known value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: Optional activation function.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_op_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_op_scope([inputs],
                                        scope, 'BiasAdd', reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    biases_collections = utils.get_variable_collections(variables_collections,
                                                        'biases')
    biases = variables.model_variable('biases',
                                      shape=[num_features,],
                                      dtype=dtype,
                                      initializer=initializer,
                                      regularizer=regularizer,
                                      collections=biases_collections,
                                      trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
Developer: 31H0B1eV, Project: tensorflow, Lines: 49, Source: layers.py
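A sketch of the two ways the docstring suggests using it, standalone or as a `normalizer_fn` for another contrib layer (the old `tf.contrib.layers` API is assumed):

```python
images = tf.placeholder(tf.float32, [None, 28, 28, 8])

# Standalone: adds a per-channel bias, then applies a ReLU.
out = bias_add(images, activation_fn=tf.nn.relu, scope="bias1")

# As a normalizer for a conv layer (conv2d then skips its own bias).
net = layers.conv2d(images, 16, [3, 3], normalizer_fn=bias_add)
```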
Example 13: _dnn_logits
def _dnn_logits(self, features, is_training=False):
    net = layers.input_from_feature_columns(
        features, self._get_dnn_feature_columns(),
        weight_collections=[self._dnn_weight_collection]
    )
    for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
        with variable_scope.variable_op_scope(
            [net],
            "hiddenlayer_%d" % layer_id,
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=self._config.num_ps_replicas
            ),
        ) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._dnn_activation_fn,
                variables_collections=[self._dnn_weight_collection],
                scope=scope,
            )
            if self._dnn_dropout is not None and is_training:
                net = layers.dropout(net, keep_prob=(1.0 - self._dnn_dropout))
        self._add_hidden_layer_summary(net, scope.name)
    with variable_scope.variable_op_scope(
        [net],
        "dnn_logit",
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._config.num_ps_replicas
        ),
    ) as scope:
        logit = layers.fully_connected(
            net,
            self._target_column.num_label_columns,
            activation_fn=None,
            variables_collections=[self._dnn_weight_collection],
            scope=scope,
        )
    self._add_hidden_layer_summary(logit, "dnn_logit")
    return logit
Developer: 285219011, Project: liuwenfeng, Lines: 36, Source: dnn_linear_combined.py
Example 14: __init__
def __init__(self, value, decay,
             truediv=True,
             collections=None,
             reduction_indices=None,
             name=None):
  self.value = value
  self.reduction_indices = reduction_indices or [0]
  eps = 1e-8
  if truediv:
    div = math_ops.truediv
  else:
    div = math_ops.div
  if collections is None:
    collections = [ops.GraphKeys.VARIABLES]
  value_shape = value.get_shape().as_list()
  shape = []
  for dim in range(len(value_shape)):
    if dim in self.reduction_indices:
      shape.append(1)
    else:
      shape.append(value_shape[dim])
  with variable_scope.variable_op_scope(
      [value, decay], name, "MomentTracker") as scope:
    mean_x_weight_var = variable_scope.get_variable(
        "mean_x_weight", trainable=False, collections=collections,
        initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))
    variance_x_weight_var = variable_scope.get_variable(
        "variance_x_weight", trainable=False, collections=collections,
        initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))
    weight_var = variable_scope.get_variable(
        "weight", trainable=False, collections=collections,
        initializer=init_ops.zeros_initializer([1], dtype=tf.float32))
    self.tracked_mean = div(mean_x_weight_var, weight_var + eps)
    self.tracked_variance = div(variance_x_weight_var, weight_var + eps)
    self.batch_mean, self.batch_variance = tf.nn.moments(
        self.value, axes=self.reduction_indices,
        shift=self.tracked_mean, keep_dims=True)
    mean_numerator = assign_moving_average(
        mean_x_weight_var, self.batch_mean, decay)
    variance_numerator = assign_moving_average(
        variance_x_weight_var, self.batch_variance, decay)
    denominator = assign_moving_average(weight_var, 1.0, decay)
    self.update_mean = div(mean_numerator, denominator + eps,
                           name=scope.name)
    self.update_variance = div(variance_numerator, denominator + eps,
                               name=scope.name)
Developer: NoahDStein, Project: NeuralNetSandbox, Lines: 48, Source: moment_tracker.py
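A usage sketch for this third-party class: track running moments over the batch dimension, as one might for batch-norm inference estimates. `some_batch` is a hypothetical numpy array; the attribute names follow the excerpt:

```python
x = tf.placeholder(tf.float32, [None, 10])
tracker = MomentTracker(x, decay=0.99, reduction_indices=[0])

with tf.Session() as sess:
  sess.run(tf.initialize_all_variables())
  # Running the update ops refreshes the weighted moving moments...
  sess.run([tracker.update_mean, tracker.update_variance],
           {x: some_batch})  # some_batch: hypothetical [N, 10] array
  # ...which can then be read back at inference time.
  mean, var = sess.run([tracker.tracked_mean, tracker.tracked_variance])
```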
Example 15: weighted_moving_average
def weighted_moving_average(
    value, decay, weight, truediv=True, name="WeightedMovingAvg"):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    moving_average(value * weight) / moving_average(weight),
  where a moving average updates by the rule
    new_value = decay * old_value + (1 - decay) * update

  Args:
    value: A tensor.
    decay: A float Tensor or float value. The moving average decay.
    weight: A tensor that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv: Boolean, if True, dividing by moving_average(weight) is floating
      point division. If False, use division implied by dtypes.
    name: Optional name of the returned operation.

  Returns:
    An Operation that updates the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which
  # are moving averages of the updates. Thus, the signature of this function
  # is quite different than assign_moving_average.
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, name) as scope:
    value_variable = variable_scope.get_variable(
        "value",
        initializer=array_ops.zeros_initializer(
            value.get_shape(), dtype=value.dtype),
        trainable=False
    )
    weight_variable = variable_scope.get_variable(
        "weight",
        initializer=array_ops.zeros_initializer(
            weight.get_shape(), dtype=weight.dtype),
        trainable=False
    )
    numerator = assign_moving_average(value_variable, value * weight, decay)
    denominator = assign_moving_average(weight_variable, weight, decay)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
Developer: CdricGmd, Project: tensorflow, Lines: 46, Source: moving_averages.py
Example 16: repeat
def repeat(inputs, repetitions, layer, *args, **kwargs):
  """Applies the same layer with the same arguments repeatedly.

  ```python
  y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
  # It is equivalent to:

  x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
  x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
  y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    repetitions: Int, number of repetitions.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    *args: Extra args for the layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a tensor result of applying the layer, repetitions times.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with variable_scope.variable_op_scope([inputs], scope, 'Repeat'):
    inputs = ops.convert_to_tensor(inputs)
    if scope is None:
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'repeat'
    outputs = inputs
    for i in range(repetitions):
      kwargs['scope'] = scope + '_' + str(i+1)
      outputs = layer(outputs, *args, **kwargs)
    return outputs
Developer: 31H0B1eV, Project: tensorflow, Lines: 44, Source: layers.py
Example 17: _make_auc_histograms
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, nbins], None, 'make_auc_histograms'):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
Developer: 285219011, Project: hello-world, Lines: 20, Source: histogram_ops.py
Example 18: __call__
def __call__(self, *args, **kwargs):
  if self._var_scope:
    if self._variables_created:
      # This is not the first visit to __call__, so variables have already
      # been created, and we want to reuse them.
      with variable_scope.variable_scope(self._var_scope, reuse=True):
        return self._call_func(args, kwargs, check_for_new_variables=True)
    else:
      # This is the first visit to __call__, but the scope has already been
      # created in the constructor. Set _variables_created so that subsequent
      # calls take the if branch above.
      self._variables_created = True
      with variable_scope.variable_scope(self._var_scope):
        return self._call_func(args, kwargs, check_for_new_variables=False)
  else:
    # The scope was not created at construction time, so create it here.
    # Subsequent calls should reuse variables.
    self._variables_created = True
    with variable_scope.variable_op_scope([], None, self._name) as vs:
      self._var_scope = vs
      return self._call_func(args, kwargs, check_for_new_variables=False)
Developer: 01-, Project: tensorflow, Lines: 21, Source: template.py
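A sketch of the two construction modes this `__call__` handles (compare `create_scope_now` in Example 5). `my_fn` and `x` are hypothetical:

```python
t_lazy = Template("block", my_fn)                          # scope made on first call
t_eager = Template("block", my_fn, create_scope_now=True)  # scope made immediately

out1 = t_eager(x)  # first call: enters the pre-made scope, creates variables
out2 = t_eager(x)  # later calls: same scope re-entered with reuse=True
```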
Example 19: to_weighted_sum
def to_weighted_sum(self,
                    input_tensor,
                    num_outputs=1,
                    weight_collections=None,
                    trainable=True):
  """Returns a Tensor as linear predictions and a list of created Variable."""
  def _weight(name):
    return variable_scope.get_variable(
        name,
        shape=[self.dimension, num_outputs],
        initializer=array_ops.zeros_initializer,
        collections=_add_variable_collection(weight_collections))

  if self.name:
    with variable_scope.variable_op_scope([input_tensor], None, self.name):
      weight = _weight("weight")
  else:
    # Old behavior to support a subset of old checkpoints.
    weight = _weight("_weight")
  # The _RealValuedColumn has the shape of [batch_size, column.dimension].
  log_odds_by_dim = math_ops.matmul(input_tensor, weight)
  return log_odds_by_dim, [weight]
Developer: 735545856, Project: tensorflow, Lines: 23, Source: feature_column.py
Example 20: legacy_fully_connected
def legacy_fully_connected(x,
                           num_output_units,
                           activation_fn=None,
                           weight_init=initializers.xavier_initializer(),
                           bias_init=init_ops.zeros_initializer,
                           name=None,
                           weight_collections=(ops.GraphKeys.WEIGHTS,),
                           bias_collections=(ops.GraphKeys.BIASES,),
                           output_collections=(ops.GraphKeys.ACTIVATIONS,),
                           trainable=True,
                           weight_regularizer=None,
                           bias_regularizer=None):
  # pylint: disable=anomalous-backslash-in-string
  r"""Adds the parameters for a fully connected layer and returns the output.

  A fully connected layer is generally defined as a matrix multiply:
  `y = f(w * x + b)` where `f` is given by `activation_fn`. If
  `activation_fn` is `None`, the result of `y = w * x + b` is
  returned.

  If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
  with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
  multiply along the first dimensions. The result r is a tensor of shape
  [\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
  where \\\( r_{i_0, ..., i_{n-1}, k} =
  \\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
  This is accomplished by reshaping `x` to 2-D
  [\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
  before the matrix multiply and afterwards reshaping it to
  [\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].

  This op creates `w` and optionally `b`. Bias (`b`) can be disabled by
  setting `bias_init` to `None`.

  The variable creation is compatible with `tf.variable_scope` and so can be
  reused with `tf.variable_scope` or `tf.make_template`.

  Most of the details of variable creation can be controlled by specifying the
  initializers (`weight_init` and `bias_init`) and in which collections to
  place the created variables (`weight_collections` and `bias_collections`;
  note that the variables are always added to the `VARIABLES` collection). The
  output of the layer can be placed in custom collections using
  `output_collections`. The collections arguments default to `WEIGHTS`,
  `BIASES` and `ACTIVATIONS`, respectively.

  A per layer regularization can be specified by setting `weight_regularizer`
  and `bias_regularizer`, which are applied to the weights and biases
  respectively, and whose output is added to the `REGULARIZATION_LOSSES`
  collection.

  Args:
    x: The input `Tensor`.
    num_output_units: The size of the output.
    activation_fn: A function that requires a single Tensor that is applied as
      a non-linearity. If None is used, do not apply any activation.
    weight_init: An optional weight initialization, defaults to
      `xavier_initializer`.
    bias_init: An initializer for the bias, defaults to 0. Set to `None` in
      order to disable bias.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "fully_connected" will be created. See
      `tf.variable_op_scope` for details.
    weight_collections: List of graph collections to which weights are added.
    bias_collections: List of graph collections to which biases are added.
    output_collections: List of graph collections to which outputs are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    weight_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for weights.
    bias_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for biases.

  Returns:
    The output of the fully connected layer.

  Raises:
    ValueError: if x has rank less than 2 or if its last dimension is not set.
  """
  with variable_scope.variable_op_scope([x], name, 'fully_connected'):
    dims = x.get_shape().dims
    if dims is None:
      raise ValueError('dims of x must be known but is None')
    if len(dims) < 2:
      raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
    num_input_units = dims[-1].value
    if num_input_units is None:
      raise ValueError('last dimension of x must be known but is None')
    dtype = x.dtype.base_dtype
    weight_collections = set(list(weight_collections or []) +
                             [ops.GraphKeys.VARIABLES])
    w = variable_scope.get_variable('weights',
                                    shape=[num_input_units, num_output_units],
                                    dtype=dtype,
                                    initializer=weight_init,
                                    collections=weight_collections,
                                    regularizer=weight_regularizer,
                                    trainable=trainable)
    # NOTE: the excerpt is cut off at this point in the source page; the
    # closing `trainable=trainable)` above follows the function's documented
    # arguments and is an assumption. The rest of the body is not shown.