This article collects typical usage examples of the Python function tensorflow.python.ops.init_ops.uniform_unit_scaling_initializer. If you have been wondering what uniform_unit_scaling_initializer does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 9 code examples of uniform_unit_scaling_initializer, ordered by popularity.
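Before the examples, a minimal sketch of what the initializer does: it draws values from a uniform distribution whose range is scaled by the input dimension of the requested shape, so that the variance of a layer's outputs stays roughly constant regardless of fan-in. The snippet below is illustrative only and assumes the TF 1.x graph-mode API; the shape [64, 32], factor 1.5, and seed 42 are arbitrary choices.

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops

# Build the initializer; `factor` rescales the unit-variance range and
# `seed` makes the draw reproducible.
init = init_ops.uniform_unit_scaling_initializer(factor=1.5, seed=42,
                                                 dtype=dtypes.float32)
# Calling the initializer with a shape yields a tensor of initial values,
# which is exactly how the examples below use it.
init_value = init([64, 32], dtype=dtypes.float32)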
Example 1: testInitializerDifferent
def testInitializerDifferent(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
    init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
    init3 = init_ops.uniform_unit_scaling_initializer(
        1.5, seed=1, dtype=dtype)
    self.assertFalse(identicaltest(self, init1, init2))
    self.assertFalse(identicaltest(self, init1, init3))
    self.assertFalse(identicaltest(self, init2, init3))
Developer: HughKu | Project: tensorflow | Lines: 9 | Source: init_ops_test.py
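These tests rely on an identicaltest helper defined elsewhere in init_ops_test.py and not reproduced on this page. A rough sketch of its behavior, inferred from how the tests call it rather than copied from the TensorFlow source:

import numpy as np
from tensorflow.python.framework import ops

def identicaltest(tc, init1, init2, shape=None):
  # Evaluate both initializers on the same shape in two fresh graphs and
  # report whether they produce numerically identical tensors.
  if shape is None:
    shape = [100]
  with tc.test_session(graph=ops.Graph()):
    t1 = init1(shape).eval()
  with tc.test_session(graph=ops.Graph()):
    t2 = init2(shape).eval()
  return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)

Under that reading, Example 1 asserts that different seeds, or the same seed with a different scaling factor, yield different tensors.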
Example 2: testInitializerIdentical
def testInitializerIdentical(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
    init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
    self.assertTrue(identicaltest(self, init1, init2))
    init3 = init_ops.uniform_unit_scaling_initializer(
        1.5, seed=1, dtype=dtype)
    init4 = init_ops.uniform_unit_scaling_initializer(
        1.5, seed=1, dtype=dtype)
    self.assertTrue(identicaltest(self, init3, init4))
Developer: HughKu | Project: tensorflow | Lines: 10 | Source: init_ops_test.py
Example 3: testZeroSize
def testZeroSize(self):
  shape = [0, 2]
  with self.test_session():
    x = variable_scope.get_variable(
        "x",
        shape=shape,
        initializer=init_ops.uniform_unit_scaling_initializer())
    self.assertAllEqual(shape, x.eval().shape)
Developer: kadeng | Project: tensorflow | Lines: 8 | Source: init_ops_test.py
Example 4: testZeroSize
def testZeroSize(self):
  shape = [0, 2]
  with self.cached_session():
    x = variable_scope.get_variable(
        "x",
        shape=shape,
        initializer=init_ops.uniform_unit_scaling_initializer())
    variables.global_variables_initializer().run()
    self.assertAllEqual(shape, self.evaluate(x).shape)
Developer: aeverall | Project: tensorflow | Lines: 9 | Source: init_ops_test.py
Example 5: testDuplicatedInitializer
def testDuplicatedInitializer(self):
  init = init_ops.uniform_unit_scaling_initializer()
  self.assertFalse(duplicated_initializer(self, init, 1))
Developer: HughKu | Project: tensorflow | Lines: 3 | Source: init_ops_test.py
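Example 5 uses a second helper, duplicated_initializer, also defined in init_ops_test.py and not shown here. A plausible sketch (an assumption, not the verbatim source): it evaluates the same initializer twice in one graph under a fixed graph-level seed, so asserting False means each call still produces fresh values.

import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed

def duplicated_initializer(tc, init, graph_seed, shape=None):
  # Evaluate one initializer twice in the same graph under a fixed
  # graph-level seed; return True only if both draws coincide exactly.
  if shape is None:
    shape = [100]
  with tc.test_session(graph=ops.Graph()):
    random_seed.set_random_seed(graph_seed)
    t1 = init(shape).eval()
    t2 = init(shape).eval()
  return np.allclose(t1, t2, rtol=0, atol=0)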
Example 6: _get_single_variable
def _get_single_variable(self, name, shape=None, dtype=dtypes.float32,
                         initializer=None, regularizer=None, reuse=None,
                         trainable=True, collections=None,
                         caching_device=None, validate_shape=True):
  """Get or create a single Variable (e.g. a shard or entire variable).

  See the documentation of get_variable above (ignore partitioning components)
  for details.

  Args:
    name: see get_variable.
    shape: see get_variable.
    dtype: see get_variable.
    initializer: see get_variable.
    regularizer: see get_variable.
    reuse: see get_variable.
    trainable: see get_variable.
    collections: see get_variable.
    caching_device: see get_variable.
    validate_shape: see get_variable.

  Returns:
    A Variable. See documentation of get_variable above.

  Raises:
    ValueError: See documentation of get_variable above.
  """
  # Set to true if initializer is a constant.
  initializing_from_value = False
  if initializer is not None and isinstance(initializer, ops.Tensor):
    initializing_from_value = True
  if shape is not None and initializing_from_value:
    raise ValueError("If initializer is a constant, do not specify shape.")

  should_check = reuse is not None
  dtype = dtypes.as_dtype(dtype)
  shape = tensor_shape.as_shape(shape)

  if name in self._vars:
    # Here we handle the case when returning an existing variable.
    if should_check and not reuse:
      tb = self._vars[name].op.traceback[::-1]
      # Throw away internal tf entries and only take a few lines.
      tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
      raise ValueError("Variable %s already exists, disallowed."
                       " Did you mean to set reuse=True in VarScope? "
                       "Originally defined at:\n\n%s" % (
                           name, "".join(traceback.format_list(tb))))
    found_var = self._vars[name]
    if not shape.is_compatible_with(found_var.get_shape()):
      raise ValueError("Trying to share variable %s, but specified shape %s"
                       " and found shape %s." % (name, shape,
                                                 found_var.get_shape()))
    if not dtype.is_compatible_with(found_var.dtype):
      dtype_str = dtype.name
      found_type_str = found_var.dtype.name
      raise ValueError("Trying to share variable %s, but specified dtype %s"
                       " and found dtype %s." % (name, dtype_str,
                                                 found_type_str))
    return found_var

  # The code below handles only the case of creating a new variable.
  if should_check and reuse:
    raise ValueError("Variable %s does not exist, disallowed."
                     " Did you mean to set reuse=None in VarScope?" % name)
  if not shape.is_fully_defined() and not initializing_from_value:
    raise ValueError("Shape of a new variable (%s) must be fully defined, "
                     "but instead was %s." % (name, shape))

  # Create the tensor to initialize the variable.
  if initializer is None:
    initializer = init_ops.uniform_unit_scaling_initializer()
  # Clear control dependencies while creating the initializer.
  with ops.control_dependencies(None):
    if initializing_from_value:
      init_val = initializer
      variable_dtype = None
    else:
      init_val = lambda: initializer(shape.as_list(), dtype=dtype)
      variable_dtype = dtype.base_dtype

  # Create the variable.
  v = variables.Variable(initial_value=init_val,
                         name=name,
                         trainable=trainable,
                         collections=collections,
                         caching_device=caching_device,
                         dtype=variable_dtype,
                         validate_shape=validate_shape)
  self._vars[name] = v
  logging.info("Created variable %s with shape %s and init %s", v.name,
               format(shape), initializer)

  # Run the regularizer if requested and save the resulting loss.
  if regularizer:
    with ops.name_scope(name + "/Regularizer/"):
      loss = regularizer(v)
      if loss is not None:
        logging.info("Applied regularizer to %s and added the result %s to "
# ......... part of the code omitted here .........
Developer: 2php | Project: tensorflow | Lines: 101 | Source: variable_scope.py
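The practical effect of the `initializer is None` branch above is that, in this version of TensorFlow, uniform_unit_scaling_initializer was the silent default for any variable created without an explicit initializer. A minimal sketch of that path through the module's public wrapper (the name "w" and the shape are arbitrary):

from tensorflow.python.ops import variable_scope

# No initializer is passed, so _get_single_variable falls back to
# init_ops.uniform_unit_scaling_initializer() internally.
w = variable_scope.get_variable("w", shape=[128, 64])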
Example 7: _get_partitioned_variable_list
# ......... part of the code omitted here .........
if name in self._vars:
  raise ValueError(
      "A partitioner was provided, but an unpartitioned version of the "
      "variable was found: %s. Perhaps a variable of the same name was "
      "already created without partitioning?" % name)

if initializing_from_value:
  shape = initializer.get_shape()

if not shape.is_fully_defined():
  raise ValueError("Shape of a new partitioned variable (%s) must be "
                   "fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
  raise ValueError("A partitioned Variable must have rank at least 1, "
                   "shape: %s" % shape)

partitions = partitioner(shape=shape, dtype=dtype)

if not isinstance(partitions, collections_lib.Sequence):
  raise ValueError("Partitioner must return a sequence, but saw: %s"
                   % partitions)
if len(partitions) != shape.ndims:
  raise ValueError(
      "Partitioner returned a partition list that does not match the "
      "Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
  raise ValueError(
      "Partitioner returned zero partitions for some axes: %s" % partitions)

slice_dim, slice_shape = _compute_slice_dim_and_shape(
    shape.as_list(), partitions)

vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices

slice_offset = [0] * shape.ndims

if "%s_0" % name in self._vars:
  if "%s_%d" % (name, num_slices - 1) not in self._vars:
    raise ValueError(
        "Partitioner returned a different partitioning than what was "
        "already found. Partitioner returned %d shards, and shard %s_0 "
        "was found, but %s_%d was not."
        % (num_slices, name, name, num_slices - 1))
  if "%s_%d" % (name, num_slices) in self._vars:
    raise ValueError(
        "Partitioner returned a different partitioning than what was "
        "already found. Partitioner returned %d shards, and shard %s_0 "
        "was found, but so was the extra shard %s_%d."
        % (num_slices, name, name, num_slices))

for i in xrange(num_slices):
  var_shape = slice_shape[:]
  var_offset = slice_offset[:]
  if i < num_slices_with_excess:
    var_shape[slice_dim] += 1
  slice_offset[slice_dim] += var_shape[slice_dim]

  with ops.op_scope([], name + "/PartitionedVariableList"):
    if initializer is None:
      init = init_ops.uniform_unit_scaling_initializer(
          full_shape=shape.as_list())
      init_shape = var_shape
    elif callable(initializer):
      init = initializer
      init_shape = var_shape
    elif isinstance(initializer, ops.Tensor):
      init = array_ops.slice(initializer, var_offset, var_shape)
      # Use the dtype of the given tensor.
      dtype = init.dtype.base_dtype
      init_shape = None
    else:
      init = ops.convert_to_tensor(initializer, dtype=dtype)
      init = array_ops.slice(init, var_offset, var_shape)
      init_shape = None

  with ops.name_scope(None):
    var = self._get_single_variable(
        name="%s_%d" % (name, i),
        shape=init_shape,
        dtype=dtype,
        initializer=init,
        regularizer=regularizer,
        reuse=reuse,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        validate_shape=validate_shape)

  # pylint: disable=protected-access
  var._set_save_slice_info(variables.Variable.SaveSliceInfo(
      name, shape.as_list(), var_offset, var_shape))
  # pylint: enable=protected-access
  vs.append(var)

return (vs, partitions)
Developer: 2php | Project: tensorflow | Lines: 101 | Source: variable_scope.py
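Note the branch above where initializer is None: each shard is built with uniform_unit_scaling_initializer(full_shape=shape.as_list()), so the scaling factor is computed from the full variable's shape rather than the shard's, keeping every shard statistically consistent with an unpartitioned initialization. A hedged sketch of that call in isolation, under this old API (the full_shape keyword was removed in later releases; the shapes below are arbitrary):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops

# Initialize a [10, 4] shard of a conceptual [10, 16] variable; the
# uniform range is derived from the full shape, not the shard shape.
init = init_ops.uniform_unit_scaling_initializer(full_shape=[10, 16])
shard_value = init([10, 4], dtype=dtypes.float32)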
Example 8: get_variable
def get_variable(self, name, shape=None, dtype=types.float32,
                 initializer=None, reuse=None, trainable=True,
                 collections=None):
  """Gets an existing variable with these parameters or create a new one.

  If a variable with the given name is already stored, we return the stored
  variable. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  If `reuse` is `None` (the default), both new and existing variables are
  returned.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `UniformUnitScalingInitializer`.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable.
    reuse: a Boolean or `None`. Controls reuse or creation of variables.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see variables.Variable).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.VARIABLES]` (see variables.Variable).

  Returns:
    The created or existing variable.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      when reusing a variable and specifying a conflicting shape,
      or when violating reuse during variable creation.
  """
  should_check = reuse is not None
  dtype = types.as_dtype(dtype)
  shape = tensor_shape.as_shape(shape)

  if name in self._vars:
    # Here we handle the case when returning an existing variable.
    if should_check and not reuse:
      raise ValueError("Over-sharing: Variable %s already exists, disallowed."
                       " Did you mean to set reuse=True in VarScope?" % name)
    found_var = self._vars[name]
    if not shape.is_compatible_with(found_var.get_shape()):
      raise ValueError("Trying to share variable %s, but specified shape %s"
                       " and found shape %s." % (name, shape,
                                                 found_var.get_shape()))
    if not dtype.is_compatible_with(found_var.dtype):
      dtype_str = dtype.name
      found_type_str = found_var.dtype.name
      raise ValueError("Trying to share variable %s, but specified dtype %s"
                       " and found dtype %s." % (name, dtype_str,
                                                 found_type_str))
    return found_var

  # The code below handles only the case of creating a new variable.
  if should_check and reuse:
    raise ValueError("Under-sharing: Variable %s does not exist, disallowed."
                     " Did you mean to set reuse=None in VarScope?" % name)
  if not shape.is_fully_defined():
    raise ValueError("Shape of a new variable (%s) must be fully defined, "
                     "but instead was %s." % (name, shape))

  if initializer is None:
    initializer = init_ops.uniform_unit_scaling_initializer()
  with ops.name_scope(name + "/Initializer/"):
    init_val = initializer(shape.as_list(), dtype=dtype)
  v = variables.Variable(init_val, name=name, trainable=trainable,
                         collections=collections)
  self._vars[name] = v
  logging.info("Created variable %s with shape %s and init %s", v.name,
               format(shape), initializer)
  return v
Developer: ray2020 | Project: tensorflow | Lines: 74 | Source: variable_scope.py
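The over-sharing and under-sharing errors above define the reuse contract that the public tf.variable_scope / tf.get_variable wrappers expose. A short sketch of both sides of that contract (TF 1.x-style API; the scope and variable names are arbitrary):

import tensorflow as tf

with tf.variable_scope("layer"):
  # reuse is None here, so the variable is created; with no initializer
  # given, the store's default (uniform unit scaling) applies.
  w = tf.get_variable("w", shape=[3, 4])

with tf.variable_scope("layer", reuse=True):
  # reuse=True retrieves the existing variable; a conflicting shape or
  # dtype would raise the ValueError shown above.
  w_again = tf.get_variable("w", shape=[3, 4])

assert w is w_again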
Example 9: get_variable
def get_variable(self, name, shape=None, dtype=dtypes.float32,
                 initializer=None, regularizer=None, reuse=None,
                 trainable=True, collections=None, caching_device=None):
  """Gets an existing variable with these parameters or create a new one.

  If a variable with the given name is already stored, we return the stored
  variable. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  If `reuse` is `None` (the default), both new and existing variables are
  returned.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `UniformUnitScalingInitializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable.
    regularizer: a (Tensor -> Tensor or None) function; the result of
      applying it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    reuse: a Boolean or `None`. Controls reuse or creation of variables.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.VARIABLES]` (see tf.Variable).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.

  Returns:
    The created or existing variable.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      when reusing a variable and specifying a conflicting shape,
      or when violating reuse during variable creation.
  """
  # Set to true if initializer is a constant.
  initializing_from_value = False
  if initializer is not None and isinstance(initializer, ops.Tensor):
    initializing_from_value = True
  if shape is not None and initializing_from_value:
    raise ValueError("If initializer is a constant, do not specify shape.")

  should_check = reuse is not None
  dtype = dtypes.as_dtype(dtype)
  shape = tensor_shape.as_shape(shape)

  if name in self._vars:
    # Here we handle the case when returning an existing variable.
    if should_check and not reuse:
      tb = self._vars[name].op.traceback[::-1]
      # Throw away internal tf entries and only take a few lines.
      tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
      raise ValueError("Variable %s already exists, disallowed."
                       " Did you mean to set reuse=True in VarScope? "
                       "Originally defined at:\n\n%s" % (
                           name, "".join(traceback.format_list(tb))))
    found_var = self._vars[name]
    if not shape.is_compatible_with(found_var.get_shape()):
      raise ValueError("Trying to share variable %s, but specified shape %s"
                       " and found shape %s." % (name, shape,
                                                 found_var.get_shape()))
    if not dtype.is_compatible_with(found_var.dtype):
      dtype_str = dtype.name
      found_type_str = found_var.dtype.name
      raise ValueError("Trying to share variable %s, but specified dtype %s"
                       " and found dtype %s." % (name, dtype_str,
                                                 found_type_str))
    return found_var

  # The code below handles only the case of creating a new variable.
  if should_check and reuse:
    raise ValueError("Variable %s does not exist, disallowed."
                     " Did you mean to set reuse=None in VarScope?" % name)
  if not shape.is_fully_defined() and not initializing_from_value:
    raise ValueError("Shape of a new variable (%s) must be fully defined, "
                     "but instead was %s." % (name, shape))

  # Create the tensor to initialize the variable.
  if initializer is None:
    initializer = init_ops.uniform_unit_scaling_initializer()
  # Clear control dependencies while creating the initializer.
  with ops.control_dependencies(None):
    if initializing_from_value:
      init_val = initializer
    else:
      with ops.name_scope(name + "/Initializer/"):
        init_val = initializer(shape.as_list(), dtype=dtype)

  # Create the variable.
  v = variables.Variable(init_val, name=name, trainable=trainable,
# ......... part of the code omitted here .........
Developer: 6779660 | Project: tensorflow | Lines: 101 | Source: variable_scope.py
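Example 9 adds the regularizer hook described in its docstring: the result of regularizer(v) is stored in the GraphKeys.REGULARIZATION_LOSSES collection. A brief usage sketch through the public TF 1.x API (the 0.01 weight is an arbitrary choice):

import tensorflow as tf

def l2_reg(weights):
  # The returned loss is added to GraphKeys.REGULARIZATION_LOSSES by
  # get_variable, where a training loop can pick it up.
  return 0.01 * tf.nn.l2_loss(weights)

v = tf.get_variable("w", shape=[5, 5], regularizer=l2_reg)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)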
Note: The tensorflow.python.ops.init_ops.uniform_unit_scaling_initializer examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.