This article collects typical usage examples of the Python function tensorflow.python.ops.nn.bias_add. If you have been wondering what bias_add does, how to call it, or what real-world usage looks like, the curated examples below should help.
Twenty code examples of bias_add are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
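Before diving into the examples, here is a minimal sketch of the call itself (our addition, not taken from the examples below; it uses the same TensorFlow 1.x internal imports the examples use, for which the public alias is tf.nn.bias_add). bias_add(value, bias) adds a 1-D bias along the channel dimension of value, which is the last dimension under the default NHWC data format:

import numpy as np
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import nn

# value: a batch of two 4x4 feature maps with 3 channels (NHWC layout).
value = constant_op.constant(np.ones([2, 4, 4, 3]), dtype=dtypes.float32)
# bias: one scalar per channel; its length must match the channel dimension.
bias = constant_op.constant([0.1, 0.2, 0.3], dtype=dtypes.float32)
# Adds the bias at every spatial position of every example; the output
# keeps the input shape (2, 4, 4, 3).
result = nn.bias_add(value, bias, data_format="NHWC")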
Example 1: call
def call(self, inputs):
  outputs = nn.convolution(
      input=inputs,
      filter=self.masked_kernel,
      dilation_rate=self.dilation_rate,
      strides=self.strides,
      padding=self.padding.upper(),
      data_format=utils.convert_data_format(self.data_format, self.rank + 2))
  if self.bias is not None:
    if self.data_format == 'channels_first':
      if self.rank == 1:
        # nn.bias_add does not accept a 1D input tensor.
        bias = array_ops.reshape(self.bias, (1, self.filters, 1))
        outputs += bias
      if self.rank == 2:
        outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
      if self.rank == 3:
        # As of Mar 2017, direct addition is significantly slower than
        # bias_add when computing gradients. To use bias_add, we collapse Z
        # and Y into a single dimension to obtain a 4D input tensor.
        outputs_shape = outputs.shape.as_list()
        outputs_4d = array_ops.reshape(outputs, [
            outputs_shape[0], outputs_shape[1],
            outputs_shape[2] * outputs_shape[3], outputs_shape[4]
        ])
        outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
        outputs = array_ops.reshape(outputs_4d, outputs_shape)
    else:
      outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
Author: JonathanRaiman | Project: tensorflow | Lines: 34 | Source: core_layers.py
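A side note on the rank-1 branch above: at the time this layer was written, bias_add with data_format='NCHW' only accepted 4-D inputs, which is why a 3-D (batch, channels, width) output falls back to an explicit broadcast instead. A small illustrative sketch of that fallback (our addition, with made-up shapes):

import numpy as np
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import array_ops

# A (batch, channels, width) output of a 1-D channels_first convolution.
outputs = constant_op.constant(np.zeros([2, 5, 7]), dtype=dtypes.float32)
bias = constant_op.constant(np.arange(5.0), dtype=dtypes.float32)  # (channels,)
# Reshape to (1, channels, 1) so plain addition broadcasts over batch and width.
outputs += array_ops.reshape(bias, (1, 5, 1))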
Example 2: _conv_pool
def _conv_pool(x):
  """(Conv -> bias -> relu -> max_pool) x2."""
  x_image = array_ops.reshape(x, [-1, 8, 8, 1])
  w_conv1 = _weight([3, 3, 1, 6])
  b_conv1 = _bias([6])
  h_conv1 = nn.relu(nn.bias_add(_conv2d(x_image, w_conv1), b_conv1))
  h_pool1 = _max_pool_2x2(h_conv1)
  w_conv2 = _weight([3, 3, 6, 4])
  b_conv2 = _bias([4])
  h_conv2 = nn.relu(nn.bias_add(_conv2d(h_pool1, w_conv2), b_conv2))
  h_pool2 = _max_pool_2x2(h_conv2)
  return h_pool2
Author: adit-chandra | Project: tensorflow | Lines: 12 | Source: auto_mixed_precision_test.py
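_weight, _bias, _conv2d, and _max_pool_2x2 are helpers defined elsewhere in the same test file. Plausible stand-ins, which are our assumption and not the originals, would look like:

import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn, nn_ops, variables

def _weight(shape):
  # Hypothetical stand-in: a randomly initialized float32 weight variable.
  return variables.Variable(np.random.randn(*shape), dtype=dtypes.float32)

def _bias(shape):
  # Hypothetical stand-in: a zero-initialized float32 bias variable.
  return variables.Variable(np.zeros(shape), dtype=dtypes.float32)

def _conv2d(x, w):
  # Stride-1, same-padded 2-D convolution.
  return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def _max_pool_2x2(x):
  # 2x2 max pooling with stride 2.
  return nn_ops.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')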
Example 3: _apply_variational_bias
def _apply_variational_bias(self, inputs):
  if self.bias.posterior is None:
    self.bias.posterior_tensor = None
    return inputs
  self.bias.posterior_tensor = self.bias.posterior_tensor_fn(
      self.bias.posterior)
  return nn.bias_add(inputs, self.bias.posterior_tensor)
Author: Kongsea | Project: tensorflow | Lines: 7 | Source: layers_dense_variational_impl.py
Example 4: call
def call(self, inputs):
  if self.data_format == 'channels_first':
    # Reshape to channels last
    inputs = array_ops.transpose(inputs, (0, 2, 3, 1))
  # Apply the actual ops.
  outputs = nn.separable_conv2d(
      inputs,
      self.depthwise_kernel,
      self.pointwise_kernel,
      strides=(1,) + self.strides + (1,),
      padding=self.padding.upper(),
      rate=self.dilation_rate)
  if self.data_format == 'channels_first':
    # Reshape to channels first
    outputs = array_ops.transpose(outputs, (0, 3, 1, 2))
  if self.bias is not None:
    outputs = nn.bias_add(
        outputs,
        self.bias,
        data_format=utils.convert_data_format(self.data_format, ndim=4))
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
Author: kdavis-mozilla | Project: tensorflow | Lines: 27 | Source: convolutional.py
Example 5: GetParams
def GetParams(self):
  """Single vgg layer test in TF-TRT conversion."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [5, 8, 8, 2]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    x, _, _ = nn_impl.fused_batch_norm(
        x, [1.0, 1.0], [0.0, 0.0],
        mean=[0.5, 0.5],
        variance=[1.0, 1.0],
        is_training=False)
    e = constant_op.constant(
        np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
    b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
    t = nn.bias_add(conv, b, name="biasAdd")
    relu = nn.relu(t, "relu")
    idty = array_ops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    array_ops.squeeze(v, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(5, 2, 2, 6)])
Author: aeverall | Project: tensorflow | Lines: 31 | Source: vgg_block_test.py
Example 6: _testConvReparameterization
def _testConvReparameterization(self, layer_class):
  batch_size, depth, height, width, channels, filters = 2, 4, 4, 4, 3, 5
  with self.test_session() as sess:
    (kernel_posterior, kernel_prior, kernel_divergence,
     bias_posterior, bias_prior, bias_divergence, layer, inputs,
     outputs, kl_penalty, kernel_shape) = self._testConvSetUp(
         layer_class, batch_size,
         depth=depth, height=height, width=width, channels=channels,
         filters=filters)
    convolution_op = nn_ops.Convolution(
        tensor_shape.TensorShape(inputs.shape),
        filter_shape=tensor_shape.TensorShape(kernel_shape),
        padding="SAME")
    expected_outputs = convolution_op(inputs, kernel_posterior.result_sample)
    expected_outputs = nn.bias_add(expected_outputs,
                                   bias_posterior.result_sample,
                                   data_format="NHWC")
    [
        expected_outputs_, actual_outputs_,
        expected_kernel_, actual_kernel_,
        expected_kernel_divergence_, actual_kernel_divergence_,
        expected_bias_, actual_bias_,
        expected_bias_divergence_, actual_bias_divergence_,
    ] = sess.run([
        expected_outputs, outputs,
        kernel_posterior.result_sample, layer.kernel_posterior_tensor,
        kernel_divergence.result, kl_penalty[0],
        bias_posterior.result_sample, layer.bias_posterior_tensor,
        bias_divergence.result, kl_penalty[1],
    ])
    self.assertAllClose(
        expected_kernel_, actual_kernel_,
        rtol=1e-6, atol=0.)
    self.assertAllClose(
        expected_bias_, actual_bias_,
        rtol=1e-6, atol=0.)
    self.assertAllClose(
        expected_outputs_, actual_outputs_,
        rtol=1e-6, atol=0.)
    self.assertAllClose(
        expected_kernel_divergence_, actual_kernel_divergence_,
        rtol=1e-6, atol=0.)
    self.assertAllClose(
        expected_bias_divergence_, actual_bias_divergence_,
        rtol=1e-6, atol=0.)
    self.assertAllEqual(
        [[kernel_posterior.distribution,
          kernel_prior.distribution,
          kernel_posterior.result_sample]],
        kernel_divergence.args)
    self.assertAllEqual(
        [[bias_posterior.distribution,
          bias_prior.distribution,
          bias_posterior.result_sample]],
        bias_divergence.args)
Author: ChengYuXiang | Project: tensorflow | Lines: 60 | Source: layers_conv_variational_test.py
Example 7: _DenseLayer
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
  """Dense layer with quantized outputs.

  Args:
    x: input to the dense layer
    num_inputs: number of input columns of x
    num_outputs: number of output columns
    quantization_range: the min/max range for quantization
    name: name of the variable scope

  Returns:
    The output of the layer.
  """
  with variable_scope.variable_scope(name):
    kernel = variable_scope.get_variable(
        'kernel',
        shape=[num_inputs, num_outputs],
        dtype=dtypes.float32,
        initializer=keras.initializers.glorot_uniform())
    bias = variable_scope.get_variable(
        'bias',
        shape=[num_outputs],
        dtype=dtypes.float32,
        initializer=keras.initializers.zeros())
    x = math_ops.matmul(x, kernel)
    x = _Quantize(x, quantization_range)
    x = nn.bias_add(x, bias)
    x = _Quantize(x, quantization_range)
  return x
Author: kylin9872 | Project: tensorflow | Lines: 29 | Source: quantization_mnist_test.py
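_Quantize is defined elsewhere in the same test. A plausible stand-in, which is a hypothetical sketch on our part and not the original implementation, fake-quantizes activations into the given symmetric range:

from tensorflow.python.ops import array_ops

def _Quantize(x, r):
  # Fake-quantize x into [-r, r] with 8 bits; this simulates INT8
  # quantization while keeping float tensors flowing through the graph.
  return array_ops.fake_quant_with_min_max_args(x, min=-r, max=r)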
Example 8: get_simple_graph_def
def get_simple_graph_def(self):
  """Create a simple graph and return its graph_def."""
  g = ops.Graph()
  with g.as_default():
    a = aops.placeholder(
        dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
    e = cop.constant(
        [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
        name="weights",
        dtype=dtypes.float32)
    conv = nn.conv2d(
        input=a,
        filter=e,
        strides=[1, 2, 2, 1],
        padding="SAME",
        name="conv")
    b = cop.constant(
        [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
    t = nn.bias_add(conv, b, name="biasAdd")
    relu = nn.relu(t, "relu")
    idty = aops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    aops.squeeze(v, name="output")
  return g.as_graph_def()
Author: ebrevdo | Project: tensorflow | Lines: 25 | Source: tf_trt_integration_test.py
Example 9: GetSingleEngineGraphDef
def GetSingleEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing a single segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      bias = constant_op.constant(
          [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
      added = nn.bias_add(conv, bias, name="bias_add")
      relu = nn.relu(added, "relu")
      identity = array_ops.identity(relu, "identity")
      pool = nn_ops.max_pool(
          identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
      array_ops.squeeze(pool, name=OUTPUT_NAME)
  return g.as_graph_def()
Author: Eagle732 | Project: tensorflow | Lines: 26 | Source: tf_trt_integration_test.py
Example 10: GetParams
def GetParams(self):
  # TODO(laigd): we should test the following cases:
  # - batch size is not changed, other dims are changing
  # - batch size is decreasing, other dims are identical
  # - batch size is decreasing, other dims are changing
  # - batch size is increasing, other dims are identical
  # - batch size is increasing, other dims are changing
  input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
  expected_output_dims = input_dims
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(
        shape=(None, None, None, 1), dtype=dtypes.float32, name="input")
    conv_filter1 = constant_op.constant(
        np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
    bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter1,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias1)
    x = nn.relu(x)
    conv_filter2 = constant_op.constant(
        np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
    bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter2,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias2)
    x = array_ops.identity(x, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=["input"],
      input_dims=input_dims,
      output_names=["output"],
      expected_output_dims=expected_output_dims)
Author: Wajih-O | Project: tensorflow | Lines: 45 | Source: dynamic_input_shapes_test.py
Example 11: _lstm_cell
def _lstm_cell(prev_c, prev_h, x):
  """Create an LSTM cell."""
  # i: input gate
  # f: forget gate
  # o: output gate
  # c: cell state
  # x: input
  # h: embedding
  bias = _bias([4])
  w = _weight([8, 16])
  ifoc = math_ops.matmul(array_ops.concat([x, prev_h], axis=1), w)
  i, f, o, c = array_ops.split(ifoc, 4, axis=1)
  i = math_ops.sigmoid(nn.bias_add(i, bias))
  f = math_ops.sigmoid(nn.bias_add(f, bias))
  o = math_ops.sigmoid(nn.bias_add(o, bias))
  c = math_ops.tanh(nn.bias_add(c, bias))
  next_c = f * prev_c + i * c
  next_h = o * math_ops.tanh(next_c)
  return next_c, next_h
Author: adit-chandra | Project: tensorflow | Lines: 19 | Source: auto_mixed_precision_test.py
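The gate arithmetic above assumes 4 hidden units: with 4 input features and a 4-feature prev_h, the concat is (batch, 8), the matmul against the [8, 16] weight yields (batch, 16), and the split gives each of the four gates a (batch, 4) pre-activation to which the shared length-4 bias is added via bias_add. Note the test reuses a single bias for all four gates, which keeps the example small but differs from a production LSTM, where each gate has its own bias. A hypothetical driver (our sketch, reusing the _weight/_bias stand-ins sketched after Example 2; both examples come from the same test file):

import numpy as np
from tensorflow.python.framework import constant_op, dtypes

batch = 2
x = constant_op.constant(np.zeros([batch, 4]), dtype=dtypes.float32)
prev_c = constant_op.constant(np.zeros([batch, 4]), dtype=dtypes.float32)
prev_h = constant_op.constant(np.zeros([batch, 4]), dtype=dtypes.float32)
next_c, next_h = _lstm_cell(prev_c, prev_h, x)  # both have shape (batch, 4)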
Example 12: loop_fn
def loop_fn(i):
  with g:
    a = array_ops.gather(x, i) if stacked_value else x
    b = array_ops.gather(bias, i) if stacked_bias else bias
    y = nn.bias_add(a, b, data_format=data_format)
    loss = math_ops.reduce_sum(y * y)
  grad = g.gradient(loss, bias)
  if stacked_bias:
    # If we gather over bias in loop_fn, the gradient will be an
    # instance of `IndexedSlices` with attrs `values` and `indices`.
    return y, grad.values, grad.indices
  else:
    return y, grad
Author: aritratony | Project: tensorflow | Lines: 13 | Source: math_test.py
Example 13: _predictions
def _predictions(self, logits):
  """Returns a dict of predictions.

  Args:
    logits: logits `Tensor` before applying possible centered bias.

  Returns:
    Dict of prediction `Tensor` keyed by `PredictionKey`.
  """
  if self._enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(
        self.logits_dimension,
        self._centered_bias_weight_collection))
  return self._logits_to_predictions(logits)
Author: HKUST-SING | Project: tensorflow | Lines: 14 | Source: head.py
Example 14: bias_add
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with at least rank 2 and a known last dimension,
      e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: Optional activation function.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs to.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_op_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_op_scope([inputs],
                                        scope, 'BiasAdd', reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    biases_collections = utils.get_variable_collections(variables_collections,
                                                        'biases')
    biases = variables.model_variable('biases',
                                      shape=[num_features,],
                                      dtype=dtype,
                                      initializer=initializer,
                                      regularizer=regularizer,
                                      collections=biases_collections,
                                      trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
Author: 31H0B1eV | Project: tensorflow | Lines: 49 | Source: layers.py
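A hedged usage sketch for this wrapper, assuming the contrib-era import path under which it was exported as tf.contrib.layers.bias_add. Because the wrapper creates and tracks its own bias variable, it can be called directly, or passed as normalizer_fn to layers.conv2d or layers.fully_connected as the docstring suggests:

import numpy as np
from tensorflow.contrib import layers
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import nn

inputs = constant_op.constant(np.ones([4, 10]), dtype=dtypes.float32)
# Creates a zero-initialized bias of shape [10], adds it to every row,
# then applies the optional activation (variables still need to be
# initialized before evaluation, as usual in graph mode).
outputs = layers.bias_add(inputs, activation_fn=nn.relu)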
Example 15: _eval_op
def _eval_op(self, features, target, logits=None, logits_input=None,
             name="eval_op"):
  target = _check_target(target, self._label_name)
  if self._enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(
        self.logits_dimension,
        self._centered_bias_weight_collection))
  loss_unweighted = self._eval_loss_fn(logits, target)
  loss, _ = _loss(loss_unweighted,
                  _weight_tensor(features, self._weight_column_name),
                  name=name)
  predictions = self._logits_to_prediction(logits)
  return predictions, loss
Author: caikehe | Project: tensorflow | Lines: 15 | Source: head.py
Example 16: _logits
def _logits(self, features):
  if not (self._get_linear_feature_columns() or
          self._get_dnn_feature_columns()):
    raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                     "should be defined.")
  features = self._get_feature_dict(features)
  if self._get_linear_feature_columns() and self._get_dnn_feature_columns():
    logits = self._linear_logits(features) + self._dnn_logits(features)
  elif self._get_dnn_feature_columns():
    logits = self._dnn_logits(features)
  else:
    logits = self._linear_logits(features)
  return nn.bias_add(logits, self._centered_bias())
Author: Baaaaam | Project: tensorflow | Lines: 15 | Source: dnn_linear_combined.py
Example 17: call
def call(self, inputs):
  shape = inputs.get_shape().as_list()
  output_shape = shape[:-1] + [self.units]
  if len(output_shape) > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, self.kernel,
                                     [[len(shape) - 1], [0]])
    # Reshape the output back to the original ndim of the input.
    outputs.set_shape(output_shape)
  else:
    outputs = standard_ops.matmul(inputs, self.kernel)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
Author: brainwy12 | Project: tensorflow | Lines: 16 | Source: core.py
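The tensordot branch above is what lets a dense layer accept inputs with more than two dimensions: it contracts the last axis of the inputs against the first axis of the kernel, applying the same kernel at every leading position, and bias_add then adds the bias along the last dimension. A minimal sketch of that combination (our addition, with made-up shapes):

import numpy as np
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import math_ops, nn

x = constant_op.constant(np.ones([2, 7, 3]), dtype=dtypes.float32)    # (batch, time, features)
kernel = constant_op.constant(np.ones([3, 5]), dtype=dtypes.float32)  # (features, units)
bias = constant_op.constant(np.zeros([5]), dtype=dtypes.float32)      # (units,)
# Contract the features axis against the kernel's first axis, then add
# the bias along the last dimension; the result has shape (2, 7, 5).
y = nn.bias_add(math_ops.tensordot(x, kernel, [[2], [0]]), bias)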
Example 18: _logits
def _logits(self, features, is_training=False):
  linear_feature_columns = self._get_linear_feature_columns()
  dnn_feature_columns = self._get_dnn_feature_columns()
  if not (linear_feature_columns or dnn_feature_columns):
    raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                     "should be defined.")
  if linear_feature_columns and dnn_feature_columns:
    logits = (self._linear_logits(features, is_training) +
              self._dnn_logits(features, is_training))
  elif dnn_feature_columns:
    logits = self._dnn_logits(features, is_training)
  else:
    logits = self._linear_logits(features, is_training)
  if self._enable_centered_bias:
    return nn.bias_add(logits, self._centered_bias())
  else:
    return logits
Author: jinniahn | Project: tensorflow | Lines: 17 | Source: dnn_linear_combined.py
Example 19: call
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  shape = inputs.get_shape().as_list()
  if len(shape) > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                           [0]])
    # Reshape the output back to the original ndim of the input.
    if not context.executing_eagerly():
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    outputs = gen_math_ops.mat_mul(inputs, self.kernel)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
Author: yanchen036 | Project: tensorflow | Lines: 18 | Source: core.py
Example 20: _training_loss
def _training_loss(self, features, target, logits=None,
                   logits_input=None, name="training_loss"):
  """Returns training loss tensor for this head.

  Training loss is different from the loss reported on the tensorboard as we
  should respect the example weights when computing the gradient.

    L = sum_{i} w_{i} * l_{i} / B

  where B is the number of examples in the batch, and l_{i} and w_{i} are the
  individual losses and example weights.

  Args:
    features: features dict.
    target: either a tensor for labels or in multihead case, a dict of string
      to target tensor.
    logits: logits, a float tensor.
    logits_input: Output of last hidden layer.
    name: Op name.

  Returns:
    A tuple of training Loss and additional_train_op (possibly None)
  """
  target = _check_target(target, self._label_name)
  centered_bias_step = None
  if self._enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(
        self.logits_dimension,
        self._centered_bias_weight_collection))
    centered_bias_step = [_centered_bias_step(
        self.logits_dimension,
        self._centered_bias_weight_collection,
        target,
        self._train_loss_fn)]
  loss_unweighted = self._train_loss_fn(logits, target)
  loss, weighted_average_loss = _loss(
      loss_unweighted,
      _weight_tensor(features, self._weight_column_name),
      name=name)
  logging_ops.scalar_summary(_head_prefixed(self._head_name, "loss"),
                             weighted_average_loss)
  return loss, centered_bias_step
Author: caikehe | Project: tensorflow | Lines: 44 | Source: head.py
Note: The tensorflow.python.ops.nn.bias_add examples on this page were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or reuse should follow each project's License. Do not reproduce without permission.