This article collects typical usage examples of the tensorflow.python.ops.nn_ops.bias_add function in Python. If you are unsure what bias_add does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
The article presents 20 code examples of the bias_add function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code samples.
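Before the per-project examples, here is a minimal usage sketch (the shapes and values are illustrative only and are not taken from any example below). nn_ops.bias_add(value, bias) broadcasts a 1-D bias over the last dimension of value by default, or over the channel dimension when data_format="NCHW":

import tensorflow as tf
from tensorflow.python.ops import nn_ops

value = tf.ones([2, 3, 3, 4])              # illustrative NHWC input with 4 channels
bias = tf.constant([0.1, 0.2, 0.3, 0.4])   # 1-D bias, one entry per channel
out = nn_ops.bias_add(value, bias)         # shape [2, 3, 3, 4]; bias added along the last axis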
Example 1: testBiasVec
def testBiasVec(self):
  with self.assertRaises(ValueError):
    nn_ops.bias_add(
        array_ops.reshape(
            [1, 2], shape=[1, 2]),
        array_ops.reshape(
            [1, 2], shape=[1, 2]))
Contributor: aeverall, Project: tensorflow, Lines of code: 7, Source file: bias_op_test.py
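Example 1 exercises the error path: bias_add requires the bias to be a 1-D tensor, so passing a bias reshaped to [1, 2] raises ValueError. A hedged sketch of the accepted form (shapes chosen purely for illustration):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops, nn_ops

value = array_ops.reshape([1., 2.], shape=[1, 2])   # input of rank >= 2
bias = constant_op.constant([0.5, 0.5])             # 1-D bias matching the last dimension
out = nn_ops.bias_add(value, bias)                  # valid call, no ValueError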
Example 2: _testGradient
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
  with self.test_session(use_gpu=use_gpu):
    if data_format == "NCHW":
      np_input = self._NHWCToNCHW(np_input)
    input_tensor = constant_op.constant(
        np_input, shape=np_input.shape, dtype=dtype)
    bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
    output_tensor = nn_ops.bias_add(
        input_tensor, bias_tensor, data_format=data_format)
    tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
        input_tensor, np_input.shape, output_tensor, np_input.shape)
    bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
        bias_tensor, bias.shape, output_tensor, np_input.shape)

    # Test gradient of BiasAddGrad
    bias_add_grad = gradients_impl.gradients(
        nn_ops.l2_loss(output_tensor), bias_tensor)[0]
    grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
        output_tensor, np_input.shape, bias_add_grad, bias.shape)

    if dtype == np.float16:
      # Compare fp16 theoretical gradients to fp32 numerical gradients,
      # since fp16 numerical gradients are too imprecise unless great
      # care is taken with choosing the inputs and the delta. This is
      # a weaker check (in particular, it does not test the op itself,
      # only its gradient), but it's much better than nothing.
      input_tensor = constant_op.constant(
          np_input, shape=np_input.shape, dtype=np.float32)
      bias_tensor = constant_op.constant(
          bias, shape=bias.shape, dtype=np.float32)
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      _, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
                                                            np_input.shape,
                                                            output_tensor,
                                                            np_input.shape)
      _, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
                                                          bias.shape,
                                                          output_tensor,
                                                          np_input.shape)
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      _, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
                                                          np_input.shape,
                                                          bias_add_grad,
                                                          bias.shape)

    threshold = 2e-3
    if dtype == dtypes.float64:
      threshold = 1e-10
    self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
    # TODO(annarev): Re-add assertion for float16, float32 dtypes and NCHW
    # once we figure out why this check started failing with cuda mavx.
    if dtype == dtypes.float64 or data_format != "NCHW":
      self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
      self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)
Contributor: xylary, Project: tensorflow, Lines of code: 57, Source file: bias_op_test.py
Example 3: _testBiasNCHW
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
  np_val = self._npBias(np_inputs, np_bias)
  np_inputs = self._NHWCToNCHW(np_inputs)
  with self.cached_session(use_gpu=use_gpu):
    tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
  tf_val = self._NCHWToNHWC(tf_val)
  self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
Contributor: aeverall, Project: tensorflow, Lines of code: 7, Source file: bias_op_test.py
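Example 3 converts an NHWC reference input to NCHW before calling bias_add with data_format="NCHW"; in that layout the bias is broadcast over dimension 1 (the channel axis) rather than the last axis. A small sketch of the difference, with illustrative shapes only:

import tensorflow as tf
from tensorflow.python.ops import nn_ops

nhwc = tf.ones([2, 5, 5, 3])                                 # channels last
nchw = tf.transpose(nhwc, [0, 3, 1, 2])                      # channels second
bias = tf.constant([1., 2., 3.])                             # one value per channel
out_nhwc = nn_ops.bias_add(nhwc, bias)                       # bias added along axis 3
out_nchw = nn_ops.bias_add(nchw, bias, data_format="NCHW")   # bias added along axis 1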
Example 4: SimulateFusedConv2dBiasActivationInt8
def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
                                           padding, strides, side_input_scale,
                                           side_input, biases):
  """Simulates the int8 fused 2-D convolution op using separate float ops.

  The arguments and return values have the same format, meanings and
  restrictions as the actual op.

  Args:
    conv_input_scale: A scalar 'float'.
    conv_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    kernel: A `Tensor` of type `qint8` in OIHW_VECT_I layout.
    padding: A `string` from: `"SAME", "VALID"`.
    strides: A list of `ints`.
    side_input_scale: A scalar 'float'.
    side_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    biases: A `Tensor` of type `float32` in NCHW layout.

  Returns:
    A `Tensor` of type `qint8` in NCHW_VECT_C layout.
  """
  conv_result = nn_ops.conv2d(
      NchwVectCToNchw(gen_array_ops.dequantize(conv_input, -128, 127)),
      OihwVectIToHwio(gen_array_ops.dequantize(kernel, -128, 127)),
      strides=strides,
      padding=padding,
      data_format="NCHW") * conv_input_scale

  conv_and_side_inputs = conv_result + side_input_scale * NchwVectCToNchw(
      gen_array_ops.dequantize(side_input, -128, 127))

  logit = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")

  result, _, _ = gen_array_ops.quantize_v2(
      NchwToNchwVectC(nn_ops.relu(logit)), -128, 127, dtypes.qint8)
  return result
Contributor: Jackiefan, Project: tensorflow, Lines of code: 34, Source file: fused_conv2d_bias_activation_op_test.py
Example 5: call
def call(self, inputs, state):
  """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
  gate_inputs = math_ops.matmul(
      array_ops.concat([inputs, state], 1), self._kernel)
  gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
  output = self._activation(gate_inputs)
  return output, output
Contributor: AbhinavJain13, Project: tensorflow, Lines of code: 8, Source file: rnn_cell_impl.py
Example 6: _linear
def _linear(args, output_size, bias, bias_initializer=None,
            kernel_initializer=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_initializer: starting value to initialize the bias; None by default.
    kernel_initializer: starting value to initialize the weight; None by default.

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape() for a in args]
  for shape in shapes:
    if shape.ndims != 2:
      raise ValueError("linear is expecting 2D arguments: %s" % shapes)
    if shape[1].value is None:
      raise ValueError("linear expects shape[1] to be provided for shape %s, "
                       "but saw %s" % (shape, shape[1]))
    else:
      total_arg_size += shape[1].value

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  scope = vs.get_variable_scope()
  with vs.variable_scope(scope) as outer_scope:
    weights = vs.get_variable(
        _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype,
        initializer=kernel_initializer)
    if len(args) == 1:
      res = math_ops.matmul(args[0], weights)
    else:
      res = math_ops.matmul(array_ops.concat(args, 1), weights)
    if not bias:
      return res
    with vs.variable_scope(outer_scope) as inner_scope:
      inner_scope.set_partitioner(None)
      if bias_initializer is None:
        bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
      biases = vs.get_variable(
          _BIAS_VARIABLE_NAME, [output_size],
          dtype=dtype,
          initializer=bias_initializer)
    return nn_ops.bias_add(res, biases)
Contributor: AlbertXiebnu, Project: tensorflow, Lines of code: 58, Source file: core_rnn_cell_impl.py
Example 7: _linear
def _linear(self, args, copy):
  out_size = copy * self._num_units
  proj_size = args.get_shape()[-1]
  weights = vs.get_variable("kernel", [proj_size, out_size])
  out = math_ops.matmul(args, weights)
  if not self._layer_norm:
    bias = vs.get_variable("bias", [out_size])
    out = nn_ops.bias_add(out, bias)
  return out
Contributor: codealphago, Project: ML-KWS-for-MCU, Lines of code: 9, Source file: models.py
Example 8: _SetupValuesForDevice
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, bias,
                          strides, padding, activation_mode, data_format,
                          dtype):
  """Verifies the output values of the convolution function.

  Args:
    tensor_in_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_in_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    bias: 1-D bias tensor of length output_depth.
    strides: Stride: [col_stride, row_stride]
    padding: Padding type.
    activation_mode: Activation mode.
    data_format: Format of the data tensors.
    dtype: Data type for inputs and outputs.

  Returns:
    Symbolic tensor value and reference value that can be used to
    execute the computation and verify the results.
  """
  input_size = np.prod(tensor_in_sizes)
  filter_size = np.prod(filter_in_sizes)
  bias_size = filter_in_sizes[-1]  # equals output depth
  # Initializes the input tensor with an array containing incrementing
  # numbers from 1.
  x1 = [f * 1.0 for f in range(1, input_size + 1)]
  x2 = [f * 1.0 for f in range(1, filter_size + 1)]
  # This is to guarantee that there are always negative values after
  # bias add so that we can test whether relu works correctly.
  x3 = bias
  with self.test_session(use_gpu=True):
    t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
    t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
    t3 = constant_op.constant(x3, shape=[bias_size], dtype=dtype)
    strides = [1] + strides + [1]
    if data_format == "NCHW":
      t1 = test_util.NHWCToNCHW(t1)
      strides = test_util.NHWCToNCHW(strides)
    output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
        t1,
        t2,
        t3,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation_mode=activation_mode)
    ref_conv_output = nn_ops.conv2d(
        t1, t2, strides=strides, padding=padding, data_format=data_format)
    ref_bias_output = nn_ops.bias_add(
        ref_conv_output, t3, data_format=data_format)
    ref_output = nn_ops.relu(ref_bias_output)
    if data_format == "NCHW":
      output = test_util.NCHWToNHWC(output)
      ref_output = test_util.NCHWToNHWC(ref_output)

    return output, ref_output
Contributor: 1000sprites, Project: tensorflow, Lines of code: 56, Source file: fused_conv2d_bias_activation_op_test.py
Example 9: build_conv_bias_relu_graph
def build_conv_bias_relu_graph(device, input_shape, filter_shape, strides,
                               padding, num_iters, data_format):
  """builds a graph containing a sequence of conv2d operations.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter tensor.
    strides: A list of ints. 1-D of length 4. The stride of sliding
      window for each dimension of input.
    padding: A string from: "SAME", "VALID". The type of padding
      algorithm to use.
    num_iters: number of iterations to run conv2d.
    data_format: data format string of input, 'NHWC' and 'NCHW' are
      supported.

  Returns:
    An array of tensors to run()
  """
  if data_format == "NCHW":
    input_shape = [
        input_shape[0], input_shape[3], input_shape[1], input_shape[2]
    ]
  with ops.device("/%s:0" % device):
    inp = variables.Variable(random_ops.truncated_normal(input_shape))
    filt = variables.Variable(random_ops.truncated_normal(filter_shape))
    bias_shape = [filter_shape[-1]]
    bias = variables.Variable(random_ops.truncated_normal(bias_shape))

    outputs = []
    conv2d_out = nn_ops.conv2d(
        inp, filt, strides, padding, data_format=data_format)
    bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format)
    relu_out = nn_ops.relu(bias_out)
    outputs.append(relu_out)
    for _ in range(1, num_iters):
      with ops.control_dependencies([relu_out]):
        conv2d_out = nn_ops.conv2d(
            inp, filt, strides, padding, data_format=data_format)
        bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format)
        relu_out = nn_ops.relu(bias_out)
        outputs.append(relu_out)
    return control_flow_ops.group(*outputs)
Contributor: Ajaycs99, Project: tensorflow, Lines of code: 43, Source file: fused_conv2d_bias_activation_benchmark.py
Example 10: __call__
def __call__(self, args):
  if not self._is_sequence:
    args = [args]

  if len(args) == 1:
    res = math_ops.matmul(args[0], self._weights)
  else:
    res = math_ops.matmul(array_ops.concat(args, 1), self._weights)
  if self._build_bias:
    res = nn_ops.bias_add(res, self._biases)
  return res
Contributor: Mazecreator, Project: tensorflow, Lines of code: 11, Source file: rnn_cell_impl.py
Example 11: testGradients
def testGradients(self):
  with ops.Graph().as_default():
    inp = constant(1.0, shape=[32, 100], name="in")
    w = constant(1.0, shape=[100, 10], name="w")
    b = constant(1.0, shape=[10], name="b")
    xw = math_ops.matmul(inp, w, name="xw")
    h = bias_add(xw, b, name="h")
    w_grad = gradients.gradients(h, w)[0]
    self.assertEquals("MatMul", w_grad.op.type)
    self.assertEquals(w_grad.op._original_op, xw.op)
    self.assertTrue(w_grad.op.get_attr("transpose_a"))
    self.assertFalse(w_grad.op.get_attr("transpose_b"))
Contributor: Ambier, Project: tensorflow, Lines of code: 12, Source file: gradients_test.py
Example 12: __call__
def __call__(self, args):
  if not self._is_sequence:
    args = [args]

  if len(args) == 1:
    res = math_ops.matmul(args[0], self._weights)
  else:
    # Explicitly creating a one for a minor performance improvement.
    one = constant_op.constant(1, dtype=dtypes.int32)
    res = math_ops.matmul(array_ops.concat(args, one), self._weights)
  if self._build_bias:
    res = nn_ops.bias_add(res, self._biases)
  return res
Contributor: AbhinavJain13, Project: tensorflow, Lines of code: 13, Source file: core_rnn_cell.py
Example 13: call
def call(self, inputs, state):
  """Long short-term memory cell (LSTM) with masks for pruning.

  Args:
    inputs: `2-D` tensor with shape `[batch_size, input_size]`.
    state: An `LSTMStateTuple` of state tensors, each shaped
      `[batch_size, self.state_size]`, if `state_is_tuple` has been set to
      `True`. Otherwise, a `Tensor` shaped
      `[batch_size, 2 * self.state_size]`.

  Returns:
    A pair containing the new hidden state, and the new state (either a
    `LSTMStateTuple` or a concatenated state, depending on
    `state_is_tuple`).
  """
  sigmoid = math_ops.sigmoid
  one = constant_op.constant(1, dtype=dtypes.int32)
  # Parameters of gates are concatenated into one multiply for efficiency.
  if self._state_is_tuple:
    c, h = state
  else:
    c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)

  gate_inputs = math_ops.matmul(
      array_ops.concat([inputs, h], 1), self._masked_kernel)
  gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)

  # i = input_gate, j = new_input, f = forget_gate, o = output_gate
  i, j, f, o = array_ops.split(
      value=gate_inputs, num_or_size_splits=4, axis=one)

  forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
  # Note that using `add` and `multiply` instead of `+` and `*` gives a
  # performance improvement. So using those at the cost of readability.
  add = math_ops.add
  multiply = math_ops.multiply
  new_c = add(
      multiply(c, sigmoid(add(f, forget_bias_tensor))),
      multiply(sigmoid(i), self._activation(j)))
  new_h = multiply(self._activation(new_c), sigmoid(o))

  if self._state_is_tuple:
    new_state = tf_rnn.LSTMStateTuple(new_c, new_h)
  else:
    new_state = array_ops.concat([new_c, new_h], 1)
  return new_h, new_state
Contributor: AbhinavJain13, Project: tensorflow, Lines of code: 46, Source file: rnn_cells.py
Example 14: relu_layer
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor. Dimensions typically: batch, in_units
    weights: a 2D tensor. Dimensions typically: in_units, out_units
    biases: a 1D tensor. Dimensions: out_units
    name: A name for the operation (optional). If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.op_scope([x, weights, biases], name, "relu_layer") as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name)
Contributor: BersaKAIN, Project: tensorflow, Lines of code: 20, Source file: nn.py
Example 15: xw_plus_b
def xw_plus_b(x, weights, biases, name=None):
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor. Dimensions typically: batch, in_units
    weights: a 2D tensor. Dimensions typically: in_units, out_units
    biases: a 1D tensor. Dimensions: out_units
    name: A name for the operation (optional). If not specified
      "wx_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    mm = math_ops.matmul(x, weights)
    return nn_ops.bias_add(mm, biases, name=name)
Contributor: adeelzaman, Project: tensorflow, Lines of code: 20, Source file: nn.py
Example 16: __call__
def __call__(self, inputs, state, scope=None):
  num_proj = self._num_units if self._num_proj is None else self._num_proj

  if self._state_is_tuple:
    (c_prev, m_prev) = state
  else:
    c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
    m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

  dtype = inputs.dtype
  input_size = inputs.get_shape().with_rank(2)[1]
  if input_size.value is None:
    raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
  with vs.variable_scope(scope or type(self).__name__,
                         initializer=self._initializer):
    concat_w = tf.nn.rnn_cell._get_concat_variable(
        "W", [input_size.value + num_proj, 3 * self._num_units],
        dtype, self._num_unit_shards)

    b = vs.get_variable(
        "B", shape=[3 * self._num_units],
        initializer=init_ops.zeros_initializer, dtype=dtype)

    cell_inputs = array_ops.concat(1, [inputs, m_prev])
    ltm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
    i, j, o = array_ops.split(1, 3, ltm_matrix)  # i, j, o: [1, num_units]

    c = c_prev + sigmoid(i) * self._activation(j)
    if self._cell_clip is not None:
      c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
    m = sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      concat_w_proj = tf.nn.rnn_cell._get_concat_variable(
          "W_P", [self._num_units, self._num_proj],
          dtype, self._num_proj_shards)
      m = math_ops.matmul(m, concat_w_proj)
      if self._proj_clip is not None:
        m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)

    new_state = (tf.nn.rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple
                 else array_ops.concat(1, [c, m]))
    return m, new_state
Contributor: multiangle, Project: PyNLP, Lines of code: 40, Source file: LTM.py
Example 17: call
def call(self, inputs, state):
  """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
  inputs = self._tflite_wrapper.add_input(
      inputs, tag="input", name="input", aggregate="stack", index_override=0)
  state = self._tflite_wrapper.add_input(
      state,
      tag="hidden_state",
      name="hidden_state",
      aggregate="first",
      index_override=4)
  weights = array_ops.transpose(
      array_ops.concat([self._input_weights, self._recurrent_weights], 1))
  gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), weights)
  gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
  output = self._activation(gate_inputs)

  output = self._tflite_wrapper.add_output(
      output,
      tag="output",
      name="output",
      index_override=1,
      aggregate="stack")
  return output, output
Contributor: kylin9872, Project: tensorflow, Lines of code: 22, Source file: rnn_cell.py
Example 18: _SimulateFusedConv2dBiasActivationInt8
def _SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
                                           padding, strides, side_input_scale,
                                           side_input, biases, apply_relu):
  """Simulates the int8 fused 2-D convolution op using separate float ops.

  The arguments and return values have the same format, meanings and
  restrictions as the actual op.

  Args:
    conv_input_scale: A scalar 'float'.
    conv_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    kernel: A `Tensor` of type `qint8` in OIHW_VECT_I layout.
    padding: A `string` from: `"SAME", "VALID"`.
    strides: A list of `ints`.
    side_input_scale: A scalar 'float'.
    side_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    biases: A `Tensor` of type `float32` in NCHW layout.
    apply_relu: A boolean to specify whether to apply "Relu" activation function
      that clips outputs to the range [0, 127], or "None" activation that clips
      to the range [-128, 127].

  Returns:
    A `Tensor` of type `qint8` in NCHW_VECT_C layout.
  """
  conv_result = nn_ops.conv2d(
      _NchwVectCToNchw(gen_array_ops.dequantize(conv_input, -128, 127)),
      _OihwVectIToHwio(gen_array_ops.dequantize(kernel, -128, 127)),
      strides=strides,
      padding=padding,
      data_format="NCHW") * conv_input_scale

  conv_and_side_inputs = conv_result + side_input_scale * _NchwVectCToNchw(
      gen_array_ops.dequantize(side_input, -128, 127))

  output = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")
  if apply_relu:
    output = nn_ops.relu(output)

  result, _, _ = gen_array_ops.quantize_v2(
      _NchwToNchwVectC(output), -128, 127, dtypes.qint8)
  return result
Contributor: Ajaycs99, Project: tensorflow, Lines of code: 39, Source file: fused_conv2d_bias_activation_op_test_base.py
Example 19: call
def call(self, inputs, state):
  """Run one time step of the IndRNN.

  Calculates the output and new hidden state using the IndRNN equation

    `output = new_state = act(W * input + u (*) state + b)`

  where `*` is the matrix multiplication and `(*)` is the Hadamard product.

  Args:
    inputs: Tensor, 2-D tensor of shape `[batch, num_units]`.
    state: Tensor, 2-D tensor of shape `[batch, num_units]` containing the
      previous hidden state.

  Returns:
    A tuple containing the output and new hidden state. Both are the same
    2-D tensor of shape `[batch, num_units]`.
  """
  gate_inputs = math_ops.matmul(inputs, self._input_kernel)
  recurrent_update = math_ops.multiply(state, self._recurrent_kernel)
  gate_inputs = math_ops.add(gate_inputs, recurrent_update)
  gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
  output = self._activation(gate_inputs)
  return output, output
Contributor: xkp793003821, Project: indrnn, Lines of code: 24, Source file: ind_rnn_cell.py
Example 20: _test_fully_connected
def _test_fully_connected(tensor_in_sizes, filter_in_sizes, bias_in_size=None):
  """ One iteration of fully connected """

  total_size_1 = 1
  total_size_2 = 1
  for s in tensor_in_sizes:
    total_size_1 *= s
  for s in filter_in_sizes:
    total_size_2 *= s
  # Initializes the input tensor with array containing incrementing
  # numbers from 1.
  data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
  filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
  assert int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0], \
      "input size and filter size are mismatched"

  with tf.Graph().as_default():
    in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
    in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')

    # reshape N H W C into N H*W*C
    in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])

    out = math_ops.mat_mul(in_data_reshape, in_filter)

    # if we have bias
    if bias_in_size:
      assert bias_in_size[0] == filter_in_sizes[1], "bias and filter size are mismatched"
      bias_array = [f * 1.0 for f in range(1, bias_in_size[0] + 1)]
      in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype='float32')
      out = nn_ops.bias_add(out, in_bias)

    tflite_data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')
    tvm_data_array = np.transpose(tflite_data_array, axes=(0, 3, 1, 2))
    compare_tflite_with_tvm(tflite_data_array, tvm_data_array,
                            'Placeholder:0', [in_data], [out])
Contributor: bddppq, Project: tvm, Lines of code: 36, Source file: test_forward.py
Note: The tensorflow.python.ops.nn_ops.bias_add examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and other platforms. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not republish without permission.