This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.pad. If you are wondering what the Python pad function does, how to call it, or what real uses of pad look like, the curated code examples below should help.
A total of 20 code examples of the pad function are shown below, sorted by popularity by default.
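Before the examples, here is a minimal, self-contained sketch of the basic API. It is an illustrative addition rather than part of any project below; it assumes a TF 1.x graph-mode environment (which is what the examples use) and relies on array_ops.pad mirroring the public tf.pad. The second argument is an integer tensor of shape [rank(t), 2] giving the padding added before and after each dimension, and mode selects constant (zero), reflected, or symmetric padding.

import tensorflow as tf
from tensorflow.python.ops import array_ops

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
# paddings has shape [rank(t), 2]: [[before_dim0, after_dim0], [before_dim1, after_dim1]].
padded = array_ops.pad(t, [[1, 1], [2, 2]])                       # zeros; result shape [4, 7]
reflected = array_ops.pad(t, [[0, 0], [1, 1]], mode="REFLECT")    # mirror, edge not repeated
symmetric = array_ops.pad(t, [[0, 0], [1, 1]], mode="SYMMETRIC")  # mirror, edge repeated

with tf.Session() as sess:
  print(sess.run(padded))
  print(sess.run(reflected))  # [[2 1 2 3 2] [5 4 5 6 5]]
  print(sess.run(symmetric))  # [[1 1 2 3 3] [4 4 5 6 6]]

Several of the test cases below probe the error conditions of this same call: paddings must be non-negative, must have shape [rank(input), 2], and, for REFLECT/SYMMETRIC modes, must not exceed the size of the padded dimension.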
Example 1: testPaddingsNonNegative2
def testPaddingsNonNegative2(self):
  with self.test_session(use_gpu=True):
    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
      array_ops.pad(
          constant_op.constant([1], shape=[1]),
          constant_op.constant([-1, 0], shape=[1, 2]))
Author: AndrewTwinz, Project: tensorflow, Lines: 7, Source: pad_op_test.py
Example 2: testInputDims
def testInputDims(self):
  with self.test_session(use_gpu=True):
    with self.assertRaises(ValueError):
      array_ops.pad(
          array_ops.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
          array_ops.reshape([1, 2], shape=[1, 2]))
Author: AndrewTwinz, Project: tensorflow, Lines: 7, Source: pad_op_test.py
Example 3: get_observation_model
def get_observation_model(self, times):
  """Construct observation model matrix from VARMA parameters.

  Args:
    times: A [batch size] vector indicating the times observation models are
        requested for. Unused.

  Returns:
    the observation model matrix. It has shape
    [self.num_features, self.state_dimension].
  """
  del times  # StateSpaceModel will broadcast along the batch dimension
  if self.ar_order > self.ma_order or self.state_num_blocks < 2:
    return array_ops.pad(
        linalg_ops.eye(self.num_features, dtype=self.dtype),
        [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
        name="observation_model")
  else:
    # Add a second observed component which "catches" the accumulated moving
    # average errors as they reach the end of the state. If ar_order >
    # ma_order, this is unnecessary, since accumulated errors cycle naturally.
    return array_ops.concat(
        [
            array_ops.pad(
                linalg_ops.eye(self.num_features, dtype=self.dtype),
                [[0, 0], [0, self.num_features * (self.state_num_blocks - 2)]]),
            linalg_ops.eye(self.num_features, dtype=self.dtype)
        ],
        axis=1,
        name="observation_model")
Author: 1000sprites, Project: tensorflow, Lines: 30, Source: varma.py
Example 4: testPaddingsDim4
def testPaddingsDim4(self):
  with self.test_session(use_gpu=True):
    with self.assertRaises(ValueError):
      array_ops.pad(
          array_ops.reshape([1, 2], shape=[1, 2]),
          array_ops.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
Author: AndrewTwinz, Project: tensorflow, Lines: 7, Source: pad_op_test.py
Example 5: create_test_network_8
def create_test_network_8():
  """Aligned network for test, including an intermediate addition.

  The graph is similar to create_test_network_1(), except that it includes a few
  more layers on top. The added layers compose two different branches whose
  receptive fields are different. This makes this test case more challenging; in
  particular, this test fails if a naive DFS-like algorithm is used for RF
  computation.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # First addition.
    l4 = nn.relu(l1 + l3)
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='VALID')
    # Right branch after first addition.
    l6_pad = array_ops.pad(l4, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l6 = slim.conv2d(l6_pad, 1, [3, 3], stride=2, scope='L6', padding='VALID')
    # Final addition.
    nn.relu(l5 + l6, name='output')
  return g
Author: Ajaycs99, Project: tensorflow, Lines: 34, Source: receptive_field_test.py
Example 6: testPaddingsDim2
def testPaddingsDim2(self):
  with self.session(use_gpu=True):
    with self.assertRaises(ValueError):
      array_ops.pad(
          array_ops.reshape([1, 2], shape=[1, 2]),
          array_ops.reshape([1, 2], shape=[2, 1]))
Author: abhinav-upadhyay, Project: tensorflow, Lines: 7, Source: pad_op_test.py
Example 7: _makeTridiagonalMatrix
def _makeTridiagonalMatrix(self, superdiag, maindiag, subdiag):
  super_pad = [[0, 0], [0, 1], [1, 0]]
  sub_pad = [[0, 0], [1, 0], [0, 1]]
  super_part = array_ops.pad(array_ops.matrix_diag(superdiag), super_pad)
  main_part = array_ops.matrix_diag(maindiag)
  sub_part = array_ops.pad(array_ops.matrix_diag(subdiag), sub_pad)
  return super_part + main_part + sub_part
Author: aritratony, Project: tensorflow, Lines: 8, Source: tridiagonal_matmul_op_test.py
Example 8: baseline
def baseline(self, upper, diag, lower, vec):
  diag_part = array_ops.expand_dims(diag, -1) * vec
  lower_part = array_ops.pad(
      array_ops.expand_dims(lower[:, 1:], -1) * vec[:, :-1, :],
      [[0, 0], [1, 0], [0, 0]])
  upper_part = array_ops.pad(
      array_ops.expand_dims(upper[:, :-1], -1) * vec[:, 1:, :],
      [[0, 0], [0, 1], [0, 0]])
  return lower_part + diag_part + upper_part
Author: aritratony, Project: tensorflow, Lines: 9, Source: tridiagonal_matmul_op_test.py
Example 9: testPaddingsMaximum
def testPaddingsMaximum(self):
  with self.test_session(use_gpu=True):
    with self.assertRaises(Exception):
      array_ops.pad(
          constant_op.constant([1], shape=[2]),
          constant_op.constant([2, 0], shape=[1, 2]),
          mode="REFLECT").eval()
    with self.assertRaises(Exception):
      array_ops.pad(
          constant_op.constant([1], shape=[2]),
          constant_op.constant([0, 3], shape=[1, 2]),
          mode="SYMMETRIC").eval()
Author: kdavis-mozilla, Project: tensorflow, Lines: 10, Source: pad_op_test.py
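The test above exercises the maximum-padding constraint: for a dimension of size 2, REFLECT allows at most 1 sample of padding per side (dimension size minus one) and SYMMETRIC allows at most 2 (the dimension size), so the padding amounts 2 and 3 both raise errors. A small illustrative sketch of the largest values that do work (an addition, not part of the test file; assumes TF 1.x graph mode):

import tensorflow as tf
from tensorflow.python.ops import array_ops

t = tf.constant([1, 2])
ok_reflect = array_ops.pad(t, [[1, 1]], mode="REFLECT")      # -> [2 1 2 1]
ok_symmetric = array_ops.pad(t, [[2, 2]], mode="SYMMETRIC")  # -> [2 1 1 2 2 1]
with tf.Session() as sess:
  print(sess.run(ok_reflect))
  print(sess.run(ok_symmetric))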
Example 10: testShapeFunctionEdgeCases
def testShapeFunctionEdgeCases(self):
  # Unknown paddings shape.
  inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
  padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
  self.assertEqual([None, None, None, None], padded.get_shape().as_list())

  # Unknown input shape.
  inp = array_ops.placeholder(dtypes.float32)
  padded = array_ops.pad(inp, [[2, 2], [2, 2]])
  self.assertEqual([None, None], padded.get_shape().as_list())

  # Unknown input and paddings shape.
  inp = array_ops.placeholder(dtypes.float32)
  padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
  self.assertAllEqual(None, padded.get_shape().ndims)
Author: AndrewTwinz, Project: tensorflow, Lines: 15, Source: pad_op_test.py
Example 11: insert_slice_in_zeros
def insert_slice_in_zeros(slice_to_insert, dim, dim_size, position):
  """Inserts slice into a larger tensor of zeros.

  Forms a new tensor which is the same shape as slice_to_insert, except that
  the dimension given by 'dim' is expanded to the size given by 'dim_size'.
  'position' determines the position (index) at which to insert the slice within
  that dimension.

  Assumes slice_to_insert.shape[dim] = 1.

  Args:
    slice_to_insert: The slice to insert.
    dim: The dimension which to expand with zeros.
    dim_size: The new size of the 'dim' dimension.
    position: The position of 'slice_to_insert' in the new tensor.

  Returns:
    The new tensor.

  Raises:
    ValueError: If the slice's shape at the given dim is not 1.
  """
  slice_shape = slice_to_insert.shape
  if slice_shape[dim] != 1:
    raise ValueError("Expected slice_to_insert.shape to have {} dim of 1, but "
                     "was {}".format(dim, slice_to_insert.shape[dim]))
  before = [0] * int(len(slice_shape))
  after = before[:]
  before[dim] = position
  after[dim] = dim_size - position - 1
  return array_ops.pad(slice_to_insert, list(zip(before, after)))
Author: dyoung418, Project: tensorflow, Lines: 33, Source: loss_functions.py
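To make the padding arithmetic of insert_slice_in_zeros concrete, here is a hedged NumPy stand-in with hypothetical values (dim=1, dim_size=4, position=2): the helper's zip(before, after) reduces to the pad widths [(0, 0), (2, 1)], which place the size-1 slice at index 2 of a zero-filled dimension of size 4.

import numpy as np

slice_to_insert = np.array([[1.], [2.], [3.]])   # shape [3, 1]; the 'dim'=1 axis has size 1
before, after = [0, 2], [0, 4 - 2 - 1]           # position=2, dim_size=4
print(np.pad(slice_to_insert, list(zip(before, after)), mode='constant'))
# [[0. 0. 1. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 3. 0.]]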
Example 12: testPadWithNonConstPaddings
def testPadWithNonConstPaddings(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    paddings = array_ops.placeholder(dtype='int32')
    pad = array_ops.pad(conv, paddings)
    output = array_ops.identity(pad)
    paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]

    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(
          output, run_metadata=metadata, feed_dict={paddings: paddings_val})

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
    self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Author: ChengYuXiang, Project: tensorflow, Lines: 35, Source: layout_optimizer_test.py
Example 13: inverse_stft_window_fn_inner
def inverse_stft_window_fn_inner(frame_length, dtype):
  """Computes a window that can be used in `inverse_stft`.

  Args:
    frame_length: An integer scalar `Tensor`. The window length in samples.
    dtype: Data type of waveform passed to `stft`.

  Returns:
    A window suitable for reconstructing original waveform in `inverse_stft`.

  Raises:
    ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
      callable that takes a window length and a `dtype` keyword argument and
      returns a `[window_length]` `Tensor` of samples in the provided datatype,
      `frame_step` is not scalar, or `frame_step` is not positive.
  """
  with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)

    # Use equation 7 from Griffin + Lim.
    forward_window = forward_window_fn(frame_length, dtype=dtype)
    denom = math_ops.square(forward_window)
    overlaps = -(-frame_length // frame_step)  # Ceiling division.
    denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
    denom = array_ops.reshape(denom, [overlaps, frame_step])
    denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
    denom = array_ops.tile(denom, [overlaps, 1])
    denom = array_ops.reshape(denom, [overlaps * frame_step])

    return forward_window / denom[:frame_length]
Author: AbhinavJain13, Project: tensorflow, Lines: 31, Source: spectral_ops.py
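In the snippet above, forward_window_fn, frame_step, and name are free variables captured from the enclosing inverse_stft_window_fn() factory; only the inner closure is shown. Below is a hedged usage sketch (assuming the TF 1.x tf.contrib.signal wrappers of this code, with hypothetical frame sizes) of how the returned window function is passed to inverse_stft:

import tensorflow as tf

signal = tf.random_normal([1, 4096])
stfts = tf.contrib.signal.stft(signal, frame_length=256, frame_step=128)
# inverse_stft_window_fn(frame_step) returns the inner function shown above,
# with frame_step and the forward (Hann) window baked in as closure variables.
inv_window_fn = tf.contrib.signal.inverse_stft_window_fn(frame_step=128)
reconstructed = tf.contrib.signal.inverse_stft(
    stfts, frame_length=256, frame_step=128, window_fn=inv_window_fn)

with tf.Session() as sess:
  original, recovered = sess.run([signal, reconstructed])
  # Interior samples, where frames fully overlap, are reconstructed (near) exactly.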
Example 14: testFusePadAndConv
def testFusePadAndConv(self):
  with self.cached_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                           mode="REFLECT")
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.cached_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])
    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("ResizeBilinear", node.op)
Author: Wajih-O, Project: tensorflow, Lines: 27, Source: optimize_for_inference_test.py
Example 15: pack_uint8_r2_to_uint32
def pack_uint8_r2_to_uint32(self, test_input):
  num_rows, num_columns = test_input.get_shape().as_list()
  num_output_columns = int(math.ceil(num_columns / 4.0))
  padding_input = array_ops.pad(
      math_ops.cast(test_input, dtype=dtypes.uint8),
      constant_op.constant([[0, 0], [0, num_output_columns * 4 - num_columns]]))
  output = array_ops.zeros([num_rows, num_output_columns],
                           dtype=dtypes.uint32)
  num_elements_per_pack = 4
  shift_bits = 8
  iota_r1 = math_ops.range(num_output_columns * num_elements_per_pack)
  for p in range(num_elements_per_pack):
    selected_index = math_ops.equal(
        math_ops.mod(iota_r1, num_elements_per_pack), p)
    gather_index = array_ops.boolean_mask(iota_r1, selected_index)
    gathered_input = array_ops.gather(padding_input, gather_index, axis=1)
    total_shift_bits = shift_bits * (num_elements_per_pack - p - 1)
    left_shift_input = bitwise_ops.left_shift(
        math_ops.cast(gathered_input, dtype=dtypes.uint32), total_shift_bits)
    output = bitwise_ops.bitwise_or(output, left_shift_input)
  return output
Author: Albert-Z-Guo, Project: tensorflow, Lines: 26, Source: quantized_ops_test.py
Example 16: conv2d_same
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, then we do explicit zero-padding, followed by conv2d with
  'VALID' padding.

  Note that

     net = conv2d_same(inputs, num_outputs, 3, stride=stride)

  is equivalent to

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
                                    padding='SAME')
     net = subsample(net, factor=stride)

  whereas

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
                                    padding='SAME')

  is different when the input's height or width is even, which is why we add the
  current function. For more details, see ResnetUtilsTest.testConv2DSameEven().

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        rate=rate,
        padding='SAME',
        scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope)
Author: LUTAN, Project: tensorflow, Lines: 60, Source: resnet_utils.py
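As a quick check of the padding arithmetic in the else branch above (plain Python, with values taken directly from the code): with kernel_size=3 and rate=2 the effective kernel spans 5 input positions, so conv2d_same zero-pads 2 on each side before the VALID convolution.

kernel_size, rate = 3, 2
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)  # 3 + 2*1 = 5
pad_total = kernel_size_effective - 1                                 # 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2         # 2, 2
print(pad_beg, pad_end)  # -> 2 2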
Example 17: create_test_network_2
def create_test_network_2():
  """Aligned network for test.

  The graph corresponds to a variation to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
  to max-pooling operations. Since the functionality is the same as convolution,
  the network is aligned and the receptive field size is the same as from the
  network created using create_test_network_1().

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.max_pool2d(l2_pad, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
Author: Ajaycs99, Project: tensorflow, Lines: 26, Source: receptive_field_test.py
Example 18: create_test_network
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')
  return g
Author: Ajaycs99, Project: tensorflow, Lines: 27, Source: graph_compute_order_test.py
Example 19: testPadWithConstPaddings
def testPadWithConstPaddings(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
    paddings = constant_op.constant(
        paddings_val, dtype='int32', name='PaddingsConst')
    pad = array_ops.pad(conv, paddings)
    output = array_ops.identity(pad)

    with session.Session() as sess:
      output_val_ref = sess.run(output)

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if node.name.startswith('LayoutOptimizerTranspose'):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
    self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
    self.assertIn('LayoutOptimizer-Pad-PaddingsConst', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Author: AnddyWang, Project: tensorflow, Lines: 33, Source: layout_optimizer_test.py
Example 20: frames
def frames(signal, frame_length, frame_step, name=None):
  """Frame a signal into overlapping frames.

  May be used in front of spectral functions.

  For example:

  ```python
  pcm = tf.placeholder(tf.float32, [None, 9152])
  frames = tf.contrib.signal.frames(pcm, 512, 180)
  magspec = tf.abs(tf.spectral.rfft(frames, [512]))
  image = tf.expand_dims(magspec, 3)
  ```

  Args:
    signal: A `Tensor` of shape `[batch_size, signal_length]`.
    frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
    frame_step: An `int32` or `int64` `Tensor`. The step between frames.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.

  Raises:
    ValueError: if signal does not have rank 2.
  """
  with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
    signal = ops.convert_to_tensor(signal, name="signal")
    frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
    frame_step = ops.convert_to_tensor(frame_step, name="frame_step")

    signal_rank = signal.shape.ndims
    if signal_rank != 2:
      raise ValueError("expected signal to have rank 2 but was " +
                       str(signal_rank))

    signal_length = array_ops.shape(signal)[1]

    num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
    num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)

    pad_length = (num_frames - 1) * frame_step + frame_length
    pad_signal = array_ops.pad(signal,
                               [[0, 0], [0, pad_length - signal_length]])

    indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
    indices_frames = array_ops.tile(indices_frame, [num_frames, 1])

    indices_step = array_ops.expand_dims(
        math_ops.range(num_frames) * frame_step, 1)
    indices_steps = array_ops.tile(indices_step, [1, frame_length])

    indices = indices_frames + indices_steps

    # TODO(androbin): remove `transpose` when `gather` gets `axis` support
    pad_signal = array_ops.transpose(pad_signal)
    signal_frames = array_ops.gather(pad_signal, indices)
    signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])

    return signal_frames
Author: AlbertXiebnu, Project: tensorflow, Lines: 60, Source: shape_ops.py
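A quick sanity check of the framing arithmetic above, in plain Python, using the sizes from the docstring example (a 9152-sample signal, frame_length=512, frame_step=180). With these values the signal already fits the frame grid exactly, so the pad amount works out to zero.

import math

signal_length, frame_length, frame_step = 9152, 512, 180
num_frames = 1 + int(math.ceil((signal_length - frame_length) / float(frame_step)))  # 49
pad_length = (num_frames - 1) * frame_step + frame_length                            # 9152
print(num_frames, pad_length - signal_length)  # -> 49 0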
Note: The tensorflow.python.ops.array_ops.pad examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not republish without permission.