This article collects typical usage examples of the Python function tensorflow.python.ops.nn.conv2d. If you have been wondering what exactly nn.conv2d does, how to call it, or where to find working examples, the curated code samples below should help.
20 code examples of the conv2d function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
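Before the collected examples, here is a minimal sketch of calling nn.conv2d directly, using the same TF 1.x graph-mode modules the examples below rely on; the tensor names and shapes are illustrative assumptions rather than part of any example.

import numpy as np
from tensorflow.python.framework import constant_op, dtypes, ops
from tensorflow.python.ops import array_ops, nn

g = ops.Graph()
with g.as_default():
  # NHWC input: a batch of 4 three-channel 16x16 images (illustrative shape).
  x = array_ops.placeholder(dtype=dtypes.float32, shape=[4, 16, 16, 3], name="x")
  # Filter layout is [filter_height, filter_width, in_channels, out_channels].
  w = constant_op.constant(
      np.random.randn(3, 3, 3, 8), dtype=dtypes.float32, name="w")
  # With stride 1 and 'SAME' padding the spatial size is preserved,
  # so the output tensor has shape [4, 16, 16, 8].
  y = nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME", name="y")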
Example 1: GetParams
def GetParams(self):
  """Test for Constant broadcasting in TF-TRT."""
  dtype = dtypes.float32
  input_name = 'input'
  input_dims = [5, 12, 12, 2]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    filt1 = constant_op.constant(
        0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
    y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
    z1 = nn.relu(y1, name='z1')
    filt2 = constant_op.constant(
        np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
    y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
    z2 = nn.relu(y2, name='z')
    filt3 = constant_op.constant(
        np.random.randn(3, 3, 1, 1),
        shape=(3, 3, 1, 1),
        dtype=dtype,
        name='filt3')
    y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
    nn.relu(y3, name='output')
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=1,
      expected_output_dims=(5, 12, 12, 1),
      allclose_atol=1.e-02,
      allclose_rtol=1.e-02)
Author: StephenOman | Project: tensorflow | Lines: 31 | Source: const_broadcast_test.py
Example 2: GetParams
def GetParams(self):
  """Testing conversion of BatchMatMul in TF-TRT conversion."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 15, 15, 3]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      e1 = constant_op.constant(
          np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
      e2 = constant_op.constant(
          np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=e1,
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      out = nn.conv2d(
          input=conv,
          filter=e2,
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv_2")
      array_ops.squeeze(out, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      expected_engines=["my_trt_op_0"],
      expected_output_dims=(2, 15, 15, 10),
      allclose_atol=1.e-02,
      allclose_rtol=1.e-02)
Author: ZhangXinNan | Project: tensorflow | Lines: 35 | Source: memory_alignment_test.py
Example 3: GetMultiEngineGraphDef
def GetMultiEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing multiple segments."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      q = conv / c2
      edge = math_ops.sin(q)
      edge /= edge
      r = edge + edge
      p -= edge
      q *= edge
      s = p + q
      s -= r
      array_ops.squeeze(s, name=OUTPUT_NAME)
  return g.as_graph_def()
Author: Eagle732 | Project: tensorflow | Lines: 34 | Source: tf_trt_integration_test.py
Example 4: _annotated_graph
def _annotated_graph(self):
  graph = ops.Graph()
  with graph.as_default():
    random_seed.set_random_seed(2)
    current_activation = variable_scope.get_variable(
        name='start', shape=[1, 2, 2, 5])
    conv_filter = variable_scope.get_variable(
        name='filter', shape=[5, 5, 5, 5])
    for layer_number in range(3):
      with variable_scope.variable_scope('layer_{}'.format(layer_number)):
        after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                               'SAME')
        current_activation = 2. * after_conv
        current_activation.op._set_attr(
            '_recompute_hint',
            # The value of the attribute does not matter; just that the key
            # exists in the op's attributes.
            attr_value_pb2.AttrValue(i=1))
        current_activation += 5.
        current_activation.op._set_attr(
            '_recompute_hint', attr_value_pb2.AttrValue(i=0))
        current_activation = nn.relu(current_activation)
        current_activation.op._set_attr(
            '_recompute_hint', attr_value_pb2.AttrValue(i=1))
    loss = math_ops.reduce_mean(current_activation)
    optimizer = train.AdamOptimizer(0.001)
    train_op = optimizer.minimize(loss)
    init_op = variables.global_variables_initializer()
  return graph, init_op, train_op
Author: aeverall | Project: tensorflow | Lines: 29 | Source: memory_optimizer_test.py
Example 5: GetParams
def GetParams(self):
  """Single vgg layer test in TF-TRT conversion."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [5, 8, 8, 2]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    x, _, _ = nn_impl.fused_batch_norm(
        x, [1.0, 1.0], [0.0, 0.0],
        mean=[0.5, 0.5],
        variance=[1.0, 1.0],
        is_training=False)
    e = constant_op.constant(
        np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
    b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
    t = nn.bias_add(conv, b, name="biasAdd")
    relu = nn.relu(t, "relu")
    idty = array_ops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    array_ops.squeeze(v, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(5, 2, 2, 6)])
Author: aeverall | Project: tensorflow | Lines: 31 | Source: vgg_block_test.py
Example 6: GetParams
def GetParams(self):
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                [[2, 10, 10, 2]]]
  expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]], [[4, 10, 10, 1]],
                          [[2, 10, 10, 1]]]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(
        dtype=dtype, shape=[None, 10, 10, 2], name=input_name)
    conv_filter = constant_op.constant(
        np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    bias = constant_op.constant(
        np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
    x = math_ops.add(x, bias)
    x = nn.relu(x)
    x = array_ops.identity(x, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=input_dims,
      output_names=[output_name],
      expected_output_dims=expected_output_dims)
Author: adit-chandra | Project: tensorflow | Lines: 32 | Source: lru_cache_test.py
Example 7: get_simple_graph_def
def get_simple_graph_def(self):
  """Create a simple graph and return its graph_def."""
  g = ops.Graph()
  with g.as_default():
    a = aops.placeholder(
        dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
    e = cop.constant(
        [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
        name="weights",
        dtype=dtypes.float32)
    conv = nn.conv2d(
        input=a,
        filter=e,
        strides=[1, 2, 2, 1],
        padding="SAME",
        name="conv")
    b = cop.constant(
        [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
    t = nn.bias_add(conv, b, name="biasAdd")
    relu = nn.relu(t, "relu")
    idty = aops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    aops.squeeze(v, name="output")
  return g.as_graph_def()
Author: ebrevdo | Project: tensorflow | Lines: 25 | Source: tf_trt_integration_test.py
Example 8: GetParams
def GetParams(self):
  """Neighboring node wiring tests in TF-TRT conversion."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    b = constant_op.constant(
        np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = math_ops.mul(conv, b, name="mul")
    e = self.trt_incompatible_op(conv, name="incompatible")
    t = math_ops.sub(t, e, name="sub")
    array_ops.squeeze(t, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(2, 4, 5, 4)])
Author: Ajaycs99 | Project: tensorflow | Lines: 30 | Source: neighboring_engine_test.py
Example 9: GetParams
def GetParams(self):
  """Neighboring node wiring tests in TF-TRT conversion."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    b = constant_op.constant(
        np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv * b
    e = gen_math_ops.tan(conv)
    t = t - e
    array_ops.squeeze(t, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=2,
      expected_output_dims=(2, 4, 5, 4),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Author: StephenOman | Project: tensorflow | Lines: 31 | Source: neighboring_engine_test.py
Example 10: GetSingleEngineGraphDef
def GetSingleEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing a single segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      bias = constant_op.constant(
          [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
      added = nn.bias_add(conv, bias, name="bias_add")
      relu = nn.relu(added, "relu")
      identity = array_ops.identity(relu, "identity")
      pool = nn_ops.max_pool(
          identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
      array_ops.squeeze(pool, name=OUTPUT_NAME)
  return g.as_graph_def()
Author: Eagle732 | Project: tensorflow | Lines: 26 | Source: tf_trt_integration_test.py
Example 11: GetParams
def GetParams(self):
  # TODO(laigd): we should test the following cases:
  # - batch size is not changed, other dims are changing
  # - batch size is decreasing, other dims are identical
  # - batch size is decreasing, other dims are changing
  # - batch size is increasing, other dims are identical
  # - batch size is increasing, other dims are changing
  input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
  expected_output_dims = input_dims
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(
        shape=(None, None, None, 1), dtype=dtypes.float32, name="input")
    conv_filter1 = constant_op.constant(
        np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
    bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter1,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias1)
    x = nn.relu(x)
    conv_filter2 = constant_op.constant(
        np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
    bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter2,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias2)
    x = array_ops.identity(x, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=["input"],
      input_dims=input_dims,
      output_names=["output"],
      expected_output_dims=expected_output_dims)
Author: Wajih-O | Project: tensorflow | Lines: 45 | Source: dynamic_input_shapes_test.py
Example 12: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  # TODO(aaroey): test graph with different dtypes.
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [100, 24, 24, 2]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c1")
      p = math_ops.mul(conv, c1, name="mul")
      c2 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c2")
      q = math_ops.div(conv, c2, name="div")
      edge = self.trt_incompatible_op(q, name="incompatible")
      edge = math_ops.div(edge, edge, name="div1")
      r = math_ops.add(edge, edge, name="add")
      p = math_ops.sub(p, edge, name="sub")
      q = math_ops.mul(q, edge, name="mul1")
      s = math_ops.add(p, q, name="add1")
      s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
      # breaks the connection check, fix it.
      # - my_trt_op_0 should have ["mul", "sub", "div1", "mul1", "add1",
      #   "add", "sub1"];
      # - my_trt_op_1 should have ["weights", "conv", "div"]
      expected_engines=["my_trt_op_0", "my_trt_op_1"],
      expected_output_dims=(100, 12, 12, 6),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Author: ZhangXinNan | Project: tensorflow | Lines: 50 | Source: base_test.py
Example 13: GetParams
def GetParams(self):
  """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.05, .005, [3, 2, 3, 4]),
        name="weights",
        dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    b = constant_op.constant(
        np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv + b
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    q = conv - b
    edge = math_ops.sigmoid(q)
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    d = b + conv
    edge3 = math_ops.sigmoid(d)
    edge1 = gen_math_ops.tan(conv)
    t = t - edge1
    q = q + edge
    t = t + q
    t = t + d
    t = t - edge3
    array_ops.squeeze(t, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      expected_engines=["my_trt_op_0", "my_trt_op_1"],
      expected_output_dims=(2, 4, 5, 4),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Author: ZhangXinNan | Project: tensorflow | Lines: 48 | Source: multi_connection_neighbor_engine_test.py
Example 14: GetParams
def GetParams(self):
  """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.05, .005, [3, 2, 3, 4]),
        name="weights",
        dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    b = constant_op.constant(
        np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv + b
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    q = conv - b
    edge = self.trt_incompatible_op(q)
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    d = b + conv
    edge3 = self.trt_incompatible_op(d)
    edge1 = self.trt_incompatible_op(conv)
    t = t - edge1
    q = q + edge
    t = t + q
    t = t + d
    t = t - edge3
    array_ops.squeeze(t, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[[input_dims]],
      output_names=[output_name],
      expected_output_dims=[[[2, 4, 5, 4]]])
Author: Wajih-O | Project: tensorflow | Lines: 47 | Source: multi_connection_neighbor_engine_test.py
Example 15: conv2d
def conv2d(tensor_in,
           n_filters,
           filter_shape,
           strides=None,
           padding='SAME',
           bias=True,
           activation=None,
           batch_norm=False):
  """Creates 2D convolutional subgraph with bank of filters.

  Uses tf.nn.conv2d under the hood. Creates a filter bank:
    [filter_shape[0], filter_shape[1], tensor_in[3], n_filters]
  and applies it to the input tensor.

  Args:
    tensor_in: input Tensor, 4D shape:
      [batch, in_height, in_width, in_depth].
    n_filters: number of filters in the bank.
    filter_shape: Shape of filters, a list of ints, 1-D of length 2.
    strides: A list of ints, 1-D of length 4. The stride of the sliding
      window for each dimension of input.
    padding: A string: 'SAME' or 'VALID'. The type of padding algorithm to
      use. See the [comment here]
      (https://www.tensorflow.org/api_docs/python/nn.html#convolution)
    bias: Boolean, whether to add a bias term.
    activation: Activation Op, optional. If provided, applied on the output.
    batch_norm: Whether to apply batch normalization.

  Returns:
    A Tensor with resulting convolution.
  """
  with vs.variable_scope('convolution'):
    if strides is None:
      strides = [1, 1, 1, 1]
    input_shape = tensor_in.get_shape()
    filter_shape = list(filter_shape) + [input_shape[3], n_filters]
    filters = vs.get_variable('filters', filter_shape, dtypes.float32)
    output = nn.conv2d(tensor_in, filters, strides, padding)
    if bias:
      bias_var = vs.get_variable('bias', [1, 1, 1, n_filters], dtypes.float32)
      output += bias_var
    if batch_norm:
      output = batch_normalize(output, convnet=True)
    if activation:
      output = activation(output)
    return output
Author: 0ruben | Project: tensorflow | Lines: 47 | Source: conv_ops.py
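A minimal usage sketch of the wrapper above; the tensor name, shape, and the tf.* aliases are illustrative assumptions (TF 1.x graph mode), not part of the original conv_ops.py.

import tensorflow as tf

# A batch of 28x28 RGB images (the placeholder shape is an assumption for this sketch).
images = tf.placeholder(tf.float32, [None, 28, 28, 3], name='images')
# Builds a 3x3 filter bank with 16 output channels under the 'convolution'
# variable scope, adds a bias, and applies ReLU to the result.
features = conv2d(images, n_filters=16, filter_shape=[3, 3],
                  activation=tf.nn.relu)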
Example 16: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  # TODO(aaroey): test graph with different dtypes.
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [100, 24, 24, 2]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
      q = conv / c2
      edge = self.trt_incompatible_op(q)
      edge /= edge
      r = edge + edge
      p -= edge
      q *= edge
      s = p + q
      s -= r
      array_ops.squeeze(s, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=2,
      expected_output_dims=(100, 12, 12, 6),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Author: StephenOman | Project: tensorflow | Lines: 45 | Source: base_test.py
Example 17: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  # TODO(aaroey): test graph with different dtypes.
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [100, 24, 24, 2]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=input_dims, name=input_name)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(12, 12, 6), dtype=dtype, name="c1")
      p = math_ops.mul(conv, c1, name="mul")
      c2 = constant_op.constant(
          np.random.randn(12, 12, 6), dtype=dtype, name="c2")
      q = math_ops.div(conv, c2, name="div")
      edge = self.trt_incompatible_op(q, name="incompatible")
      edge = math_ops.div(edge, edge, name="div1")
      r = math_ops.add(edge, edge, name="add")
      p = math_ops.sub(p, edge, name="sub")
      q = math_ops.mul(q, edge, name="mul1")
      s = math_ops.add(p, q, name="add1")
      s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(100, 12, 12, 6)])
Author: aeverall | Project: tensorflow | Lines: 44 | Source: base_test.py
Example 18: GetParams
def GetParams(self):
  """Single vgg layer in NCHW unit tests in TF-TRT."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [5, 2, 8, 8]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    x, _, _ = nn_impl.fused_batch_norm(
        x,
        np.random.randn(2).astype(np.float32),
        np.random.randn(2).astype(np.float32),
        mean=np.random.randn(2).astype(np.float32),
        variance=np.random.randn(2).astype(np.float32),
        data_format="NCHW",
        is_training=False)
    e = constant_op.constant(
        np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 2, 2],
        padding="SAME",
        name="conv")
    b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
    t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
    relu = nn.relu(t, "relu")
    idty = array_ops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 1, 2, 2], [1, 1, 2, 2],
        "VALID",
        data_format="NCHW",
        name="max_pool")
    array_ops.squeeze(v, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=1,
      expected_output_dims=(5, 6, 2, 2),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Author: StephenOman | Project: tensorflow | Lines: 43 | Source: vgg_block_nchw_test.py
Example 19: _GetMetaGraph
def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
  """A simple layered graph with conv, an intermediate op, and a ReLU."""
  graph = ops.Graph()
  with graph.as_default():
    random_seed.set_random_seed(1)
    current_activation = variable_scope.get_variable(
        name='start', shape=[batch_size, image_dim, image_dim, 5])
    conv_filter = variable_scope.get_variable(
        name='filter', shape=[5, 5, 5, 5])
    for layer_number in range(10):
      with variable_scope.variable_scope('layer_{}'.format(layer_number)):
        after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                               'SAME')
        current_activation = 2. * after_conv
        current_activation = nn.relu(current_activation)
    loss = math_ops.reduce_mean(current_activation)
    with ops.name_scope(optimizer_scope_name):
      optimizer = train.AdamOptimizer(0.001)
      train_op = optimizer.minimize(loss)
    init_op = variables.global_variables_initializer()
    metagraph = train.export_meta_graph()
  return (metagraph, init_op.name, train_op.name, loss.name)
Author: Dr4KK | Project: tensorflow | Lines: 22 | Source: memory_optimizer_test.py
Example 20: get_multi_engine_graph_def
def get_multi_engine_graph_def(mode="FP32"):
  """Create a simple graph and return its graph_def."""
  dtype = dtypes.float32
  if mode.upper() == "FP16":
    dtype = dtypes.float16
  else:
    pass
  g = ops.Graph()
  with g.as_default():
    x = aops.placeholder(shape=[None, 3, 7, 5], name="input", dtype=dtype)
    with g.name_scope("Global_scope"):
      with g.name_scope("first_scope"):
        e = cop.constant(
            np.random.randn(3, 2, 3, 4), name="weights", dtype=dtype)
        conv = nn.conv2d(
            input=x,
            filter=e,
            data_format="NCHW",
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias1", dtype=dtype)
        t = conv * b
        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias2", dtype=dtype)
        q = conv / b
        edge = mops.sin(q)
        edge1 = mops.cos(conv)
        with g.name_scope("test_scope"):
          de = edge + edge1
          t -= edge1
          q *= edge
          t += q
          t -= de
    k = aops.squeeze(t, name="output")
    print(k.dtype)
  return g.as_graph_def()
Author: Eagle732 | Project: tensorflow | Lines: 38 | Source: test_tftrt.py
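The GraphDef builders in these examples are typically handed to the TF-TRT converter afterwards. The sketch below assumes the tf.contrib.tensorrt API that shipped alongside these tests (TF 1.x); the exact keyword arguments varied between releases, so treat it as an illustration rather than the tests' own harness.

from tensorflow.contrib import tensorrt as trt

graph_def = get_multi_engine_graph_def(mode="FP32")
# Replace TensorRT-compatible subgraphs with my_trt_op_* engine nodes.
trt_graph_def = trt.create_inference_graph(
    input_graph_def=graph_def,
    outputs=["output"],
    max_batch_size=2,
    precision_mode="FP32")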
Note: the tensorflow.python.ops.nn.conv2d examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution and use should follow each project's License. Do not reproduce without permission.