This article collects typical usage examples of the Python function tvm.relay.var. If you have been wondering how exactly to use the var function, or what it looks like in real code, the curated examples below may help.
The following presents 20 code examples of the var function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
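Before the examples, here is a minimal sketch of what relay.var does: it declares a named Relay input variable, optionally annotated with a shape, a dtype, or a full TensorType, which then serves as a parameter of a relay.Function. The variable names and shapes below are illustrative only, and the API shown is the pre-0.6 TVM API used throughout this page:

import numpy as np
import tvm
from tvm import relay

# Declare inputs; type annotations are optional, but unannotated
# variables must later be resolvable by Relay's type inference.
x = relay.var("x", shape=(10, 4), dtype="float32")
y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))

# Variables become the parameters of a relay.Function.
func = relay.Function([x, y], relay.add(x, y))  # broadcasts to (5, 10, 4)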
Example 1: test_cmp_type
def test_cmp_type():
    for op, ref in ((relay.greater, np.greater),
                    (relay.greater_equal, np.greater_equal),
                    (relay.less, np.less),
                    (relay.less_equal, np.less_equal),
                    (relay.equal, np.equal),
                    (relay.not_equal, np.not_equal)):
        x = relay.var("x", relay.TensorType((10, 4), "float32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
        z = op(x, y)
        z.astext()
        zz = relay.ir_pass.infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")

        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape)
            t2 = relay.TensorType(y_shape)
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = op(x, y)
            x_data = np.random.rand(*x_shape).astype(t1.dtype)
            y_data = np.random.rand(*y_shape).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)
            for target, ctx in ctx_list():
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Developer ID: bddppq, Project: tvm, Lines of code: 31, Source file: test_op_level4.py
Example 2: test_mul_param
def test_mul_param():
    x = relay.var('x', shape=(10, 10))
    y = relay.var('y', shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype('float32')
    y_data = np.random.rand(1, 10).astype('float32')
    check_eval(func, [x_data, y_data], x_data * y_data)
Developer ID: LANHUIYING, Project: tvm, Lines of code: 7, Source file: test_backend_interpreter.py
Example 3: dense_add_bias
def dense_add_bias(data, weight=None, bias=None, units=None, **kwargs):
    """Wrapper of dense which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.
    weight : relay.Expr
        The weight of the dense layer.
    bias : relay.Expr
        The bias.
    units : int, optional
        The number of output units.
    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    name = kwargs.pop("name")
    if not weight:
        weight = relay.var(name + "_weight")
    if not bias:
        bias = relay.var(name + "_bias")
    data = relay.nn.dense(data, weight, units, **kwargs)
    data = relay.nn.bias_add(data, bias, axis=-1)
    return data
Developer ID: bddppq, Project: tvm, Lines of code: 31, Source file: layers.py
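A hypothetical call site for this wrapper might look like the sketch below; the input shape, unit count, and the name "fc1" are illustrative assumptions, not taken from the original project:

data = relay.var("data", shape=(1, 784))           # assumed input shape
out = dense_add_bias(data, units=256, name="fc1")  # "fc1" is a made-up name
# "fc1_weight" and "fc1_bias" are created automatically as free variables,
# to be bound with concrete parameter values at compile time.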
Example 4: test_annotate_none
def test_annotate_none():
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))

    def annotated():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        func = relay.ir_pass.infer_type(func)
        func = relay.ir_pass.rewrite_annotated_ops(func,
                                                   ctx1.device_type)
        return func

    def expected():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        return func

    annotated_func = relay.ir_pass.infer_type(annotated())
    expected_func = relay.ir_pass.infer_type(expected())
    assert relay.ir_pass.alpha_equal(annotated_func, expected_func)
Developer ID: bddppq, Project: tvm, Lines of code: 25, Source file: test_pass_annotation.py
Example 5: test_conv
def test_conv():
    batch_size = 1
    input_channel = 3
    h = 224
    w = 224
    output_channel = 64
    kh = 7
    kw = 7
    h_padding = 1
    w_padding = 1
    oh = h + h_padding * 2 - kh + 1
    ow = w + w_padding * 2 - kw + 1
    dshape = (batch_size, input_channel, h, w)
    weight = relay.var("weight", shape=(output_channel, input_channel, kh, kw))
    data = relay.var("data", shape=dshape)
    conv2d = relay.nn.conv2d(
        data,
        weight,
        channels=output_channel,
        kernel_size=(kh, kw),
        padding=(1, 1))
    func = relay.Function([data, weight],
                          relay.Tuple(tvm.convert([conv2d])))
    func = relay.ir_pass.infer_type(func)
    compute_count = relay.ir_pass.get_total_mac_number(func)
    expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
    assert compute_count == expect_count
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_pass_mac_count.py
Example 6: test_tuple
def test_tuple():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    tup = relay.Var("tup")
    func = relay.Function([x, y, z], relay.Let(tup, relay.Tuple([x, y, z]),
                                               relay.TupleGetItem(tup, 0) +
                                               relay.TupleGetItem(tup, 1) -
                                               relay.TupleGetItem(tup, 2)))
    back_func = relay.ir_pass.infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    z_np = z_nd.asnumpy()
    expected_forward = x_np + y_np - z_np
    ex = create_executor()
    forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
    tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
    tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_pass_gradient.py
Example 7: test_recursion
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)
    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "@f(%1, %2) /* ty=float32 */" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
Developer ID: bddppq, Project: tvm, Lines of code: 26, Source file: test_type_infer.py
Example 8: _test_upsampling
def _test_upsampling(layout, method):
    n, c, h, w = tvm.var("n"), 16, 32, 32
    scale = 2
    dtype = "float32"

    def get_shape():
        if layout == "NCHW":
            return (c, h, w), (c, h*scale, w*scale)
        else:
            return (h, w, c), (h*scale, w*scale, c)

    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    if method == "NEAREST_NEIGHBOR":
        ref = topi.testing.upsampling_python(data, scale, layout)
    else:
        ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
    for target, ctx in ctx_list():
        executor = relay.create_executor("graph", ctx=ctx, target=target)
        out = executor.evaluate(func)(data)
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_op_level2.py
Example 9: run_test_conv2d
def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
                    padding=(1, 1),
                    fref=None,
                    groups=1,
                    dilation=(1, 1),
                    except_targets=None,
                    **attrs):
    if except_targets is None:
        except_targets = []
    x = relay.var("x", shape=dshape, dtype=dtype)
    w = relay.var("w", dtype=dtype)
    y = relay.nn.conv2d(x, w,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                        **attrs)
    func = relay.Function([x, w], y)
    data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
    kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
    dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
    if fref is None:
        ref_res = topi.testing.conv2d_nchw_python(
            data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
            groups=groups)
    else:
        ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
    for target, ctx in ctx_list():
        if target in except_targets:
            continue
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 35, Source file: test_op_level2.py
Example 10: test_lrn
def test_lrn():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    assert "alpha=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = .00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 30, Source file: test_op_level2.py
Example 11: test_l2_normalize
def test_l2_normalize():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    assert "axis=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    eps = 0.001
    axis = 1
    z = relay.nn.l2_normalize(x, eps=eps, axis=[axis])
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_op_level2.py
Example 12: test_flatten_infer_type
def test_flatten_infer_type():
    d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")

    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")

    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")

    shape = (1, 5, 10, 10)
    o_shape = (1, 500)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(o_shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = x_data.flatten().reshape(o_shape)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 35, Source file: test_op_level2.py
Example 13: test_conv2d_transpose_infer_type
def test_conv2d_transpose_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.conv2d_transpose(x, w,
                                  kernel_size=(3, 3),
                                  padding=(1, 1),
                                  channels=15)
    assert "channels=15" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 10, 12), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (10, 15, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
    y = relay.nn.conv2d_transpose(x, w,
                                  output_padding=(1, 1),
                                  channels=11,
                                  data_layout="NHWC")
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 15, 11), "float32")
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_op_level2.py
Example 14: test_binary_int_broadcast
def test_binary_int_broadcast():
    for op, ref in [(relay.right_shift, np.right_shift),
                    (relay.left_shift, np.left_shift),
                    (relay.mod, np.mod),
                    (relay.maximum, np.maximum),
                    (relay.minimum, np.minimum)]:
        x = relay.var("x", relay.TensorType((10, 4), "int32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
        z = op(x, y)
        zz = relay.ir_pass.infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")

        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape, 'int32')
            t2 = relay.TensorType(y_shape, 'int32')
            x_data = np.random.rand(*x_shape).astype(t1.dtype)
            y_data = np.random.rand(*y_shape).astype(t2.dtype)
            func = relay.Function([x, y], z)
            ref_res = ref(x_data, y_data)
            for target, ctx in ctx_list():
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Developer ID: bddppq, Project: tvm, Lines of code: 26, Source file: test_op_level4.py
Example 15: test_infer_type_leaky_relu
def test_infer_type_leaky_relu():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in z.astext()
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Developer ID: LANHUIYING, Project: tvm, Lines of code: 26, Source file: test_op_level3.py
Example 16: test_threshold
def test_threshold():
    num_anchors = 5
    num_classes = 5
    n = tvm.var("n")
    cls_prob = relay.var(
        "cls_prob",
        relay.ty.TensorType((n, num_anchors, num_classes), "float32"))
    loc_pred = relay.var(
        "loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32"))
    anchors = relay.var(
        "anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
    threshold = 0.02
    variances = (0.2, 0.2, 0.3, 0.3)
    ret = relay.vision.multibox_transform_loc(
        cls_prob=cls_prob,
        loc_pred=loc_pred,
        anchor=anchors,
        threshold=threshold,
        variances=variances)
    ret = relay.ir_pass.infer_type(ret.astuple())
    ref_type = relay.ty.TupleType(
        tvm.convert([
            relay.ty.TensorType((n, num_anchors, 6), "float32"),
            relay.ty.TensorType((n, ), "int")
        ]))
    assert ret.checked_type == ref_type
Developer ID: bddppq, Project: tvm, Lines of code: 27, Source file: test_op_level5.py
Example 17: verify_infer_type_prelu
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
    x = relay.var("data", relay.TensorType(data, dtype))
    if alpha:
        y = relay.var("alpha", relay.TensorType(alpha, dtype))
    else:
        y = relay.var("alpha", relay.IncompleteType())
    z = relay.nn.prelu(x, y, axis=axis)
    zz = relay.ir_pass.infer_type(z)
    if axis != 1:
        assert "axis" in z.astext()
    assert zz.checked_type == relay.ty.TensorType(output, dtype)
    if not alpha:
        axis = axis if axis else 1
        alpha_shape = (data[axis],)
        assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.expr.Var) for v in data) or not alpha:
        return

    func = relay.Function([x, y], z)
    x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
    a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
    if axis == 1:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
    else:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Developer ID: LANHUIYING, Project: tvm, Lines of code: 35, Source file: test_op_level3.py
Example 18: verify_roi_pool
def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
    data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
    rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
    z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size),
                              spatial_scale=spatial_scale, layout="NCHW")
    zz = relay.ir_pass.infer_type(z)
    batch, channel, in_size, _ = data_shape
    num_roi = rois_shape[0]
    assert zz.checked_type == relay.ty.TensorType(
        (num_roi, channel, pooled_size, pooled_size), "float32")
    func = relay.Function([data, rois], z)
    func = relay.ir_pass.infer_type(func)
    np_data = np.random.uniform(size=data_shape).astype("float32")
    np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
    np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype('float32')
    ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
                                                spatial_scale=spatial_scale)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(np_data, np_rois)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res2 = intrp2.evaluate(func)(np_data, np_rois)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)
Developer ID: bddppq, Project: tvm, Lines of code: 26, Source file: test_op_level5.py
Example 19: test_broadcast_add
def test_broadcast_add():
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = 'float32'
    x_nd = rand(dtype, *shape1)
    y_nd = rand(dtype, *shape2)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    expected_forward = x_np + y_np
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    func = relay.Function([x, y], x + y)
    full_func = relay.ir_pass.infer_type(gradient(func))
    assert full_func.checked_type == relay.FuncType(
        [t1, t2],
        relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
                         relay.TupleType([t1, t2])]))
    ex = create_executor()
    forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                np.ones_like(expected_forward).sum(axis=2, keepdims=True))
    tvm.testing.assert_allclose(grad_y.asnumpy(),
                                np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
Developer ID: bddppq, Project: tvm, Lines of code: 25, Source file: test_pass_gradient.py
Example 20: test_run
def test_run(batch, in_channel, size, out_channel, deformable_groups, groups):
    kernel_size = (3, 3)
    data_shape = (batch, in_channel, size, size)
    offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, size, size)
    kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])
    dtype = 'float32'
    data = relay.var("data", shape=data_shape, dtype=dtype)
    offset = relay.var("offset")
    kernel = relay.var("kernel")
    y = relay.nn.deformable_conv2d(data, offset, kernel,
                                   strides=(1, 1),
                                   padding=(1, 1),
                                   dilation=(1, 1),
                                   kernel_size=kernel_size,
                                   deformable_groups=deformable_groups,
                                   groups=groups,
                                   channels=out_channel)
    func = relay.Function([data, offset, kernel], y)
    data = np.random.uniform(size=data_shape).astype(dtype)
    offset = np.random.uniform(size=offset_shape).astype(dtype)
    kernel = np.random.uniform(size=kernel_shape).astype(dtype)
    ref_res = topi.testing.deformable_conv2d_nchw_python(
        data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1),
        deformable_groups=deformable_groups, groups=groups)
    for target, ctx in ctx_list():
        for kind in ["graph", "debug"]:
            intrp1 = relay.create_executor(kind, ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data, offset, kernel)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
Developer ID: bddppq, Project: tvm, Lines of code: 28, Source file: test_op_level5.py
Note: The tvm.relay.var examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.