Python nnabla.context_scope Function Code Examples


This article collects typical usage examples of the nnabla.context_scope function in Python. If you are wondering what exactly context_scope does, how to call it, or what real usage looks like, the curated code examples below should help.



The following presents 20 code examples of the context_scope function, sorted by popularity by default. You can upvote the examples you find useful to help surface better Python code samples.
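Before the individual examples, here is a minimal sketch of the pattern they all share (assuming nnabla and numpy are installed and the default CPU context is used): a computation graph is built inside a context_scope block, so the functions created there are bound to the given context.

import numpy as np
import nnabla as nn
import nnabla.functions as F

# Default context; an extension context (e.g. a GPU backend) could be passed instead.
ctx = nn.Context()

x = nn.Variable((4, 8))
x.d = np.random.randn(4, 8)

# Functions created inside the scope execute on ctx.
with nn.context_scope(ctx):
    h = F.relu(x)
    loss = F.mean(h)

loss.forward()
print(loss.d)

The examples below follow the same structure: build part of the graph (a model, a loss, or a test case) inside nn.context_scope(ctx), then run forward/backward outside or inside the scope as needed.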

Example 1: sigma_regularization

def sigma_regularization(ctx, log_var, one):
    with nn.context_scope(ctx):
        h = F.exp(log_var)
        h = F.pow_scalar(h, 0.5)
        b = log_var.shape[0]
        r = F.sum(F.squared_error(h, one)) / b
    return r
Developer ID: kzky, Project: works, Lines: 7, Source: cnn_model_040.py


Example 2: resnet_model

def resnet_model(ctx, x, inmaps=64, act=F.relu, test=False):
    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, inmaps, kernel=(3, 3), pad=(1, 1), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        
        h = res_unit(h, "conv2", act, False) # -> 32x32
        h = res_unit(h, "conv3", act, True)  # -> 16x16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv4", act, False) # -> 16x16
        h = res_unit(h, "conv5", act, True)  # -> 8x8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv6", act, False) # -> 8x8
        h = res_unit(h, "conv7", act, True)  # -> 4x4
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv8", act, False) # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        
        pred = PF.affine(h, 10)
    return pred
Developer ID: kzky, Project: works, Lines: 31, Source: cnn_model_019.py


Example 3: cnn_model_003

def cnn_model_003(ctx, x, act=F.relu, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)

        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)

        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)

        # Convblock 3
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        return h
Developer ID: kzky, Project: works, Lines: 34, Source: cnn_model_005_001.py


Example 4: inplace_function_test_helper

def inplace_function_test_helper(inputs, func, func_args=[], func_kwargs={}, ctx=None, rng=None):
    if rng is None:
        rng = np.random.RandomState(313)
    if ctx is None:
        ctx = nn.Context()
    with nn.context_scope(ctx):
        a_s = [inp * 1.0 for inp in inputs]
        y = func(*(a_s + list(func_args)), inplace=False, **func_kwargs)
        l = F.sum(y)
        a_s_i = [inp * 1.0 for inp in inputs]
        y_i = func(*(a_s_i + list(func_args)), inplace=True, **func_kwargs)
        l_i = F.sum(y_i)
    data = [(rng.randn(*inp.shape), rng.randn(*inp.shape)) for inp in inputs]
    for i in range(len(data)):
        inputs[i].d = data[i][0]
        inputs[i].g = data[i][1]
    l.forward()
    l.backward()
    grads = [inp.g.copy() for inp in inputs]
    for i in range(len(data)):
        inputs[i].d = data[i][0]
        inputs[i].g = data[i][1]
    l_i.forward()
    l_i.backward()
    grads_i = [inp.g.copy() for inp in inputs]
    for g, g_i in zip(grads, grads_i):
        assert np.allclose(g, g_i)
Developer ID: zwsong, Project: nnabla, Lines: 27, Source: nbla_test_utils.py


Example 5: cnn_ae_model_000

def cnn_ae_model_000(ctx, x, act=F.relu, test=False):
    with nn.parameter_scope("ae"):
        with nn.context_scope(ctx):
            # Convblock0
            h = conv_unit(x, "conv00", 32, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv01", 32, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv02", 32, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv03", 32, k=4, s=2, p=1, act=act, test=test)  # 32 -> 16
            if not test:
                h = F.dropout(h)
     
            # Convblock 1
            h = conv_unit(h, "conv10", 64, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv11", 64, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv12", 64, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv13", 64, k=4, s=2, p=1, act=act, test=test) # 16 -> 8
            if not test:
                h = F.dropout(h)
     
            # Deconvblock0
            h = deconv_unit(h, "deconv00", 64, k=4, s=2, p=1, act=act, test=test) # 8 -> 16
            h = deconv_unit(h, "deconv01", 64, k=3, s=1, p=1, act=act, test=test)
     
            h = deconv_unit(h, "deconv02", 64, k=3, s=1, p=1, act=act, test=test)
            h = deconv_unit(h, "deconv03", 64, k=3, s=1, p=1, act=act, test=test)  
     
            # Deconvblock 1
            h = deconv_unit(h, "deconv10", 32, k=4, s=2, p=1, act=act, test=test)  # 16 -> 32
            h = deconv_unit(h, "deconv11", 32, k=3, s=1, p=1, act=act, test=test)
            h = deconv_unit(h, "deconv12", 32, k=3, s=1, p=1, act=act, test=test)
            h = deconv_unit(h, "deconv13", 3, k=3, s=1, p=1, act=None, test=test)

        return h
Developer ID: kzky, Project: works, Lines: 33, Source: cnn_ae_model_000.py


Example 6: ce_loss_with_uncertainty

def ce_loss_with_uncertainty(ctx, pred, y_l, log_var):
    r = F.randn(0., 1., log_var.shape)
    r = F.pow_scalar(F.exp(log_var), 0.5) * r
    h = pred + r
    with nn.context_scope(ctx):
        loss_ce = F.mean(F.softmax_cross_entropy(h, y_l))
    return loss_ce
Developer ID: kzky, Project: works, Lines: 7, Source: cnn_model_060.py


Example 7: cifar10_resnet23_prediction

def cifar10_resnet23_prediction(ctx, image, test=False):
    """
    Construct ResNet 23
    """
    # Residual Unit
    def res_unit(x, scope_name, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C / 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C / 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
            return h

    # Random generator for using the same init parameters in all devices
    nmaps = 64
    ncls = 10

    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)

        h = res_unit(h, "conv2", False)    # -> 32x32
        h = res_unit(h, "conv3", True)     # -> 16x16
        h = bn_dropout(h, "bn_dropout1", test)
        h = res_unit(h, "conv4", False)    # -> 16x16
        h = res_unit(h, "conv5", True)     # -> 8x8
        h = bn_dropout(h, "bn_dropout2", test)
        h = res_unit(h, "conv6", False)    # -> 8x8
        h = res_unit(h, "conv7", True)     # -> 4x4
        h = bn_dropout(h, "bn_dropout3",  test)
        h = res_unit(h, "conv8", False)    # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        pred = PF.affine(h, ncls)

    return pred
Developer ID: kzky, Project: works, Lines: 60, Source: cnn_model_057.py


Example 8: sigma_regularization

def sigma_regularization(ctx, log_var, one):
    with nn.context_scope(ctx):
        h = F.exp(log_var)
        h = F.pow_scalar(h, 0.5)
        h = F.mean(h, axis=1)
        r = F.mean(F.squared_error(h, one))
    return r
Developer ID: kzky, Project: works, Lines: 7, Source: cnn_model_042.py


Example 9: kl_divergence

def kl_divergence(ctx, pred, label, log_var):
    with nn.context_scope(ctx):
        s = F.pow_scalar(F.exp(log_var), 0.5)
        elms = softmax_with_temperature(ctx, label, s) \
               * F.log(F.softmax(pred, axis=1))
        loss = -F.mean(F.sum(elms, axis=1))
    return loss
Developer ID: kzky, Project: works, Lines: 7, Source: cnn_model_063.py


Example 10: sigmas_regularization

def sigmas_regularization(ctx, log_var0, log_var1):
    with nn.context_scope(ctx):
        h0 = F.exp(log_var0)
        h0 = F.pow_scalar(h0, 0.5)
        h1 = F.exp(log_var1)
        h1 = F.pow_scalar(h1, 0.5)
        r = F.mean(F.squared_error(h0, h1))
    return r
Developer ID: kzky, Project: works, Lines: 8, Source: cnn_model_060.py


Example 11: sr_loss_with_uncertainty

def sr_loss_with_uncertainty(ctx, pred0, pred1, log_var0, log_var1):
    #TODO: squared error/absolute error
    s0 = F.exp(log_var0)
    s1 = F.exp(log_var1)
    squared_error = F.squared_error(pred0, pred1)
    with nn.context_scope(ctx):
        loss_sr = F.mean(squared_error * (1 / s0 + 1 / s1) + (s0 / s1 + s1 / s0)) * 0.5
    return loss_sr
Developer ID: kzky, Project: works, Lines: 8, Source: cnn_model_060.py


Example 12: test_randint_forward

def test_randint_forward(seed, ctx, func_name, low, high, shape):
    with nn.context_scope(ctx):
        o = F.randint(low, high, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    assert np.all(o.d < high)
    assert np.all(o.d >= low)
Developer ID: zwsong, Project: nnabla, Lines: 8, Source: test_random_functions.py


Example 13: er_loss

def er_loss(ctx, pred):
    with nn.context_scope(ctx):
        bs = pred.shape[0]
        d = np.prod(pred.shape[1:])
        denominator = bs * d
        pred_normalized = F.softmax(pred)
        pred_log_normalized = F.log(F.softmax(pred))
        loss_er = - F.sum(pred_normalized * pred_log_normalized) / denominator
    return loss_er
Developer ID: kzky, Project: works, Lines: 9, Source: cnn_model_060.py


Example 14: sr_loss_with_uncertainty

def sr_loss_with_uncertainty(ctx, pred0, pred1, log_var0, log_var1):
    var0 = F.exp(log_var0)
    var1 = F.exp(log_var1)
    s0 = F.pow_scalar(var0, 0.5)
    s1 = F.pow_scalar(var0, 0.5)  # kept as in the source; var1 is presumably intended here
    squared_error = F.squared_error(pred0, pred1)
    with nn.context_scope(ctx):
        loss = F.log(s1/s0) + (var0/var1 + squared_error/var1) * 0.5
        loss_sr = F.mean(loss)
    return loss_sr
Developer ID: kzky, Project: works, Lines: 10, Source: cnn_model_079.py


Example 15: test_image_augmentation_forward

def test_image_augmentation_forward(seed, ctx, func_name):
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(16, 3, 8, 8).astype(np.float32)]
    i = nn.Variable(inputs[0].shape)
    # NNabla forward
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.image_augmentation(i)
    assert o.d.shape == inputs[0].shape

    shape = (3, 5, 8)
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.image_augmentation(i, shape=shape, pad=(2, 2),
                                 min_scale=0.8, max_scale=1.2, angle=0.2,
                                 aspect_ratio=1.1, distortion=0.1,
                                 flip_lr=True, flip_ud=False,
                                 brightness=0.1, brightness_each=True,
                                 contrast=1.1, contrast_center=0.5, contrast_each=True,
                                 noise=0.1, seed=0)
    assert o.d.shape == (inputs[0].shape[0],) + shape
Developer ID: zwsong, Project: nnabla, Lines: 19, Source: test_image_augmentation.py


Example 16: sr_loss_with_uncertainty

def sr_loss_with_uncertainty(ctx, pred0, pred1, log_v0, log_v1, 
                             log_s0, log_s1):
    v0 = F.exp(log_v0)
    v1 = F.exp(log_v1)
    squared_error = F.squared_error(pred0, pred1)
    s0 = F.exp(log_s0)
    s1 = F.exp(log_s1)
    with nn.context_scope(ctx):
        error = squared_error * (1 / v0 + 1 / v1) + (v0 / v1 + v1 / v0) + (s0 / s1 + s1 / s0)
        loss_sr = F.mean(error) * 0.5
    return loss_sr
Developer ID: kzky, Project: works, Lines: 11, Source: cnn_model_050.py


Example 17: test_one_hot_forward

def test_one_hot_forward(seed, inshape, shape, ctx, func_name):
    rng = np.random.RandomState(seed)
    # Input
    input = rng.randint(0, shape[0], size=inshape)
    vinput = nn.Variable(input.shape, need_grad=False)
    vinput.d = input
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.one_hot(vinput, shape)
    r = ref_one_hot(input, shape)
    assert np.allclose(o.d, r)
    assert func_name == o.parent.name
Developer ID: zwsong, Project: nnabla, Lines: 11, Source: test_one_hot.py


Example 18: test_batch_normalization_forward_backward

def test_batch_normalization_forward_backward(seed, axis, decay_rate, eps,
                                              output_stat, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = list(create_inputs(rng, axis))
    axes = [axis]
    batch_stat = True
    function_tester(rng, F.batch_normalization, ref_batch_normalization,
                    inputs,
                    func_args=[axes, decay_rate, eps, batch_stat, output_stat],
                    backward=[True, True, True, False, False],
                    ctx=ctx, func_name=func_name, dstep=1e-2, atol_b=1e-2)

    # Check if running mean and var works.
    vinputs = []
    for i in inputs:
        vinputs.append(nn.Variable(i.shape, True))
        vinputs[-1].d = i
    for i in range(5):
        inputs[0] = rng.randn(*inputs[0].shape)
        vinputs[0].d[...] = inputs[0]
        ref_y = ref_batch_normalization(
            *(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        with nn.context_scope(ctx), nn.auto_forward():
            y = F.batch_normalization(
                *(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        assert np.allclose(vinputs[3].d, inputs[3])
        assert np.allclose(vinputs[4].d, inputs[4], atol=1e-3)

    # Check if global stat mode works
    batch_stat = False
    if output_stat:
        return
    ref_y = ref_batch_normalization(
        *(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    with nn.context_scope(ctx), nn.auto_forward():
        y = F.batch_normalization(
            *(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    assert np.allclose(ref_y, y.d, atol=1e-6)
Developer ID: zwsong, Project: nnabla, Lines: 39, Source: test_batch_normalization.py


Example 19: test_random_crop_forward_backward

def test_random_crop_forward_backward(seed, inshape, shape, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    i = nn.Variable(inputs[0].shape, need_grad=True)
    i.d = inputs[0]
    # NNabla forward
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.random_crop(i, shape, 0, seed)
    if shape is not None:
        max_correl = 0
        possible_crop_range = [
            input - output for output, input in zip(shape, inshape)]
        for crop_pos in itertools.product(*map(tuple, map(lambda x: range(*x), [(0, r + 1) for r in possible_crop_range]))):
            r = inputs[0][crop_pos[0]:crop_pos[0] + shape[0], crop_pos[1]:crop_pos[1] + shape[1], crop_pos[2]:crop_pos[2] + shape[2]]
            assert(o.d.shape == r.shape)
            correl_and_p = pearsonr(o.d.flatten(), r.flatten())
            if correl_and_p[0] > max_correl:
                max_correl = correl_and_p[0]
    else:
        max_correl = pearsonr(o.d.flatten(), inputs[0].flatten())[0]

    assert(max_correl == 1.0)

    assert o.parent.name == func_name

    # Skipping Backward check
    g = np.random.randn(*i.shape)
    i.g = g
    o_grad = np.random.randn(*o.shape)
    o.g = o_grad
    o.parent.backward([i], [o])
    ref_grad = i.g.copy() - g

    # Check accum=False with NaN gradient
    i.g = np.float32('nan')
    o.parent.backward([i], [o], [False])
    assert not np.any(np.isnan(i.g))

    # Check if accum option works
    i.g[...] = 1
    o.g = o_grad
    o.parent.backward([i], [o], [False])
    assert np.allclose(i.g, ref_grad, atol=1e-6)

    # Check if need_grad works
    i.g[...] = 0
    i.need_grad = False
    o_diff = rng.randn(*o.shape).astype(i.d.dtype)
    o.backward(o_diff)
    assert np.all(i.g == 0)
Developer ID: zwsong, Project: nnabla, Lines: 51, Source: test_random_crop.py


Example 20: test_random_shift_forward_backward

def test_random_shift_forward_backward(seed, inshape, shifts, border_mode, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    i = nn.Variable(inputs[0].shape, need_grad=True)
    i.d = inputs[0]
    # NNabla forward
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.random_shift(i, shifts, border_mode, 0, seed)
    result_shifts = (0, 0, 0)
    max_correl = 0
    for shift_amount in itertools.product(*map(tuple, map(lambda x: range(*x), [(-2, 3) for _ in range(len(inshape))]))):
        r = scipy_shift(inputs[0], shift_amount, mode=border_mode)
        correl_and_p = pearsonr(o.d.flatten(), r.flatten())
        if correl_and_p[0] > max_correl:
            result_shifts = shift_amount
            max_correl = correl_and_p[0]
    ref = scipy_shift(inputs[0], result_shifts, mode=border_mode)
    if shifts is None:
        shifts = (0,) * len(inputs[0].shape)
    for result, shift_range in zip(result_shifts, shifts):
        assert abs(result) <= shift_range

    assert np.allclose(o.d, ref)
    assert o.parent.name == func_name

    # Skipping Backward check
    g = np.random.randn(*i.shape)
    i.g = g
    o_grad = np.random.randn(*o.shape)
    o.g = o_grad
    o.parent.backward([i], [o])
    ref_grad = i.g.copy() - g

    # Check accum=False with NaN gradient
    i.g = np.float32('nan')
    o.parent.backward([i], [o], [False])
    assert not np.any(np.isnan(i.g))

    # Check if accum option works
    i.g[...] = 1
    o.g = o_grad
    o.parent.backward([i], [o], [False])
    assert np.allclose(i.g, ref_grad, atol=1e-6)

    # Check if need_grad works
    i.g[...] = 0
    i.need_grad = False
    o_grad = rng.randn(*i.shape).astype(i.data.dtype)
    o.backward(o_grad)
    assert np.all(i.g == 0)
Developer ID: zwsong, Project: nnabla, Lines: 51, Source: test_random_shift.py



Note: The nnabla.context_scope examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.

