Python unittest_tools.seed_rng function code examples


This article collects typical usage examples of the Python function theano.tests.unittest_tools.seed_rng. If you have been wondering what exactly seed_rng does, how to call it, or what real-world uses of seed_rng look like, the hand-picked code examples below may help.



Below are 20 code examples of the seed_rng function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
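Before the examples, here is a minimal sketch of the pattern they all share: call utt.seed_rng() at the top of a test to seed numpy.random (by default from config.unittests.rseed, or with an explicit integer), so that randomly generated inputs are reproducible across runs. The test function test_scaled_sum and the computation inside it are hypothetical, for illustration only; only seed_rng and assert_allclose come from theano.tests.unittest_tools.

import numpy
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt


def test_scaled_sum():
    # Seed numpy.random (by default with config.unittests.rseed) so the
    # random input below is reproducible across test runs.
    utt.seed_rng()

    x = T.matrix('x')
    f = theano.function([x], (2 * x).sum())

    inp = numpy.asarray(numpy.random.rand(5, 4), dtype=theano.config.floatX)
    out = f(inp)
    utt.assert_allclose(2 * inp.sum(), out)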

Example 1: run_conv_nnet2_classif

def run_conv_nnet2_classif(use_gpu, seed, isize, ksize, bsize,
                           n_train=10,
                           check_isfinite=True,
                           verbose=0,
                           version=-1):
    """Run the train function returned by build_conv_nnet2_classif on one device.
    """

    utt.seed_rng(seed)  # Seeds numpy.random with seed
    train, params, x_shape, y_shape, mode = build_conv_nnet2_classif(
        use_gpu=use_gpu,
        isize=isize,
        ksize=ksize,
        n_batch=bsize,
        verbose=verbose,
        version=version,
        check_isfinite=check_isfinite)

    xval = my_rand(*x_shape)
    yval = my_rand(*y_shape)
    lr = theano._asarray(0.01, dtype='float32')

    rvals = my_zeros(n_train)
    for i in xrange(n_train):
        rvals[i] = train(xval, yval, lr)[0]
Developer: ChinaQuants | Project: Theano | Lines: 25 | Source: test_mlp.py


Example 2: setUp

 def setUp(self):
     if theano.config.mode == "FAST_COMPILE":
         m = theano.compile.mode.get_mode("FAST_RUN").excluding("local_elemwise_fusion")
     else:
         m = theano.compile.mode.get_default_mode().excluding("local_elemwise_fusion")
     self.m = m
     utt.seed_rng()
Developer: Jerryzcn | Project: Theano | Lines: 7 | Source: test_sigm.py


Example 3: test_full

def test_full():
    seed_rng()
    shapes = get_basic_shapes()
    shapes += get_shapes2()
    #test image stride
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(-1, -1))
    shapes += get_shapes2(scales_img=(2, 2), kern_stride=(-1, -1))

    #test subsample done in a separate fct

    shapes += [
        #other test
              ((2, 1, 2, 2), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
            , ((3, 2, 4, 4), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1))
            , ((4, 1, 10, 10), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
            , ((1, 1, 4, 4), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
            , ((4, 1, 10, 10), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
            , ((4, 1, 10, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
            , ((4, 1, 20, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
            , ((3, 2, 8, 8), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize
            , ((3, 2, 8, 6), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image
            , ((3, 2, 8, 6), (4, 2, 4, 3), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image, non-square kern
            , ((3, 2, 8, 6), (4, 2, 4, 6), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize ,non-square image, non-square kern, kernsize==imgsize on one dim
            , ((16, 5, 64, 64), (8, 5, 8, 8), (1, 1), (1, 1), (1, 1)) # a big one
            , ((16, 1, 28, 28), (20, 1, 5, 5), (1, 1), (1, 1), (1, 1)) # MNIST LeNET layer 1
            , ((20, 16, 32, 32), (1, 16, 28, 28), (1, 1), (1, 1), (1, 1)) # layer 1 backprop to weights

        #other test
            , ((3, 1, 1, 1), (2, 1, 5, 3), (1, 1), (1, 1), (1, 1))  # kernel bigger than image
            , ((3, 2, 1, 1), (4, 2, 1, 1), (1, 1), (1, 1), (1, 1))
            , ((3, 2, 4, 4), (4, 2, 2, 6), (1, 1), (1, 1), (1, 1))
            , ((3, 2, 4, 4), (4, 2, 8, 6), (1, 1), (1, 1), (1, 1))  # kernel bigger than image
            , ((4, 2, 10, 10), (3, 2, 2, 12), (1, 1), (1, 1), (1, 1))
            ]
    shapes += [
#        ((60,1,28,28),(20,1,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 1 layers
#            , ((60,20,12,12),(30,20,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 2 layers
             ((60,30,8,8),(20,30,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 1 full
#            , ((20,60,12,12),(30,60,8,8), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
#            , ((1,60,28,28),(20,60,24,24), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
#            , ((10,1,64,64),(20,1,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 1 layers
#            , ((10,20,29,29),(30,20,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 2 layers
            , ((10,30,23,23),(20,30,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 full
#            , ((20,10,29,29),(30,10,23,23), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 1
#            , ((1,10,64,64),(20,10,58,58), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 2
            #Test more than maxThreadsDim0
            , ((2,4,13,1050), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
            , ((2,4,1050,13), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
            ]

#    shapes=shapes[:277]
    version = [-2, -1, 0, 1, 2, 3, 4, 5]
    verbose = 0
#    version=[4]
    random = True

    exec_conv(version, shapes, verbose, random, 'full')
Developer: Donghuan | Project: Theano | Lines: 60 | Source: test_conv_cuda_ndarray.py


Example 4: test_invalid_input_shape

    def test_invalid_input_shape(self):
        """
        Tests that an error is raised when the shape given at build time
        does not match the shape at run time.
        """
        seed_rng()
        verbose = 0
        random = True
        print_ = False
        ones = False
        if ones:
            random = False

        global theano_mode
        theano_mode_orig = theano_mode
        try:
            if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
                theano_mode = theano.compile.mode.get_mode(
                    'FAST_RUN').including('gpu')
                for mode in ['valid', 'full']:
                    for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),
                                   ((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),
                                   #((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),
                                   # We use only the number of columns.
                                   ]:

                        self.assertRaises(ValueError, _params_allgood,
                                          shapes[0], shapes[1],
                                          verbose=verbose, random=random,
                                          mode=mode,
                                          print_=print_, ones=ones,
                                          compile_kshp=shapes[2])
        finally:
            theano_mode = theano_mode_orig
Developer: JoeGlobal2014 | Project: Theano | Lines: 34 | Source: test_conv_cuda_ndarray.py


Example 5: test_valid_4

def test_valid_4():
    seed_rng()
    shapes = get_valid_shapes()
    version = [4]
    verbose = 0

    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []

    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = (
            [ishape[0]]
            + [kshape[0]]
            + list(numpy.asarray(ishape[2:]) - numpy.asarray(kshape[2:]) + numpy.asarray([1, 1]))
        )
        if oshape[3] > device_prop["maxThreadsDim0"]:
            continue
        if ishape[1] > 1:
            continue
        if (kshape[2] * ishape[3] * 4 + numpy.prod(kshape[2:]) * 4) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2

    exec_conv(version, shapes, verbose, random, "valid", print_=print_, ones=ones, rtol=1.1e-5)
Developer: gyenney | Project: Tools | Lines: 30 | Source: test_conv_cuda_ndarray.py


Example 6: _test_subsample

def _test_subsample(cls, mode, version_valid=[-1], version_full=[-1]):
    seed_rng()
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1))
          ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))

    # We put only the versions that implement subsampling, to make the
    # test faster.
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False

    for t in exec_conv(version_valid, shapes, verbose, random, 'valid',
                       print_=print_, ones=ones,
                       theano_mode=mode, cls=cls):
        yield t
    for t in exec_conv(version_full, shapes, verbose, random, 'full',
                       print_=print_, ones=ones,
                       theano_mode=mode, cls=cls):
        yield t
Developer: alouisos | Project: Theano | Lines: 30 | Source: test_conv_cuda_ndarray.py


Example 7: test_doubleop

def test_doubleop():
    utt.seed_rng()
    x = matrix()
    f = function([x], DoubleOp()(x))
    inp = numpy.asarray(numpy.random.rand(5, 4), dtype=config.floatX)
    out = f(inp)
    utt.assert_allclose(inp * 2, out)
Developer: wangxiong2015 | Project: ccw_tutorial_theano | Lines: 7 | Source: test_doubleop.py


Example 8: test_subsample

def test_subsample():
    seed_rng()
    # implement when
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1))
          ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))

    # We put only the versions that implement subsampling, to make the test faster.
    version_valid = [-2, -1, 1, 3, 11, 12]
    version_full = [-2, -1]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False

    exec_conv(version_valid, shapes, verbose, random, 'valid',
              print_=print_, ones=ones)
    exec_conv(version_full, shapes, verbose, random, 'full',
              print_=print_, ones=ones)
Developer: Dimitris0mg | Project: Theano | Lines: 28 | Source: test_conv_cuda_ndarray.py


Example 9: test_batch_normalization_train_without_running_averages

def test_batch_normalization_train_without_running_averages():
    # compile and run batch_normalization_train without running averages
    utt.seed_rng()

    x, scale, bias, dy = T.tensor4('x'), T.tensor4('scale'), T.tensor4('bias'), T.tensor4('dy')
    data_shape = (5, 10, 30, 25)
    param_shape = (1, 10, 30, 25)

    # forward pass
    out, x_mean, x_invstd = bn.batch_normalization_train(x, scale, bias, 'per-activation')
    # backward pass
    grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
    # compile
    f = theano.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)
    # check if the abstract Ops have been replaced
    assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,
                                      bn.AbstractBatchNormInference,
                                      bn.AbstractBatchNormTrainGrad))
                    for n in f.maker.fgraph.toposort()])
    # run
    X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    f(X, Scale, Bias, Dy)
Developer: Faruk-Ahmed | Project: Theano | Lines: 25 | Source: test_bn.py


Example 10: test_valid_7_8_13

def test_valid_7_8_13():
    seed_rng()
    shapes = get_valid_shapes()
    # This is to test the "new" lower shared memory usage.
    shapes.append(((10, 30, 60, 60), (20, 30, 40, 40),
                   (1, 1), (1, 1), (1, 1)))
    version = [7, 8, 13]
    verbose = 0

    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []

#    print len(shapes)
    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
                                                  numpy.asarray(kshape[2:]) +
                                                  numpy.asarray([1, 1]))
        if oshape[2] * oshape[3] > device_prop['maxThreadsDim0']:
            continue
        if max(numpy.prod(ishape[2:]) * 4 + 2 * kshape[3] * 4,
               oshape[2] * oshape[3] * 4 * 2) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2
#    print len(shapes2)

    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5)
Developer: JoeGlobal2014 | Project: Theano | Lines: 33 | Source: test_conv_cuda_ndarray.py


Example 11: test_subsample

def test_subsample():
    seed_rng()
    # implement when
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1))
          ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))

    version_valid = [-1]
    version_full = [-1]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False

    exec_conv(version_valid, shapes, verbose, random, 'valid',
              print_=print_, ones=ones)
    exec_conv(version_full, shapes, verbose, random, 'full',
              print_=print_, ones=ones)
Developer: alimuldal | Project: Theano | Lines: 27 | Source: test_conv_cuda_ndarray.py


Example 12: test_valid_9_10

def test_valid_9_10():
    seed_rng()
    shapes = get_valid_shapes()
    version = [9, 10]
    verbose = 0

    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []

#    print len(shapes)
    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
                                                  numpy.asarray(kshape[2:]) +
                                                  numpy.asarray([1, 1]))
        if oshape[3] > device_prop['maxThreadsDim0']:
            continue
        if (kshape[3] * 4 + ishape[3]) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2
#    print len(shapes2)

    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5)
Developer: JoeGlobal2014 | Project: Theano | Lines: 29 | Source: test_conv_cuda_ndarray.py


Example 13: test_valid

def test_valid(conv_gemm=False):
    seed_rng()
    shapes = get_valid_shapes()

    #shapes=shapes[400:426]
    # I put -1 in case we forget to add a version to the test.
    # I put -2 to test the reference version.
    version = [-2, -1, 6]
    verbose = 0

    random = True
    print_ = False
    ones = False
    if ones:
        random = False

    if conv_gemm:
        # Test the GpuCorrMM version
        mode = theano_mode.including("conv_gemm")
        cls = cuda.blas.BaseGpuCorrMM
        # dummy version; not used by GpuCorrMM so one version is enough
        version = [-1]
        # Add tests with strided inputs by still square images and filters.
        shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
        shapes += get_shapes2(scales_kern=(2, 2), kern_stride=(2, 2))
    else:
        mode = theano_mode
        cls = None
    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5,
              theano_mode=mode, cls=cls)
Developer: Jakobularius | Project: Theano | Lines: 31 | Source: test_conv_cuda_ndarray.py


Example 14: test_logical_shapes

    def test_logical_shapes(self):
        seed_rng()
        for stride in range(1, 4):
            kshp = (10, 2, 10, 10)
            featshp = (3, 10, 11, 11)

            a = tensor.ftensor4()
            A = tensor.ftensor4()

            # Need to transpose first two dimensions of kernel, and reverse
            # index kernel image dims (for correlation)
            kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

            featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                               featshp[3] * stride)
            kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
            #print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
            image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
                                                border_mode='full',
                                                image_shape=featshp,
                                                filter_shape=kshp_rotated,
                                                imshp_logical=featshp_logical[1:],
                                                kshp_logical=kshp[2:])

            func = theano.function([a, A], image_estimate, mode=theano_mode)
            #theano.printing.debugprint(func,)
            assert any([isinstance(node.op, theano.sandbox.cuda.blas.GpuConv)
                        for node in func.maker.fgraph.toposort()])

            a_in = numpy.random.randn(*featshp).astype("float32")
            A_in = numpy.random.randn(*kshp).astype("float32")

            func(a_in, A_in)
Developer: JoeGlobal2014 | Project: Theano | Lines: 33 | Source: test_conv_cuda_ndarray.py


Example 15: setUp

 def setUp(self):
     utt.seed_rng()
     self.mode = mode_with_gpu.excluding('constant_folding')
     self.gemv_op = gpu_sparse_block_gemv
     self.outer_op = gpu_sparse_block_outer
     self.gemv_class = GpuSparseBlockGemv
     self.outer_class = GpuSparseBlockOuter
Developer: Abioy | Project: Theano | Lines: 7 | Source: test_blocksparse.py


Example 16: test_valid_1_3_11_12

def test_valid_1_3_11_12():
    seed_rng()
    shapes = get_valid_shapes()
    version = [1, 3, 11, 12]
    verbose = 0

    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []

    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
                                                  numpy.asarray(kshape[2:]) +
                                                  numpy.asarray([1, 1]))
        if oshape[3] > device_prop['maxThreadsDim0']:
            continue
        if ((numpy.prod(ishape[2:]) + numpy.prod(kshape[2:])) * 4 >
            (16 * 1024 - 150)):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2

    for t in exec_conv(version, shapes, verbose, random, 'valid',
                       print_=print_, ones=ones, rtol=1.1e-5):
        yield t
Developer: alouisos | Project: Theano | Lines: 29 | Source: test_conv_cuda_ndarray.py


Example 17: test_valid

def test_valid():
    seed_rng()
    shapes = get_valid_shapes()

    #shapes=shapes[400:426]
    # I put -1 in case we forget to add a version to the test.
    # I put -2 to test the reference version.
    version = [-2, -1, 6]
    verbose = 0
#    version=[1]

    random = True
    print_ = False
    ones = False
    if ones:
        random = False

#    exec_conv(version, shapes, verbose, random, 'valid',
#              print_=print_, ones=ones, rtol=1.1e-5)

    mode = theano_mode.including("conv_gemm")

    version = [-1]
    # Remove case not supported
    # Add tests with strided inputs by still square images and filters.
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
    shapes += get_shapes2(scales_kern=(2, 2), kern_stride=(2, 2))
    # Keep only tests with square images and filters even with inputs strides
    shapes = [shp for shp in shapes if (
        shp[0][2]/shp[3][0] == shp[0][3]/shp[3][1] and
        shp[1][2]/shp[4][0] == shp[1][3]/shp[4][1])]
    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5,
              theano_mode=mode, cls=cuda.blas.GpuCorrMM)
Developer: John-Annual | Project: Theano | Lines: 34 | Source: test_conv_cuda_ndarray.py


Example 18: test_GpuCrossentropySoftmaxArgmax1HotWithBias

def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
    # This is a basic test for GpuCrossentropySoftmaxArgmax1HotWithBias.
    # We check that we loop when there are too many threads.

    n_in = 1000
    batch_size = 4097
    n_out = 1250

    if not isinstance(mode_with_gpu, theano.compile.DebugMode):
        n_in = 4098
        n_out = 4099

    y = T.lvector('y')

    b = T.fvector('b')

    # We precompute the dot product with a big shape beforehand so that the
    # test of GpuCrossentropySoftmax1HotWithBiasDx does not fail with the
    # error "the launch timed out and was terminated" on GPU cards that are
    # not powerful enough. We need the big shape to check the corner case.
    dot_result = T.fmatrix('dot_result')

    # Seed numpy.random with config.unittests.rseed
    utt.seed_rng()

    xx = np.asarray(np.random.rand(batch_size, n_in),
                    dtype=np.float32)
    yy = np.ones((batch_size,), dtype='int32')
    b_values = np.zeros((n_out,), dtype='float32')
    W_values = np.asarray(np.random.rand(n_in, n_out), dtype='float32')

    dot_value = np.asarray(np.dot(xx, W_values), dtype='float32')
    del W_values
    p_y_given_x = T.nnet.softmax(dot_result + b)
    y_pred = T.argmax(p_y_given_x, axis=-1)
    loss = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
    dW = T.grad(loss, dot_result)
    classify = theano.function(inputs=[y, b, dot_result],
                               outputs=[loss, y_pred, dW],
                               mode=mode_without_gpu)
    classify_gpu = theano.function(inputs=[y, b, dot_result],
                                   outputs=[loss, y_pred, dW],
                                   mode=mode_with_gpu)

    assert any([isinstance(node.op,
                           T.nnet.CrossentropySoftmaxArgmax1HotWithBias)
                for node in classify.maker.fgraph.toposort()])
    assert any([isinstance(node.op,
                           GpuCrossentropySoftmaxArgmax1HotWithBias)
                for node in classify_gpu.maker.fgraph.toposort()])

    out = classify(yy, b_values, dot_value)
    gout = classify_gpu(yy, b_values, dot_value)

    assert len(out) == len(gout) == 3
    utt.assert_allclose(out[0], gout[0])
    utt.assert_allclose(out[2], gout[2], atol=3e-6)
    utt.assert_allclose(out[1], gout[1])
Developer: DEVESHTARASIA | Project: Theano | Lines: 59 | Source: test_nnet.py


Example 19: test_GpuCrossentropySoftmaxArgmax1HotWithBias

def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
    """
    This is basic test for GpuCrossentropySoftmaxArgmax1HotWithBias

    We check that we loop when their is too much threads
    TODO: check that we loop when their is too much block(>32*1024)

    """

    n_in = 1000
    batch_size = 4097
    n_out = 1250

    if theano.config.mode!="DEBUG_MODE":
        n_in = 4098
        n_out = 4099

    x = T.fmatrix('x')
    y = T.lvector('y')


    b = T.fvector('b')
    #W = T.fmatrix('W')

    # We precompute the dot product with a big shape beforehand so that the
    # test of GpuCrossentropySoftmax1HotWithBiasDx does not fail with the
    # error "the launch timed out and was terminated" on GPU cards that are
    # not powerful enough. We need the big shape to check the corner case.
    dot_result = T.fmatrix('dot_result')

    # Seed numpy.random with config.unittests.rseed
    utt.seed_rng()

    xx = numpy.asarray(numpy.random.rand(batch_size,n_in),dtype=numpy.float32)
    #?????yy = numpy.ones((batch_size,),dtype='float32')
    yy = numpy.ones((batch_size,),dtype='int32')
    b_values = numpy.zeros((n_out,),dtype='float32')
    W_values = numpy.asarray(numpy.random.rand(n_in,n_out),dtype='float32')

    dot_value = numpy.asarray(numpy.dot(xx, W_values),dtype='float32')
    del W_values
    p_y_given_x = T.nnet.softmax(dot_result+b)
    y_pred = T.argmax(p_y_given_x, axis=-1)
    loss = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
    dW = T.grad(loss, dot_result)
    classify = theano.function( inputs = [x,y,b,dot_result], outputs = [loss,y_pred,dW],
                                mode = mode_without_gpu)
    classify_gpu = theano.function( inputs = [x,y,b,dot_result], outputs = [loss,y_pred,dW],
                                    mode = mode_with_gpu)
    #theano.printing.debugprint(classify)
    #theano.printing.debugprint(classify_gpu)

    assert any([isinstance(node.op,T.nnet.CrossentropySoftmaxArgmax1HotWithBias) for node in classify.maker.env.toposort()])
    assert any([isinstance(node.op,cuda.nnet.GpuCrossentropySoftmaxArgmax1HotWithBias) for node in classify_gpu.maker.env.toposort()])

    out=classify(xx,yy,b_values,dot_value)
    gout=classify_gpu(xx,yy,b_values,dot_value)

    assert len(out)==len(gout)==3
    assert numpy.allclose(out[0],gout[0])
    assert numpy.allclose(out[2],gout[2],atol=3e-6),numpy.absolute(gout-out).max()
    assert numpy.allclose(out[1],gout[1]),[(id,out[1][id],gout[1][id],val) for id,val in enumerate(out[1]-gout[1]) if val!=0]
Developer: HaniAlmousli | Project: Theano | Lines: 59 | Source: test_nnet.py


Example 20: test_conv_nnet2

def test_conv_nnet2():
    utt.seed_rng()
    rval_gpu = run_conv_nnet2(True)
    if True:
        utt.seed_rng()
        rval_cpu = run_conv_nnet2(False)
        # print rval_cpu[0], rval_gpu[0],rval_cpu[0]-rval_gpu[0]
        utt.assert_allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-4)
Developer: 12190143 | Project: Theano | Lines: 8 | Source: test_mlp.py



Note: the theano.tests.unittest_tools.seed_rng examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before redistributing or using the code. Do not reproduce this article without permission.

