
Java Type Class Code Examples


This article compiles typical usage examples of the Java class android.support.v8.renderscript.Type. If you are wondering what the Type class does, how to use it, or what real-world Type usage looks like, the curated class examples below may help.



The Type class belongs to the android.support.v8.renderscript package. Twenty code examples of the class are presented below, sorted by popularity by default.
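
Before the project examples, here is a minimal sketch (not taken from any of the quoted projects) showing the two ways a Type is typically constructed in the snippets below: the Type.Builder pattern and the Type.createXY convenience factory.

import android.support.v8.renderscript.Allocation;
import android.support.v8.renderscript.Element;
import android.support.v8.renderscript.RenderScript;
import android.support.v8.renderscript.Type;

public class TypeBasics {
    // Build a 2D RGBA_8888 Type step by step with Type.Builder, then back an Allocation with it.
    static Allocation createRgbaAllocation(RenderScript rs, int width, int height) {
        Type rgbaType = new Type.Builder(rs, Element.RGBA_8888(rs))
                .setX(width)
                .setY(height)
                .create();
        return Allocation.createTyped(rs, rgbaType);
    }

    // Type.createXY is a shorter equivalent for plain 2D types (used heavily in the BLAS examples below).
    static Allocation createFloatMatrix(RenderScript rs, int columns, int rows) {
        Type matrixType = Type.createXY(rs, Element.F32(rs), columns, rows);
        return Allocation.createTyped(rs, matrixType);
    }
}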

Example 1: loadModel

import android.support.v8.renderscript.Type; // import the required package/class
public void loadModel(String path) throws IOException {
    mInputStream = mContext.getAssets().open(path + "/W", AssetManager.ACCESS_BUFFER);
    ByteBuffer bb = readInput(mInputStream);
    FloatBuffer.wrap(W).put(bb.asFloatBuffer());

    // padding for GPU BLAS when necessary.
    int W_height_input = in_channels * ksize * ksize;
    if (padded_Y_blas == W_height_input) {
        // If the input width already satisfies the requirement, just copy to the Allocation.
        W_alloc.copyFrom(W);
    } else {
        // If not, a temp allocation needs to be created.
        Allocation input = Allocation.createTyped(mRS,
                Type.createXY(mRS, Element.F32(mRS), W_height_input, out_channels));
        input.copyFrom(W);
        W_alloc.copy2DRangeFrom(0, 0, W_height_input, out_channels, input, 0, 0);
    }

    mInputStream = mContext.getAssets().open(path + "/b", AssetManager.ACCESS_BUFFER);
    bb = readInput(mInputStream);
    FloatBuffer.wrap(b).put(bb.asFloatBuffer());
    b_alloc.copyFrom(b);

    mInputStream.close();
    Log.v(TAG, "Convolution2D loaded: " + b[0]);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 27, Source file: Convolution2D.java


Example 2: yuvToRgb

import android.support.v8.renderscript.Type; // import the required package/class
/**
 * Converts an NV21 image to a Bitmap.
 * @param nv21Image the NV21 image to convert.
 */
public static Bitmap yuvToRgb(RenderScript rs, Nv21Image nv21Image) {
    long startTime = System.currentTimeMillis();

    Type.Builder yuvTypeBuilder = new Type.Builder(rs, Element.U8(rs))
            .setX(nv21Image.nv21ByteArray.length);
    Type yuvType = yuvTypeBuilder.create();
    Allocation yuvAllocation = Allocation.createTyped(rs, yuvType, Allocation.USAGE_SCRIPT);
    yuvAllocation.copyFrom(nv21Image.nv21ByteArray);

    Type.Builder rgbTypeBuilder = new Type.Builder(rs, Element.RGBA_8888(rs));
    rgbTypeBuilder.setX(nv21Image.width);
    rgbTypeBuilder.setY(nv21Image.height);
    Allocation rgbAllocation = Allocation.createTyped(rs, rgbTypeBuilder.create());

    ScriptIntrinsicYuvToRGB yuvToRgbScript = ScriptIntrinsicYuvToRGB.create(rs, Element.RGBA_8888(rs));
    yuvToRgbScript.setInput(yuvAllocation);
    yuvToRgbScript.forEach(rgbAllocation);

    Bitmap bitmap = Bitmap.createBitmap(nv21Image.width, nv21Image.height, Bitmap.Config.ARGB_8888);
    rgbAllocation.copyTo(bitmap);

    Log.d("NV21", "Conversion to Bitmap: " + (System.currentTimeMillis() - startTime) + "ms");
    return bitmap;
}
 
Developer: silvaren, Project: easyrs, Lines of code: 29, Source file: YuvToRgb.java


Example 3: resize

import android.support.v8.renderscript.Type; // import the required package/class
/**
 * Resizes a Bitmap image to a target width and height.
 */
public static Bitmap resize(RenderScript rs, Bitmap inputBitmap, int targetWidth,
                            int targetHeight) {
    RSToolboxContext bitmapRSContext = RSToolboxContext.createFromBitmap(rs, inputBitmap);
    Bitmap.Config config = inputBitmap.getConfig();
    Bitmap outputBitmap = Bitmap.createBitmap(targetWidth, targetHeight, config);
    Type outType = Type.createXY(bitmapRSContext.rs, bitmapRSContext.ain.getElement(), targetWidth,
            targetHeight);
    Allocation aout = Allocation.createTyped(bitmapRSContext.rs, outType);

    ScriptIntrinsicResize resizeScript = ScriptIntrinsicResize.create(bitmapRSContext.rs);
    resizeScript.setInput(bitmapRSContext.ain);
    resizeScript.forEach_bicubic(aout);

    aout.copyTo(outputBitmap);
    return outputBitmap;
}
 
Developer: silvaren, Project: easyrs, Lines of code: 20, Source file: Resize.java
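
A usage sketch for the helper above: the Resize class name comes from the source file listed, while the Context and input Bitmap are assumptions for illustration.

import android.content.Context;
import android.graphics.Bitmap;
import android.support.v8.renderscript.RenderScript;

public class ThumbnailHelper {
    // Hypothetical call site for the resize() helper shown in Example 3.
    static Bitmap makeThumbnail(Context context, Bitmap sourceBitmap) {
        RenderScript rs = RenderScript.create(context);
        try {
            return Resize.resize(rs, sourceBitmap, 320, 240);
        } finally {
            rs.destroy(); // release the RenderScript context when finished
        }
    }
}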


Example 4: MandelbrotRSGen

import android.support.v8.renderscript.Type; // import the required package/class
public MandelbrotRSGen(Context context, int width, int height, int iterations, int[] palette) {
    super(context, width, height, iterations, palette);

    //  Create the RenderScript context used to communicate with our RS.
    //  Then create our actual script which will do the real work
    mRSCtx = RenderScript.create(mContext);
    mMandGen = new ScriptC_mand_gen(mRSCtx);

    //  Set the initial parameters for the generator.
    //  TODO: ADD SUPPORT FOR RE-CENTERING AND ZOOM
    mMandGen.set_width(width);
    mMandGen.set_height(height);
    mMandGen.set_iter(iterations);
    mMandGen.set_paletteLen(mPalette.length);
    Type.Builder intArrayBuilder = new Type.Builder(mRSCtx,
                                                    Element.I32(mRSCtx));
    intArrayBuilder.setX(mPalette.length);
    Allocation allocPalette =
        Allocation.createTyped(mRSCtx,
                               intArrayBuilder.create());
    allocPalette.copyFrom(mPalette);
    mMandGen.bind_palette(allocPalette);
}
 
Developer: hiq-larryschiefer, Project: SimpleFractal, Lines of code: 24, Source file: MandelbrotRSGen.java


Example 5: MandelbrotRSFloatGen

import android.support.v8.renderscript.Type; // import the required package/class
public MandelbrotRSFloatGen(Context context, int width, int height, int iterations, int[] palette) {
    super(context, width, height, iterations, palette);

    //  Create the RenderScript context used to communicate with our RS.
    //  Then create our actual script which will do the real work
    mRSCtx = RenderScript.create(mContext);
    mMandGen = new ScriptC_mand_float_gen(mRSCtx);

    //  Set the initial parameters for the generator.
    //  TODO: ADD SUPPORT FOR RE-CENTERING AND ZOOM
    mMandGen.set_width(width);
    mMandGen.set_height(height);
    mMandGen.set_iter(iterations);
    mMandGen.set_paletteLen(mPalette.length);
    Type.Builder intArrayBuilder = new Type.Builder(mRSCtx,
                                                    Element.I32(mRSCtx));
    intArrayBuilder.setX(mPalette.length);
    Allocation allocPalette =
        Allocation.createTyped(mRSCtx,
                               intArrayBuilder.create());
    allocPalette.copyFrom(mPalette);
    mMandGen.bind_palette(allocPalette);
}
 
Developer: hiq-larryschiefer, Project: SimpleFractal, Lines of code: 24, Source file: MandelbrotRSFloatGen.java


Example 6: convertYuv420ToBitmap

import android.support.v8.renderscript.Type; // import the required package/class
private static Bitmap convertYuv420ToBitmap(Image image) {
    RenderScript rs = mRenderScript;
    final int width = image.getWidth();
    final int height = image.getHeight();

    // prepare input Allocation for RenderScript
    Type.Builder inType = new Type.Builder(rs, Element.U8(rs)).setX(width).setY(height).setYuvFormat(ImageFormat.YV12);
    Allocation inAlloc = Allocation.createTyped(rs, inType.create(), Allocation.USAGE_SCRIPT);
    byte[] rawBuffer = new byte[inAlloc.getBytesSize()];
    int lumaSize = width * height;
    int chromaSize = (width / 2) * (height / 2);
    Image.Plane[] planes = image.getPlanes();
    planes[0].getBuffer().get(rawBuffer, 0, lumaSize);
    planes[1].getBuffer().get(rawBuffer, lumaSize, chromaSize);
    planes[2].getBuffer().get(rawBuffer, lumaSize + chromaSize, chromaSize);
    inAlloc.copyFromUnchecked(rawBuffer);

    // prepare output Allocation for RenderScript
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Allocation outAlloc = Allocation.createFromBitmap(rs, bmp, Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT | Allocation.USAGE_SHARED);

    // convert YUV to RGB colorspace
    ScriptC_yuv2rgb converter = new ScriptC_yuv2rgb(rs);
    converter.set_gYUV(inAlloc);
    converter.forEach_convert(outAlloc);
    outAlloc.copyTo(bmp);
    return bmp;
}
 
Developer: yohhoy, Project: heifreader, Lines of code: 29, Source file: HeifReader.java


Example 7: Convolution2D

import android.support.v8.renderscript.Type; // import the required package/class
public Convolution2D(Context ctx, RenderScript rs, int in_channels, int out_channels, int ksize, int stride, int pad) {
    super(ctx, rs);

    this.in_channels = in_channels;
    this.out_channels = out_channels;
    this.ksize = ksize;
    this.stride = stride;
    this.pad = pad;
    // X dimension for W: in_channels * ksize * ksize
    // Y dimension for W: out_channels
    this.W = new float[out_channels * in_channels * ksize * ksize];
    this.b = new float[out_channels];

    // Pad the width of W to be multiple of 8.
    padded_Y_blas = in_channels * ksize * ksize;
    if (padded_Y_blas % 8 > 0) {
        padded_Y_blas = (padded_Y_blas / 8 + 1) * 8;
    }

    // Create Allocations for W and b.
    W_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), padded_Y_blas, out_channels));
    b_alloc = Allocation.createSized(mRS, Element.F32(mRS), out_channels);

    // Initialize the 2D convolution kernel;
    mConvovle = new ScriptC_convolve2d(mRS);

    // Set the global variables for the RS kernel.
    mConvovle.set_kernel_h(ksize);
    mConvovle.set_kernel_w(ksize);
    mConvovle.set_step_x(stride);
    mConvovle.set_step_y(stride);
    mConvovle.set_pad_h(pad);
    mConvovle.set_pad_w(pad);

    mConvovle.set_beta_alloc(b_alloc);
    mConvovle.set_img_channel(in_channels);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 39, Source file: Convolution2D.java
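
The constructor above pads the width of W (in_channels * ksize * ksize) up to the next multiple of 8, per the code comment about GPU BLAS. A standalone restatement of that arithmetic, with a worked value, for illustration only:

// Equivalent of the padded_Y_blas computation in the constructor above.
static int roundUpToMultipleOf8(int n) {
    return (n % 8 == 0) ? n : (n / 8 + 1) * 8;
}
// Example: in_channels = 3, ksize = 9 gives 3 * 9 * 9 = 243, which is rounded up to 248.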


Example 8: process

import android.support.v8.renderscript.Type; // import the required package/class
public Allocation process(Allocation input, int img_h, int img_w) {
    // Set the input variables to the convolve kernel.
    mConvovle.set_img_h(img_h);
    mConvovle.set_img_w(img_w);
    mConvovle.set_img_alloc(input);

    // Calculate the dimensions of the image after padding.
    int padded_h = img_h + 2 * pad;
    int padded_w = img_w + 2 * pad;

    // Create Allocation to hold the padded image.
    Allocation img_padded = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), padded_h * padded_w, in_channels));
    // Initialize the padded Allocation to zero.
    mConvovle.forEach_zero(img_padded, img_padded);
    mConvovle.set_padded_alloc(img_padded);

    // Invoke the padding kernel.
    mConvovle.invoke_padd();

    // TODO Step2: Use convolve2DGEMM instead.
    Allocation out_alloc = convolve2D(img_padded, img_h, img_w);

    // Destroy the intermediate Allocations.
    img_padded.destroy();

    return out_alloc;
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 29, Source file: Convolution2D.java


Example 9: convolve2D

import android.support.v8.renderscript.Type; // import the required package/class
private Allocation convolve2D(Allocation img_padded, int img_h, int img_w) {
    // Calculate the dimensions of image after convolution.
    int out_h = ConvolveUtil.get_conv_outsize(img_h, ksize, stride, pad);
    int out_w = ConvolveUtil.get_conv_outsize(img_w, ksize, stride, pad);
    Log.v(TAG, "convolve size: " + out_h + " " + out_w);

    mConvovle.set_outW(out_w);
    mConvovle.set_outH(out_h);
    mConvovle.set_W_alloc(W_alloc);
    mConvovle.set_input_padded(img_padded);

    // Create the output Allocation for 2D convolution operation.
    Allocation out_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), out_h * out_w, out_channels));

    long time = System.currentTimeMillis();
    mConvovle.forEach_convolve2D(out_alloc);
    if (LOG_TIME) {
        mRS.finish();
        time = System.currentTimeMillis() - time;
        conv2dTime += time;
        Log.v(TAG, "Convolution2D, channels: " + in_channels + ", " + out_channels + " size: " + img_h + ", " + img_w + " conv2D process time: " + time);
    }

    time = System.currentTimeMillis();
    mConvovle.forEach_addBeta(out_alloc, out_alloc);
    if (LOG_TIME) {
        mRS.finish();
        time = System.currentTimeMillis() - time;
        betaTime += time;
        Log.v(TAG, "Convolution2D, channels: " + in_channels + ", " + out_channels + " size: " + img_h + ", " + img_w + " initBeta process time: " + time);
    }
    // Update the output dimensions.
    outH = out_h;
    outW = out_w;

    return out_alloc;
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 39, Source file: Convolution2D.java


Example 10: Convolution2DTiled

import android.support.v8.renderscript.Type; // import the required package/class
public Convolution2DTiled(Context ctx, RenderScript rs, int in_channels, int out_channels, int ksize, int stride, int pad) {
    super(ctx, rs);

    this.in_channels = in_channels;
    this.out_channels = out_channels;
    this.ksize = ksize;
    this.stride = stride;
    this.pad = pad;
    // X dimension for W: in_channels * ksize * ksize
    // Y dimension for W: out_channels
    this.W = new float[out_channels * in_channels * ksize * ksize];
    this.b = new float[out_channels];

    // Pad the width of W to be multiple of 8.
    padded_Y_blas = in_channels * ksize * ksize;
    if (padded_Y_blas % 8 > 0) {
        padded_Y_blas = (padded_Y_blas / 8 + 1) * 8;
    }

    // Create Allocations for W and b.
    W_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), padded_Y_blas, out_channels));
    b_alloc = Allocation.createSized(mRS, Element.F32(mRS), out_channels);

    // Initialize the 2D convolution kernel;
    mConvovle = new ScriptC_convolve2d(mRS);

    // Set the global variables for the RS kernel.
    mConvovle.set_kernel_h(ksize);
    mConvovle.set_kernel_w(ksize);
    mConvovle.set_step_x(stride);
    mConvovle.set_step_y(stride);
    mConvovle.set_pad_h(pad);
    mConvovle.set_pad_w(pad);

    mConvovle.set_beta_alloc(b_alloc);
    mConvovle.set_img_channel(in_channels);
    mConvovle.set_tile_h(TILE_Y);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 40, Source file: Convolution2DTiled.java


Example 11: Deconvolution2DTiled

import android.support.v8.renderscript.Type; // import the required package/class
public Deconvolution2DTiled(Context ctx, RenderScript rs, int in_channels, int out_channels, int ksize, int stride, int pad) {
    super(ctx, rs);

    this.in_channels = in_channels;
    this.out_channels = out_channels;
    this.ksize = ksize;
    this.stride = stride;
    this.pad = pad;
    // X dimension for W: in_channels * ksize * ksize
    // Y dimension for W: out_channels
    this.W = new float[out_channels * ksize * ksize * in_channels];
    this.b = new float[out_channels];

    // Pad the width of W to be multiple of 8.
    padded_Y_blas = out_channels * ksize * ksize;
    if (padded_Y_blas % 8 > 0) {
        padded_Y_blas = (padded_Y_blas / 8 + 1) * 8;
    }

    // Create Allocations for W and b.
    W_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), in_channels, padded_Y_blas));
    b_alloc = Allocation.createSized(mRS, Element.F32(mRS), out_channels);

    // Initialize the 2D deconvolution kernel;
    mConvovle = new ScriptC_deconvolve2d(mRS);

    // Set the global variables for the RS kernel.
    mConvovle.set_tile_h(TILE_Y);
    mConvovle.set_col_h(TILE_Y);

    mConvovle.set_kernel_h(ksize);
    mConvovle.set_kernel_w(ksize);
    mConvovle.set_step_x(stride);
    mConvovle.set_step_y(stride);
    mConvovle.set_pad_h(pad);
    mConvovle.set_pad_w(pad);
    mConvovle.set_beta_alloc(b_alloc);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 40, Source file: Deconvolution2DTiled.java


Example 12: Deconvolution2D

import android.support.v8.renderscript.Type; // import the required package/class
public Deconvolution2D(Context ctx, RenderScript rs, int in_channels, int out_channels, int ksize, int stride, int pad) {
    super(ctx, rs);

    this.in_channels = in_channels;
    this.out_channels = out_channels;
    this.ksize = ksize;
    this.stride = stride;
    this.pad = pad;
    // Y dimension for W: out_channels * ksize * ksize
    // X dimension for W: in_channels
    this.W = new float[out_channels * ksize * ksize * in_channels];
    this.b = new float[out_channels];

    // Pad the width of W to be multiple of 8.
    padded_Y_blas = out_channels * ksize * ksize;
    if (padded_Y_blas % 8 > 0) {
        padded_Y_blas = (padded_Y_blas / 8 + 1) * 8;
    }

    // Create Allocations for W and b.
    W_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), in_channels, padded_Y_blas));
    b_alloc = Allocation.createSized(mRS, Element.F32(mRS), out_channels);

    // Initialize the 2D deconvolution kernel;
    mConvovle = new ScriptC_deconvolve2d(mRS);

    // Set the global variables for the RS kernel.
    mConvovle.set_kernel_h(ksize);
    mConvovle.set_kernel_w(ksize);
    mConvovle.set_step_x(stride);
    mConvovle.set_step_y(stride);
    mConvovle.set_pad_h(pad);
    mConvovle.set_pad_w(pad);

    mConvovle.set_beta_alloc(b_alloc);

}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 39, Source file: Deconvolution2D.java


Example 13: createAllocation

import android.support.v8.renderscript.Type; // import the required package/class
public Allocation createAllocation(RenderScript rs) {
    final int sx = xSize;
    final int sy = ySize;
    final int sz = zSize;
    Type.Builder tb = new Type.Builder(rs, Element.U8_4(rs));
    tb.setX(sx);
    tb.setY(sy);
    tb.setZ(sz);
    Type t = tb.create();
    Allocation mCube = Allocation.createTyped(rs, t);
    mCube.copyFromUnchecked(getCube());

    return mCube;
}
 
Developer: silvaren, Project: easyrs, Lines of code: 15, Source file: Lut3DParams.java
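
The cube Allocation built above has the 3D U8_4 layout that ScriptIntrinsic3DLUT consumes (the same intrinsic appears in Example 15 below). A hedged sketch of how it might be applied; the input and output RGBA allocations are assumptions:

import android.support.v8.renderscript.Allocation;
import android.support.v8.renderscript.Element;
import android.support.v8.renderscript.RenderScript;
import android.support.v8.renderscript.ScriptIntrinsic3DLUT;

public class Lut3DApply {
    // Apply a U8_4 LUT cube to an RGBA input allocation, writing the result to an RGBA output allocation.
    static void applyLut(RenderScript rs, Allocation cube, Allocation rgbaIn, Allocation rgbaOut) {
        ScriptIntrinsic3DLUT lut = ScriptIntrinsic3DLUT.create(rs, Element.U8_4(rs));
        lut.setLUT(cube);             // cube is the 3D allocation from createAllocation() above
        lut.forEach(rgbaIn, rgbaOut); // remap every pixel through the LUT
    }
}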


Example 14: getExpectedBitmap

import android.support.v8.renderscript.Type; // import the required package/class
@NonNull
private Bitmap getExpectedBitmap(RenderScript rs, Bitmap bmpFromNv21) {
    Allocation ain = Allocation.createFromBitmap(rs, bmpFromNv21);
    Bitmap expectedBitmap = Bitmap.createBitmap(TARGET_WIDTH, TARGET_HEIGHT, bmpFromNv21.getConfig());
    Type outType = Type.createXY(rs, ain.getElement(), TARGET_WIDTH, TARGET_HEIGHT);
    Allocation aout = Allocation.createTyped(rs, outType);

    ScriptIntrinsicResize resizeScript = ScriptIntrinsicResize.create(rs);
    resizeScript.setInput(ain);
    resizeScript.forEach_bicubic(aout);

    aout.copyTo(expectedBitmap);
    return expectedBitmap;
}
 
Developer: silvaren, Project: easyrs, Lines of code: 15, Source file: ResizeTest.java


Example 15: initLUT

import android.support.v8.renderscript.Type; // import the required package/class
public void initLUT (int setSizeX, int setSizeY, int setSizeZ) {

        lutSizeX = setSizeX;
        lutSizeY = setSizeY;
        lutSizeZ = setSizeZ;
        colorLutArray = new int[lutSizeX * lutSizeY * lutSizeZ];

        rS = RenderScript.create(this);

        Type.Builder tb = new Type.Builder(rS, Element.U8_4(rS));
        tb.setX(lutSizeX);
        tb.setY(lutSizeY);
        tb.setZ(lutSizeZ);
        Type t = tb.create();


        allocLut = Allocation.createTyped(rS, t);


        for (int x = 0; x < lutSizeX; x++) {
            for (int y = 0; y < lutSizeY; y++) {
                for (int z = 0; z < lutSizeZ; z++) {


                    colorLutArray[x * lutSizeY * lutSizeZ + y * lutSizeZ + z] = Color.argb(0xff,
                            (0xff * x / (lutSizeX - 1)),
                            (0xff * y / (lutSizeY - 1)),
                            (0xff * z / (lutSizeZ - 1)));
                    /*
                    Log.d(TAG, "x,y,z = " + Integer.toString(x) + ", " + Integer.toString(y) + ", " +
                            Integer.toString(z) + " = " + Integer.toHexString(colorLutArray[x * lutSizeY * lutSizeZ + y * lutSizeZ + z]));
                    */

                }
            }
        }
        allocLut.copyFromUnchecked(colorLutArray);
        sI3dLut = ScriptIntrinsic3DLUT.create(rS, Element.U8_4(rS));
        sI3dLut.setLUT(allocLut);
    }
 
Developer: colorchief, Project: colorchief, Lines of code: 41, Source file: ColorLUT.java


Example 16: CameraPreviewCallback

import android.support.v8.renderscript.Type; // import the required package/class
public CameraPreviewCallback()
//-----------------------------
{
   if (LegacyCameraRenderer.this.camera.getFileFormat() == ARCamera.RecordFileFormat.NV21)
   {
      rsNv21toRGBA = RenderScript.create(activity);
      YUVToRGBA = ScriptIntrinsicYuvToRGB.create(rsNv21toRGBA, Element.U8_4(rsNv21toRGBA));
      Type.Builder yuvType = new Type.Builder(rsNv21toRGBA, Element.U8(rsNv21toRGBA)).setX(previewWidth).
            setY(previewHeight).setMipmaps(false).setYuvFormat(ImageFormat.NV21);
      Type.Builder rgbaType = new Type.Builder(rsNv21toRGBA, Element.RGBA_8888(rsNv21toRGBA)).setX(previewWidth).
            setY(previewHeight).setMipmaps(false);
      ain = Allocation.createTyped(rsNv21toRGBA, yuvType.create(), Allocation.USAGE_SCRIPT);
      aOut = Allocation.createTyped(rsNv21toRGBA, rgbaType.create(), Allocation.USAGE_SCRIPT);
   }
}
 
Developer: donaldmunro, Project: AARemu, Lines of code: 16, Source file: LegacyCameraRenderer.java
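
The constructor above only sets up the allocations; the per-frame conversion itself is outside the quoted snippet. Based on the same intrinsic usage shown in Example 2, it would look roughly like this sketch, where the NV21 byte array and target Bitmap are assumptions:

// Hypothetical per-frame step using the ain, aOut and YUVToRGBA fields created above.
void convertPreviewFrame(byte[] nv21Data, android.graphics.Bitmap target) {
    ain.copyFrom(nv21Data);   // upload the NV21 preview bytes into the YUV allocation
    YUVToRGBA.setInput(ain);  // feed the YUV allocation to the intrinsic
    YUVToRGBA.forEach(aOut);  // run the YUV to RGBA conversion
    aOut.copyTo(target);      // read the RGBA result back into a Bitmap of matching size
}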


Example 17: convolve2DGEMM

import android.support.v8.renderscript.Type; // import the required package/class
private Allocation convolve2DGEMM(Allocation img_padded, int img_h, int img_w) {
    // Calculate the dimensions of image after convolution.
    int out_h = ConvolveUtil.get_conv_outsize(img_h, ksize, stride, pad);
    int out_w = ConvolveUtil.get_conv_outsize(img_w, ksize, stride, pad);
    Log.v(TAG, "convolve size: " + out_h + " " + out_w);
    // Create the column Allocation.
    Allocation col_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), out_h * out_w, padded_Y_blas));

    long time = System.currentTimeMillis();
    // Invoke im2col kernel, to transform padded image to column image:
    mConvovle.set_outW(out_w);
    mConvovle.set_outH(out_h);
    mConvovle.forEach_im2col(col_alloc);

    if (LOG_TIME) {
        mRS.finish();
        time = System.currentTimeMillis() - time;
        im2colTime += time;
        Log.v(TAG, "Convolution2D, channels: " + in_channels + ", " + out_channels + " size: " + img_h + ", " + img_w + " im2col process time: " + time);
    }

    // Create the output Allocation for SGEMM operation.
    Allocation out_alloc = Allocation.createTyped(mRS,
            Type.createXY(mRS, Element.F32(mRS), out_h * out_w, out_channels));

    time = System.currentTimeMillis();
    // Conduct the convolution by matrix multiplication, using SGEMM (BLAS API).
    mBlas.SGEMM(ScriptIntrinsicBLAS.NO_TRANSPOSE, ScriptIntrinsicBLAS.NO_TRANSPOSE,
            1.0f, W_alloc, col_alloc, 0.0f, out_alloc);

    if (LOG_TIME) {
        mRS.finish();
        time = System.currentTimeMillis() - time;
        sgemmTime += time;
        Log.v(TAG, "Convolution2D, channels: " + in_channels + ", " + out_channels + " size: " + img_h + ", " + img_w + " SGEMM process time: " + time);
    }

    time = System.currentTimeMillis();
    // Add beta to the results for each channel.
    mConvovle.forEach_addBeta(out_alloc, out_alloc);
    if (LOG_TIME) {
        mRS.finish();
        time = System.currentTimeMillis() - time;
        betaTime += time;
        Log.v(TAG, "Convolution2D, channels: " + in_channels + ", " + out_channels + " size: " + img_h + ", " + img_w + " initBeta process time: " + time);
    }

    // Destroy the intermediate Allocations.
    col_alloc.destroy();

    // Update the output dimensions.
    outH = out_h;
    outW = out_w;

    return out_alloc;
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 58, Source file: Convolution2D.java
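
In the SGEMM call above, the 2D F32 allocations follow the convention that the X dimension holds the matrix columns and the Y dimension the rows: W_alloc is out_channels by padded_Y_blas, col_alloc is padded_Y_blas by (out_h * out_w), and out_alloc is out_channels by (out_h * out_w). A minimal standalone sketch of the same call shape, with placeholder matrix sizes:

import android.support.v8.renderscript.Allocation;
import android.support.v8.renderscript.Element;
import android.support.v8.renderscript.RenderScript;
import android.support.v8.renderscript.ScriptIntrinsicBLAS;
import android.support.v8.renderscript.Type;

public class GemmSketch {
    // C (M x N) = A (M x K) * B (K x N); X = columns, Y = rows, matching the convolution example above.
    static Allocation multiply(RenderScript rs, int M, int K, int N) {
        Allocation A = Allocation.createTyped(rs, Type.createXY(rs, Element.F32(rs), K, M));
        Allocation B = Allocation.createTyped(rs, Type.createXY(rs, Element.F32(rs), N, K));
        Allocation C = Allocation.createTyped(rs, Type.createXY(rs, Element.F32(rs), N, M));
        // ... fill A and B with data via copyFrom(float[]) before multiplying ...
        ScriptIntrinsicBLAS blas = ScriptIntrinsicBLAS.create(rs);
        blas.SGEMM(ScriptIntrinsicBLAS.NO_TRANSPOSE, ScriptIntrinsicBLAS.NO_TRANSPOSE,
                1.0f, A, B, 0.0f, C);
        return C;
    }
}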


Example 18: ResidualBlockChained

import android.support.v8.renderscript.Type; // import the required package/class
public ResidualBlockChained(Context ctx, RenderScript rs, int in_channels, int out_channels, int ksize, int stride, int pad, int numBlocks) {
    super(ctx, rs);

    this.in_channels = in_channels;
    this.out_channels = out_channels;
    this.ksize = ksize;
    this.stride = stride;
    this.pad = pad;
    this.mNumBlocks = numBlocks;

    this.b = new float[out_channels];
    this.W = new float[out_channels * in_channels * ksize * ksize];

    // Pad the width of W to be multiple of 8.
    padded_Y_blas = in_channels * ksize * ksize;
    if (padded_Y_blas % 8 > 0) {
        padded_Y_blas = (padded_Y_blas / 8 + 1) * 8;
    }

    // Create Allocations for each convolution operation.
    W_alloc = new Allocation[mNumBlocks * 2];
    b_alloc = new Allocation[mNumBlocks * 2];
    Type.Builder tb = new Type.Builder(mRS, Element.F32(mRS));
    tb.setX(padded_Y_blas).setY(out_channels);
    for (int i = 0; i < mNumBlocks * 2; i++) {
        W_alloc[i] = Allocation.createTyped(mRS, tb.create());
    }
    Type.Builder tbeta = new Type.Builder(mRS, Element.F32(mRS));
    tbeta.setX(out_channels);
    for (int i = 0; i < mNumBlocks * 2; i++) {
        b_alloc[i] = Allocation.createTyped(mRS, tbeta.create());
    }


    // Create Allocations for each batch normalization operation.
    gamma = new float[out_channels];
    beta = new float[out_channels];
    avg_mean = new float[out_channels];
    avg_var = new float[out_channels];

    gamma_alloc = new Allocation[numBlocks * 2];
    beta_alloc = new Allocation[numBlocks * 2];
    avg_mean_alloc = new Allocation[numBlocks * 2];
    avg_var_alloc = new Allocation[numBlocks * 2];

    Type.Builder tbn = new Type.Builder(mRS, Element.F32(mRS));
    tbn.setX(out_channels);
    for (int i = 0; i < numBlocks * 2; i++) {
        gamma_alloc[i] = Allocation.createTyped(mRS, tbn.create());
        beta_alloc[i] = Allocation.createTyped(mRS, tbn.create());
        avg_mean_alloc[i] = Allocation.createTyped(mRS, tbn.create());
        avg_var_alloc[i] = Allocation.createTyped(mRS, tbn.create());
    }

    // Initialize the RS kernels;
    mResidualBlock = new ScriptC_residualblock(mRS);
    mActivation = new ScriptC_activation(mRS);
    mConvovle = new ScriptC_convolve2d(mRS);
    rs_BN = new ScriptC_batchnormalization(mRS);

    // Set the global variables for the convolution kernel.
    mConvovle.set_kernel_h(ksize);
    mConvovle.set_kernel_w(ksize);
    mConvovle.set_step_x(stride);
    mConvovle.set_step_y(stride);
    mConvovle.set_pad_h(pad);
    mConvovle.set_pad_w(pad);
    mConvovle.set_tile_h(TILE_Y);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 70, Source file: ResidualBlockChained.java


Example 19: loadModel

import android.support.v8.renderscript.Type; // import the required package/class
public void loadModel(String path) throws IOException {
    for (int i = 0; i < mNumBlocks; i++) {
        for (int j = 0; j < 2; j++) {
            // Read all convolution blocks.
            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/c" + (j + 1) + "/W", AssetManager.ACCESS_BUFFER);
            ByteBuffer bb = readInput(mInputStream);
            FloatBuffer.wrap(W).put(bb.asFloatBuffer());

            // padding for GPU BLAS
            int W_height_input = in_channels * ksize * ksize;
            if (padded_Y_blas == W_height_input) {
                // If the input width already satisfies the requirement, just copy to the Allocation.
                W_alloc[i * 2 + j].copyFrom(W);
            } else {
                // If not, a temp allocation needs to be created.
                Allocation input = Allocation.createTyped(mRS,
                        Type.createXY(mRS, Element.F32(mRS), W_height_input, out_channels));
                input.copyFrom(W);
                W_alloc[i * 2 + j].copy2DRangeFrom(0, 0, W_height_input, out_channels, input, 0, 0);
            }

            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/c" + (j + 1) + "/b", AssetManager.ACCESS_BUFFER);
            bb = readInput(mInputStream);
            FloatBuffer.wrap(b).put(bb.asFloatBuffer());
            b_alloc[i * 2 + j].copyFrom(b);

            // Read all batch normalization blocks;
            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/b" + (j + 1) + "/gamma", AssetManager.ACCESS_BUFFER);
            bb = readInput(mInputStream);
            FloatBuffer.wrap(gamma).put(bb.asFloatBuffer());
            gamma_alloc[i * 2 + j].copyFrom(gamma);

            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/b" + (j + 1) + "/beta", AssetManager.ACCESS_BUFFER);
            bb = readInput(mInputStream);
            FloatBuffer.wrap(beta).put(bb.asFloatBuffer());
            beta_alloc[i * 2 + j].copyFrom(beta);

            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/b" + (j + 1) + "/avg_mean", AssetManager.ACCESS_BUFFER);
            bb = readInput(mInputStream);
            FloatBuffer.wrap(avg_mean).put(bb.asFloatBuffer());
            avg_mean_alloc[i * 2 + j].copyFrom(avg_mean);

            mInputStream = mContext.getAssets().open(path + "/r" + (i + 1) + "/b" + (j + 1) + "/avg_var", AssetManager.ACCESS_BUFFER);
            bb = readInput(mInputStream);
            FloatBuffer.wrap(avg_var).put(bb.asFloatBuffer());
            avg_var_alloc[i * 2 + j].copyFrom(avg_var);

        }

    }
    mInputStream.close();
    Log.v(TAG, "ResidualBlockChained loaded: " + b[0]);
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 54, Source file: ResidualBlockChained.java


Example 20: processImgChunk

import android.support.v8.renderscript.Type; // import the required package/class
private Allocation processImgChunk(Bitmap bitmap) {
    int height = bitmap.getHeight();
    int width = bitmap.getWidth();

    mImg2Alloc.set_height(height);
    mImg2Alloc.set_weight(width);

    Bitmap outImg = Bitmap.createBitmap(bitmap);
    // RGB bitmap Allocation.
    Allocation imgAlloc = Allocation.createFromBitmap(mRS, bitmap);
    mImg2Alloc.set_img_alloc(imgAlloc);
    // Float input Allocation.
    Allocation result = Allocation.createTyped(mRS, Type.createXY(mRS, Element.F32(mRS), height * width, 3));
    // convert the bitmap to 3 * (h * w) float Allocation;
    mImg2Alloc.forEach_img2alloc(result);

    // Actual computation;
    // 1st Convolution layer.
    result = mConvLayer[0].process(result, height, width);
    // Use ELU for activation.
    mActivation.forEach_elu(result, result);
    // 1st Batch Normalization.
    mBatchNormLayer[0].process(result);

    // 2nd Convolution layer.
    result = mConvLayer[1].process(result, mConvLayer[0].outH, mConvLayer[0].outW);
    mActivation.forEach_elu(result, result);
    // 2nd Batch Normalization.
    mBatchNormLayer[1].process(result);

    // 3rd Convolution layer.
    result = mConvLayer[2].process(result, mConvLayer[1].outH, mConvLayer[1].outW);
    mActivation.forEach_elu(result, result);
    // 3rd Batch Normalization.
    mBatchNormLayer[2].process(result);

    // Process through 5 consecutive residual blocks.
    result = mResidualLayer[0].process(result, mConvLayer[2].outH, mConvLayer[2].outW);
    result = mResidualLayer[1].process(result, mResidualLayer[0].outH, mResidualLayer[0].outW);
    result = mResidualLayer[2].process(result, mResidualLayer[1].outH, mResidualLayer[1].outW);
    result = mResidualLayer[3].process(result, mResidualLayer[2].outH, mResidualLayer[2].outW);
    result = mResidualLayer[4].process(result, mResidualLayer[3].outH, mResidualLayer[3].outW);

    // 1st Deconvolution layer.
    result = mDeconvLayer[0].process(result, mResidualLayer[4].outH, mResidualLayer[4].outW);
    mActivation.forEach_elu(result, result);
    // 4th Batch Normalization.
    mBatchNormLayer[3].process(result);

    // 2nd Deconvolution layer.
    result = mDeconvLayer[1].process(result, mDeconvLayer[0].outH, mDeconvLayer[0].outW);
    mActivation.forEach_elu(result, result);
    // 5th Batch Normalization.
    mBatchNormLayer[4].process(result);

    // 3rd Deconvolution layer.
    result = mDeconvLayer[2].process(result, mDeconvLayer[1].outH, mDeconvLayer[1].outW);

    // Convert floating point result to RGB image.
    mImg2Alloc.set_nn_alloc(result);
    Allocation outAlloc = Allocation.createFromBitmap(mRS, outImg);
    mImg2Alloc.forEach_alloc2img(outAlloc);
    return outAlloc;
}
 
Developer: googlecodelabs, Project: style-transfer, Lines of code: 65, Source file: FastStyleModel.java



Note: The android.support.v8.renderscript.Type class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.

