
Java CUfunction Class Code Examples


This article collects typical usage examples of the jcuda.driver.CUfunction class in Java. If you are wondering what the CUfunction class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.



The CUfunction class belongs to the jcuda.driver package. The sections below present 20 code examples of the CUfunction class, sorted by popularity by default.
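Before the individual examples, here is a minimal, self-contained sketch of the typical CUfunction lifecycle with the JCuda driver API, following the same pattern the examples below use: initialize the driver, create a context, load a PTX module, look up the kernel, and launch it. The PTX file name vectorAdd.ptx and the kernel name add are illustrative assumptions and do not come from any of the projects listed below.

import static jcuda.driver.JCudaDriver.*;

import jcuda.Pointer;
import jcuda.Sizeof;
import jcuda.driver.*;

public class CUfunctionLifecycleSketch {
    public static void main(String[] args) {
        JCudaDriver.setExceptionsEnabled(true);

        // Initialize the driver and create a context on the first device
        cuInit(0);
        CUdevice device = new CUdevice();
        cuDeviceGet(device, 0);
        CUcontext context = new CUcontext();
        cuCtxCreate(context, 0, device);

        // Load a pre-compiled PTX module and look up the kernel function
        CUmodule module = new CUmodule();
        cuModuleLoad(module, "vectorAdd.ptx");            // assumed file name
        CUfunction function = new CUfunction();
        cuModuleGetFunction(function, module, "add");     // assumed kernel name

        // Allocate device memory for n floats and copy the input over
        int n = 1024;
        float[] hostData = new float[n];
        CUdeviceptr deviceData = new CUdeviceptr();
        cuMemAlloc(deviceData, (long) n * Sizeof.FLOAT);
        cuMemcpyHtoD(deviceData, Pointer.to(hostData), (long) n * Sizeof.FLOAT);

        // Kernel parameters: a pointer to an array of pointers
        // which point to the actual values
        Pointer kernelParameters = Pointer.to(
            Pointer.to(deviceData),
            Pointer.to(new int[] { n }));

        // Launch a 1D grid that covers all n elements
        int blockSizeX = 256;
        int gridSizeX = (int) Math.ceil((double) n / blockSizeX);
        cuLaunchKernel(function,
            gridSizeX, 1, 1,       // Grid dimension
            blockSizeX, 1, 1,      // Block dimension
            0, null,               // Shared memory size and stream
            kernelParameters, null // Kernel- and extra parameters
            );
        cuCtxSynchronize();

        // Copy the result back and release the device memory
        cuMemcpyDtoH(Pointer.to(hostData), deviceData, (long) n * Sizeof.FLOAT);
        cuMemFree(deviceData);
    }
}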

Example 1: callCudaFunction

import jcuda.driver.CUfunction; // import the required package/class
private CUdeviceptr callCudaFunction(CUfunction function, Pointer[] arguments) {
	// Allocate device output memory
	CUdeviceptr result = getCUdeviceptr((long)size());
	arguments[arguments.length-1] = Pointer.to(result);

	// Set up the kernel parameters: A pointer to an array
	// of pointers which point to the actual values.
	Pointer kernelParameters = Pointer.to(arguments);

	// Call the kernel function.
	int blockSizeX = 256;
	int gridSizeX = (int)Math.ceil((double)size() / blockSizeX);
	cuLaunchKernel(function,
			gridSizeX,  1, 1,      // Grid dimension
			blockSizeX, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	cuCtxSynchronize();
	return result;
}
 
Developer: finmath, Project: finmath-lib-cuda-extensions, Lines: 22, Source: RandomVariableCudaWithFinalizer.java


Example 2: callCudaFunction

import jcuda.driver.CUfunction; // import the required package/class
private void callCudaFunction(final CUfunction function, Pointer[] arguments, final int gridSizeX, final int blockSizeX, final int sharedMemorySize) {
	synchronized (vectorsInUseReferenceMap) {
		// Set up the kernel parameters: A pointer to an array
		// of pointers which point to the actual values.
		final Pointer kernelParameters = Pointer.to(arguments);

		// Call the kernel function.
		deviceExecutor.submit(new Runnable() { public void run() {
			cuCtxSynchronize();
			cuLaunchKernel(function,
					gridSizeX,  1, 1,      // Grid dimension
					blockSizeX, 1, 1,      // Block dimension
					sharedMemorySize * Sizeof.FLOAT, null,               // Shared memory size and stream
					kernelParameters, null // Kernel- and extra parameters
					);
		}});
	}
}
 
Developer: finmath, Project: finmath-lib-cuda-extensions, Lines: 19, Source: RandomVariableCuda.java
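The deviceExecutor field used above is not shown in the snippet. Because a CUDA context is tied to the thread that created it, the executor is presumably single-threaded so that every launch runs on the context-owning thread; a declaration along the lines of the following sketch would fit. The field name comes from the snippet, but the initialization is an assumption, not the finmath source.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Assumed declaration: a single-threaded executor that serializes all device
// work onto the one thread that owns the CUDA context.
private static final ExecutorService deviceExecutor = Executors.newSingleThreadExecutor();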


Example 3: initJCuda

import jcuda.driver.CUfunction; // import the required package/class
/**
 * Initialize the JCudaDriver. Note that this has to be done from the
 * same thread that will later use the JCudaDriver API
 */
private void initJCuda()
{
    JCudaDriver.setExceptionsEnabled(true);

    // Create a device and a context
    cuInit(0);
    CUdevice device = new CUdevice();
    cuDeviceGet(device, 0);
    CUcontext context = new CUcontext();
    cuCtxCreate(context, 0, device);

    // Prepare the PTX file containing the kernel
    String ptxFileName = JCudaSamplesUtils.preparePtxFile(
        "src/main/resources/kernels/JCudaDriverSimpleGLKernel.cu");
    
    // Load the PTX file containing the kernel
    CUmodule module = new CUmodule();
    cuModuleLoad(module, ptxFileName);

    // Obtain a function pointer to the kernel function. This function
    // will later be called during the animation, in the display 
    // method of this GLEventListener.
    function = new CUfunction();
    cuModuleGetFunction(function, module, "simple_vbo_kernel");
}
 
Developer: jcuda, Project: jcuda-samples, Lines: 30, Source: JCudaDriverSimpleLWJGL.java


Example 4: initialize

import jcuda.driver.CUfunction; // import the required package/class
/**
 * Tries to compile the specified .CU file into a PTX file, loads this
 * PTX file as a module, obtains the specified function from this module
 * and returns it.
 * 
 * @param cuFileName The .CU file name
 * @param functionName The kernel function name
 * @return The function
 * @throws CudaException If an error occurs
 */
protected final CUfunction initialize(
    String cuFileName, String functionName)
{
    // Enable exceptions and omit all subsequent error checks
    JCudaDriver.setExceptionsEnabled(true);
   
    // Initialize the driver and create a context for the first device.
    cuInit(0);
    CUdevice device = new CUdevice();
    cuDeviceGet(device, 0);
    CUcontext context = new CUcontext();
    cuCtxCreate(context, 0, device);

    String ptxFileName = JCudaTestUtils.preparePtxFile(cuFileName);
    
    // Load the ptx file.
    CUmodule module = new CUmodule();
    cuModuleLoad(module, ptxFileName);

    // Obtain a function pointer to the kernel function.
    CUfunction function = new CUfunction();
    cuModuleGetFunction(function, module, functionName);
    
    return function;
}
 
Developer: jcuda, Project: jcuda, Lines: 36, Source: JCudaAbstractKernelTest.java


Example 5: compute

import jcuda.driver.CUfunction; // import the required package/class
public void compute(final float[] scores, final float[] whiteObservations, final float[] blackObservations, final int sequenceLength) {
	int gridSizeX = (int) Math.ceil(((double) sequenceLength) / (BLOCK_SIZE_X*ROLL_X));
	int extendedSeqLength = gridSizeX * (BLOCK_SIZE_X*ROLL_X);
	cuMemcpyHtoD(d_Ow, Pointer.to(CudaUtil.extendWithZeros(whiteObservations, (extendedSeqLength+maxTemplateWidth-1)*CharacterTemplate.LINE_HEIGHT)), (extendedSeqLength+maxTemplateWidth-1)*CharacterTemplate.LINE_HEIGHT * Sizeof.FLOAT);
	cuMemcpyHtoD(d_Ob, Pointer.to(CudaUtil.extendWithZeros(blackObservations, (extendedSeqLength+maxTemplateWidth-1)*CharacterTemplate.LINE_HEIGHT)), (extendedSeqLength+maxTemplateWidth-1)*CharacterTemplate.LINE_HEIGHT * Sizeof.FLOAT);
	for (int tw=minTemplateWidth; tw<=maxTemplateWidth; ++tw) {
		if (templateNumIndices[tw-minTemplateWidth] > 0) {
			CUfunction function = new CUfunction();
			cuModuleGetFunction(function, cudaModule, "compute_emissions_"+tw);
			JCudaDriver.cuFuncSetCacheConfig(function, CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED);
			JCudaDriver.cuFuncSetSharedMemConfig(function, CUsharedconfig.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE);
			Pointer kernelParameters = Pointer.to(Pointer.to(new int[] {templateIndicesOffsets[tw-minTemplateWidth]*sequenceLength}), Pointer.to(new int[] {sequenceLength}), Pointer.to(new int[] {templateNumIndices[tw-minTemplateWidth]}), Pointer.to(d_Tw[tw-minTemplateWidth]), Pointer.to(d_Tb[tw-minTemplateWidth]), Pointer.to(d_Ow), Pointer.to(d_Ob), Pointer.to(d_scores));
			int gridSizeY = (int) Math.ceil(((double) templateNumIndices[tw-minTemplateWidth]) / BLOCK_SIZE_Y);
			cuLaunchKernel(function, 
					gridSizeX, gridSizeY, 1,      // Grid dimension
					BLOCK_SIZE_X, BLOCK_SIZE_Y, 1,      // Block dimension
					0, null,               // Shared memory size and stream
					kernelParameters, null // Kernel- and extra parameters
					);
		}
	}
	cuMemcpyDtoH(Pointer.to(scores), d_scores, sequenceLength*totalTemplateNumIndices * Sizeof.FLOAT);
}
 
Developer: tberg12, Project: ocular, Lines: 24, Source: CUDAInnerLoop.java


Example 6: fill

import jcuda.driver.CUfunction; // import the required package/class
public void fill(Volume3D vol, float number){
	initCUDA();
	CUdeviceptr sizePointer = CUDAUtil.copyToDeviceMemory(vol.size);
	CUfunction function = new CUfunction();
	JCudaDriver.cuModuleGetFunction(function, module,
	"_Z4fillPfPifi"); // C++-mangled name of fill(float*, int*, float, int)

	ArrayList<Object> arguments = new ArrayList<Object>();
	arguments.add(((CUDAVolume3D) vol).getDevicePointer());
	arguments.add(sizePointer);
	arguments.add(new Float(number));
	arguments.add(new Integer(vol.getInternalDimension()));

	// Calculate new grid size
	gridSize = getGrid(vol.size);

	if (debug) System.out.println("Calling.");
	callCUDAFunction(function, arguments);
	if (debug) System.out.println("Freeing.");

	JCuda.cudaFree(sizePointer);
	//((CUDAVolume3D) vol).fetch();
}
 
Developer: akmaier, Project: CONRAD, Lines: 24, Source: CUDAVolumeOperator.java


Example 7: launchKernel

import jcuda.driver.CUfunction; // import the required package/class
private void launchKernel() {
    CUfunction function = new CUfunction();
    cuModuleGetFunction(function, moduleDetector, DETECTOR_KERNEL_NAME);

    Pointer kernelParams = Pointer.to(
            Pointer.to(srcPtr),
            Pointer.to(neededFeaturesPtr),
            Pointer.to(new int[]{outputSize}),
            Pointer.to(slidingWindowsPtr),
            Pointer.to(dstPtr)
    );

    int nbBlocksX;
    int nbBlocksY;
    if (slidingWindowsSize > Conf.maxBlocksByDim) {
        nbBlocksX = Conf.maxBlocksByDim;
        nbBlocksY = (int) Math.ceil( (double)slidingWindowsSize / (double)Conf.maxBlocksByDim);
    }
    else {
        nbBlocksX = slidingWindowsSize;
        nbBlocksY = 1;
    }

    cuLaunchKernel(
            function, // CUDA function to be called
            nbBlocksX, nbBlocksY, 1, // 3D (x, y, z) grid of block
            neededFeaturesSize, 1, 1, // 3D (x, y, z) grid of threads
            0, // sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.
            null, // can optionally be associated to a stream by passing a non-zero hStream argument.
            kernelParams, // Array of params to be passed to the function
            null // extra parameters
    );
    cuCtxSynchronize();

    cuMemcpyDtoH(Pointer.to(allFeatures), dstPtr, outputSize * Sizeof.INT);
}
 
Developer: INVASIS, Project: Viola-Jones, Lines: 37, Source: HaarDetector.java


Example 8: initialize

import jcuda.driver.CUfunction; // import the required package/class
/**
 * Initialize the driver API, the {@link #context} and the 
 * kernel {@link #function} 
 */
private static void initialize()
{
    System.out.println("Initializing...");
    
    JCudaDriver.setExceptionsEnabled(true);
    JNvrtc.setExceptionsEnabled(true);

    cuInit(0);
    CUdevice device = new CUdevice();
    cuDeviceGet(device, 0);
    context = new CUcontext();
    cuCtxCreate(context, 0, device);

    nvrtcProgram program = new nvrtcProgram();
    nvrtcCreateProgram(
        program, programSourceCode, null, 0, null, null);
    nvrtcCompileProgram(program, 0, null);
    
    String[] ptx = new String[1];
    nvrtcGetPTX(program, ptx);
    nvrtcDestroyProgram(program);

    CUmodule module = new CUmodule();
    cuModuleLoadData(module, ptx[0]);

    function = new CUfunction();
    cuModuleGetFunction(function, module, "example");
    
    System.out.println("Initializing DONE");
}
 
Developer: jcuda, Project: jcuda-samples, Lines: 35, Source: JCudaDriverStreamCallbacks.java
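The programSourceCode string compiled with NVRTC above is not shown in the snippet. A minimal source that would satisfy this initialization could look like the following sketch; the only constraint taken from the snippet is that the compiled module must expose a __global__ function named example (the name passed to cuModuleGetFunction), while the signature and body are illustrative assumptions rather than the jcuda-samples source.

// Assumed kernel source for the NVRTC compilation above; the kernel name
// "example" matches the cuModuleGetFunction call, everything else is illustrative.
private static String programSourceCode =
    "extern \"C\""                                       + "\n" +
    "__global__ void example(int n, float *data)"        + "\n" +
    "{"                                                  + "\n" +
    "    int i = blockIdx.x * blockDim.x + threadIdx.x;" + "\n" +
    "    if (i < n)"                                     + "\n" +
    "    {"                                              + "\n" +
    "        data[i] = data[i] * 2.0f;"                  + "\n" +
    "    }"                                              + "\n" +
    "}"                                                  + "\n";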


Example 9: scalarSet

import jcuda.driver.CUfunction; // import the required package/class
private static void scalarSet(Matrix A, float alpha) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorScalarSet");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(new float[] {alpha}), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 10: scalarAdd

import jcuda.driver.CUfunction; // import the required package/class
private static void scalarAdd(Matrix A, float alpha, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorScalarAdd");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new float[] {alpha}), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 11: log

import jcuda.driver.CUfunction; // import the required package/class
private static void log(Matrix A, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorLog");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 12: exp

import jcuda.driver.CUfunction; // import the required package/class
private static void exp(Matrix A, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorExp");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 13: sign

import jcuda.driver.CUfunction; // import the required package/class
private static void sign(Matrix A, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorSign");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 14: abs

import jcuda.driver.CUfunction; // import the required package/class
private static void abs(Matrix A, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorAbs");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 15: div

import jcuda.driver.CUfunction; // import the required package/class
private static void div(Matrix A, Matrix B, Matrix C) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorDiv");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(C.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 16: mul

import jcuda.driver.CUfunction; // import the required package/class
private static void mul(Matrix A, Matrix B, Matrix C) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorMul");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(C.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 17: max

import jcuda.driver.CUfunction; // import the required package/class
private static void max(Matrix A, Matrix B, float val) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorMax");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new float[] {val}), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 18: min

import jcuda.driver.CUfunction; // import the required package/class
private static void min(Matrix A, Matrix B, float val) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorMin");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new float[] {val}), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 19: pow

import jcuda.driver.CUfunction; // import the required package/class
private static void pow(Matrix A, Matrix B, float val) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorPow");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new float[] {val}), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java


Example 20: sqr

import jcuda.driver.CUfunction; // import the required package/class
private static void sqr(Matrix A, Matrix B) {
	int n = A.rows*A.cols;
	CUfunction function = new CUfunction();
	cuModuleGetFunction(function, helperModule, "vectorSqr");
	Pointer kernelParameters = Pointer.to(Pointer.to(A.data_d), Pointer.to(B.data_d), Pointer.to(new int[] {n}));
	int blockSize = Math.min(n, BLOCK_SIZE);
	int gridSizeX = (int) Math.ceil((double) n / blockSize);
	cuLaunchKernel(function,
			gridSizeX, 1, 1,      // Grid dimension
			blockSize, 1, 1,      // Block dimension
			0, null,               // Shared memory size and stream
			kernelParameters, null // Kernel- and extra parameters
			);
	if (DEBUG_SYNC) JCudaDriver.cuCtxSynchronize();
}
 
Developer: tberg12, Project: murphy, Lines: 16, Source: CublasUtil.java



Note: The jcuda.driver.CUfunction class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Please do not repost without permission.

