
C++ cudaFreeHost Function Code Examples


This article collects typical usage examples of the C++ cudaFreeHost function. If you are wondering what cudaFreeHost does in practice, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.



Below are 20 code examples of the cudaFreeHost function, drawn from open-source projects and ordered by popularity.
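Before the project listings, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern they all share: host memory allocated as page-locked (pinned) memory with cudaMallocHost or cudaHostAlloc must be released with cudaFreeHost, not with free() or delete. Buffer name and size are illustrative only.

/* Minimal sketch: allocate pinned host memory, use it, free it with cudaFreeHost. */
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    float *h_buf = NULL;
    const size_t bytes = 1024 * sizeof(float);

    /* Allocate page-locked (pinned) host memory. */
    if (cudaMallocHost((void**)&h_buf, bytes) != cudaSuccess) {
        fprintf(stderr, "cudaMallocHost failed\n");
        return 1;
    }

    /* ... fill h_buf and use it for fast host<->device cudaMemcpy transfers ... */

    /* Pinned allocations must be released with cudaFreeHost. */
    if (cudaFreeHost(h_buf) != cudaSuccess) {
        fprintf(stderr, "cudaFreeHost failed\n");
        return 1;
    }
    return 0;
}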

Example 1: gpujpeg_coder_deinit

/** Documented at declaration */
int
gpujpeg_coder_deinit(struct gpujpeg_coder* coder)
{
    if ( coder->data_raw != NULL )
        cudaFreeHost(coder->data_raw);
    if ( coder->d_data_raw != NULL )
        cudaFree(coder->d_data_raw);
    if ( coder->d_data != NULL )
        cudaFree(coder->d_data);
    if ( coder->data_quantized != NULL )
        cudaFreeHost(coder->data_quantized);
    if ( coder->d_data_quantized != NULL )
        cudaFree(coder->d_data_quantized);
    if ( coder->data_compressed != NULL )
        cudaFreeHost(coder->data_compressed);
    if ( coder->d_data_compressed != NULL )
        cudaFree(coder->d_data_compressed);
    if ( coder->segment != NULL )
        cudaFreeHost(coder->segment);
    if ( coder->d_segment != NULL )
        cudaFree(coder->d_segment);
    if ( coder->d_temp_huffman != NULL )
        cudaFree(coder->d_temp_huffman);
    if ( coder->block_list != NULL )
        cudaFreeHost(coder->block_list);
    if ( coder->d_block_list != NULL )
        cudaFree(coder->d_block_list);
    return 0;
}
Developer: zzilla | Project: media-streamer | Lines: 30 | Source: gpujpeg_common.c


Example 2: loadOprodFromCPUArrayQuda

    static void 
      loadOprodFromCPUArrayQuda(void *cudaOprodEven, void *cudaOprodOdd, void *cpuOprod,
          size_t bytes, int Vh)
      {
        // Use pinned memory 
	float2 *packedEven, *packedOdd;
        checkCudaError();

        if (cudaMallocHost(&packedEven, bytes) == cudaErrorMemoryAllocation) {
	  errorQuda("ERROR: cudaMallocHost failed for packedEven\n");
	}
        if (cudaMallocHost(&packedOdd, bytes) == cudaErrorMemoryAllocation) {
	  errorQuda("ERROR: cudaMallocHost failed for packedEven\n");
	}
        checkCudaError();


        packOprodField(packedEven, (float*)cpuOprod, 0, Vh);
        packOprodField(packedOdd,  (float*)cpuOprod, 1, Vh);
        checkCudaError();


        cudaMemset(cudaOprodEven, 0, bytes);
        cudaMemset(cudaOprodOdd, 0, bytes);
        checkCudaError();

        cudaMemcpy(cudaOprodEven, packedEven, bytes, cudaMemcpyHostToDevice);
        checkCudaError();
        cudaMemcpy(cudaOprodOdd, packedOdd, bytes, cudaMemcpyHostToDevice);
        checkCudaError();

        cudaFreeHost(packedEven);
        cudaFreeHost(packedOdd);
      }
Developer: kpetrov | Project: quda | Lines: 34 | Source: hisq_force_utils.cpp


Example 3: copyOprodFromCPUArrayQuda

    static void
      copyOprodFromCPUArrayQuda(FullOprod cudaOprod, void *cpuOprod,
          size_t bytes_per_dir, int Vh)
      {
        // Use pinned memory 
        float2 *packedEven, *packedOdd;
        if(cudaMallocHost(&packedEven, bytes_per_dir) == cudaErrorMemoryAllocation) {
	  errorQuda("ERROR: cudaMallocHost failed for packedEven\n");
	}
        if (cudaMallocHost(&packedOdd, bytes_per_dir) == cudaErrorMemoryAllocation) {
	  errorQuda("ERROR: cudaMallocHost failed for packedOdd\n");
	}

        for(int dir=0; dir<4; dir++){
          packOprodFieldDir(packedEven, (float*)cpuOprod, dir, 0, Vh);
          packOprodFieldDir(packedOdd,  (float*)cpuOprod, dir, 1, Vh);

          cudaMemset(cudaOprod.even.data[dir], 0, bytes_per_dir);
          cudaMemset(cudaOprod.odd.data[dir],  0, bytes_per_dir);
          checkCudaError();

          cudaMemcpy(cudaOprod.even.data[dir], packedEven, bytes_per_dir, cudaMemcpyHostToDevice);
          cudaMemcpy(cudaOprod.odd.data[dir], packedOdd, bytes_per_dir, cudaMemcpyHostToDevice);
          checkCudaError();
        }
        cudaFreeHost(packedEven);
        cudaFreeHost(packedOdd);
      }
Developer: kpetrov | Project: quda | Lines: 28 | Source: hisq_force_utils.cpp


Example 4: loadParityClover

void loadParityClover(ParityClover ret, void *clover, QudaPrecision cpu_prec, 
		      CloverFieldOrder clover_order)
{
  // use pinned memory                                                                                           
  void *packedClover, *packedCloverNorm;

  if (ret.precision == QUDA_DOUBLE_PRECISION && cpu_prec != QUDA_DOUBLE_PRECISION) {
    errorQuda("Cannot have CUDA double precision without CPU double precision");
  }
  if (clover_order != QUDA_PACKED_CLOVER_ORDER) {
    errorQuda("Invalid clover_order");
  }

#ifndef __DEVICE_EMULATION__
  if (cudaMallocHost(&packedClover, ret.bytes) == cudaErrorMemoryAllocation) {
    errorQuda("Error allocating clover pinned memory");
  }  
  if (ret.precision == QUDA_HALF_PRECISION) 
    if (cudaMallocHost(&packedCloverNorm, ret.bytes/18) == cudaErrorMemoryAllocation) {
      errorQuda("Error allocating clover pinned memory");
    } 
#else
  packedClover = malloc(ret.bytes);
  if (ret.precision == QUDA_HALF_PRECISION) packedCloverNorm = malloc(ret.bytes/18);
#endif
    
  if (ret.precision == QUDA_DOUBLE_PRECISION) {
    packParityClover((double2 *)packedClover, (double *)clover, ret.volume, ret.pad);
  } else if (ret.precision == QUDA_SINGLE_PRECISION) {
    if (cpu_prec == QUDA_DOUBLE_PRECISION) {
      packParityClover((float4 *)packedClover, (double *)clover, ret.volume, ret.pad);
    } else {
      packParityClover((float4 *)packedClover, (float *)clover, ret.volume, ret.pad);
    }
  } else {
    if (cpu_prec == QUDA_DOUBLE_PRECISION) {
      packParityCloverHalf((short4 *)packedClover, (float *)packedCloverNorm, 
			   (double *)clover, ret.volume, ret.pad);
    } else {
      packParityCloverHalf((short4 *)packedClover, (float *)packedCloverNorm, 
			   (float *)clover, ret.volume, ret.pad);
    }
  }
  
  cudaMemcpy(ret.clover, packedClover, ret.bytes, cudaMemcpyHostToDevice);
  if (ret.precision == QUDA_HALF_PRECISION) {
    cudaMemcpy(ret.cloverNorm, packedCloverNorm, ret.bytes/18, cudaMemcpyHostToDevice);
  }

#ifndef __DEVICE_EMULATION__
  cudaFreeHost(packedClover);
  if (ret.precision == QUDA_HALF_PRECISION) cudaFreeHost(packedCloverNorm);
#else
  free(packedClover);
  if (ret.precision == QUDA_HALF_PRECISION) free(packedCloverNorm);
#endif

}
Developer: adenbley | Project: quda | Lines: 58 | Source: clover_quda.cpp


Example 5: free_data_arr

void free_data_arr(DataArray* data_arr) {
    cudaFreeHost(*(data_arr->data_r));
    printf("host r space freed\n");
    cudaFreeHost(*(data_arr->data_k));
    printf("host k space freed\n");
//   cudaFree(*(data_arr->data_r_dev));
//   cudaDeviceSynchronize();
//   printf("device r space freed\n");
//   cudaFree(*(data_arr->data_k_dev));
//   cudaDeviceSynchronize();
//   printf("device k space freed\n");
}
Developer: KKobuszewski | Project: CudaGPE | Lines: 12 | Source: main.c


Example 6: errorQuda

void cudaCloverField::loadFullField(void *even, void *evenNorm, void *odd, void *oddNorm, 
				    const void *h_clover, const QudaPrecision cpu_prec, 
				    const CloverFieldOrder cpu_order)
{
  // use pinned memory                  
  void *packedEven, *packedEvenNorm, *packedOdd, *packedOddNorm;

  if (precision == QUDA_DOUBLE_PRECISION && cpu_prec != QUDA_DOUBLE_PRECISION) {
    errorQuda("Cannot have CUDA double precision without CPU double precision");
  }
  if (cpu_order != QUDA_LEX_PACKED_CLOVER_ORDER) {
    errorQuda("Invalid clover order");
  }

  cudaMallocHost(&packedEven, bytes/2);
  cudaMallocHost(&packedOdd, bytes/2);
  if (precision == QUDA_HALF_PRECISION) {
    cudaMallocHost(&packedEvenNorm, norm_bytes/2);
    cudaMallocHost(&packedOddNorm, norm_bytes/2);
  }
    
  if (precision == QUDA_DOUBLE_PRECISION) {
    packFullClover((double2 *)packedEven, (double2 *)packedOdd, (double *)clover, x, pad);
  } else if (precision == QUDA_SINGLE_PRECISION) {
    if (cpu_prec == QUDA_DOUBLE_PRECISION) {
      packFullClover((float4 *)packedEven, (float4 *)packedOdd, (double *)clover, x, pad);
    } else {
      packFullClover((float4 *)packedEven, (float4 *)packedOdd, (float *)clover, x, pad);    
    }
  } else {
    if (cpu_prec == QUDA_DOUBLE_PRECISION) {
      packFullCloverHalf((short4 *)packedEven, (float *)packedEvenNorm, (short4 *)packedOdd,
			 (float *) packedOddNorm, (double *)clover, x, pad);
    } else {
      packFullCloverHalf((short4 *)packedEven, (float *)packedEvenNorm, (short4 *)packedOdd,
			 (float * )packedOddNorm, (float *)clover, x, pad);    
    }
  }

  cudaMemcpy(even, packedEven, bytes/2, cudaMemcpyHostToDevice);
  cudaMemcpy(odd, packedOdd, bytes/2, cudaMemcpyHostToDevice);
  if (precision == QUDA_HALF_PRECISION) {
    cudaMemcpy(evenNorm, packedEvenNorm, norm_bytes/2, cudaMemcpyHostToDevice);
    cudaMemcpy(oddNorm, packedOddNorm, norm_bytes/2, cudaMemcpyHostToDevice);
  }

  cudaFreeHost(packedEven);
  cudaFreeHost(packedOdd);
  if (precision == QUDA_HALF_PRECISION) {
    cudaFreeHost(packedEvenNorm);
    cudaFreeHost(packedOddNorm);
  }
}
Developer: fwinter | Project: quda | Lines: 53 | Source: clover_field.cpp


Example 7: trace_printf

void MFNHashTypePlainCUDA::freeThreadAndDeviceMemory() {
    trace_printf("MFNHashTypePlainCUDA::freeThreadAndDeviceMemory()\n");

    cudaError_t err;

    // Free all the memory, then look for errors.
    cudaFree((void *)this->DeviceHashlistAddress);
    cudaFreeHost((void *)this->HostSuccessAddress);

    delete[] this->HostSuccessReportedAddress;

    // Only cudaFree if zeroCopy is in use.
    if (!this->useZeroCopy) {
        cudaFree((void *)this->DeviceSuccessAddress);
        cudaFree((void *)this->DeviceFoundPasswordsAddress);

    }
    
    cudaFreeHost((void *)this->HostFoundPasswordsAddress);

    cudaFreeHost((void*)this->HostStartPointAddress);
    cudaFree((void *)this->DeviceStartPointAddress);
    cudaFree((void *)this->DeviceStartPasswords32Address);

    // Only free the bitmap memory if it has been allocated.
    if (this->DeviceBitmap128mb_a_Address) {
        cudaFree((void *)this->DeviceBitmap128mb_a_Address);
        this->DeviceBitmap128mb_a_Address = 0;
    }
    if (this->DeviceBitmap128mb_b_Address) {
        cudaFree((void *)this->DeviceBitmap128mb_b_Address);
        this->DeviceBitmap128mb_b_Address = 0;
    }
    if (this->DeviceBitmap128mb_c_Address) {
        cudaFree((void *)this->DeviceBitmap128mb_c_Address);
        this->DeviceBitmap128mb_c_Address = 0;
    }
    if (this->DeviceBitmap128mb_d_Address) {
        cudaFree((void *)this->DeviceBitmap128mb_d_Address);
        this->DeviceBitmap128mb_d_Address = 0;
    }

    // Get any error that occurred above and report it.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("Thread %d: CUDA error freeing memory: %s. Exiting.\n",
                this->threadId, cudaGetErrorString( err));
        exit(1);
    }
}
Developer: locktide | Project: rh_project | Lines: 50 | Source: MFNHashTypePlainCUDA.cpp


Example 8: CH_CUDA_SAFE_CALL

void GRTRegenerateChains::FreePerGPUMemory(GRTRegenerateThreadRunData *data) {
    CH_CUDA_SAFE_CALL(cudaFree(this->DEVICE_Hashes[data->threadID]));

    CH_CUDA_SAFE_CALL(cudaFreeHost(this->HOST_Success[data->threadID]));
    CH_CUDA_SAFE_CALL(cudaFreeHost(this->HOST_Passwords[data->threadID]));
    // Only free the device memory if zero copy was NOT used
    if (!this->CommandLineData->GetUseZeroCopy()) {
        CH_CUDA_SAFE_CALL(cudaFree(this->DEVICE_Passwords[data->threadID]));
        CH_CUDA_SAFE_CALL(cudaFree(this->DEVICE_Success[data->threadID]));
   }

    delete[] this->HOST_Success_Reported[data->threadID];
    //printf("Memory for thread %d freed.\n", data->threadID);
}
Developer: Debug-Orz | Project: sploit-dev | Lines: 14 | Source: GRTRegenerateChains.cpp


Example 9: cudaFreeHost

OpenSteer::MemoryBackend::~MemoryBackend() {
    std::cout << "MemoryBackend reset" << std::endl;
    if (_data != 0) {
        cudaFreeHost(_data);
    }
    
    if (_const != 0) {
        cudaFreeHost(_const);
    }
    
    _data = 0;
    _const = 0;
    _instance = 0;
    _idCounter = 0;
}
Developer: Toanso | Project: OpenSteerCUDA | Lines: 15 | Source: MemoryBackend.cpp


Example 10: loadMomField

void loadMomField(Float2 *even, Float2 *odd, Float *mom, int bytes, int Vh, int pad) 
{  
  Float2 *packedEven, *packedOdd;
  cudaMallocHost(&packedEven, bytes/2); 
  cudaMallocHost(&packedOdd, bytes/2); 
    
  packMomField(packedEven, (Float*)mom, 0, Vh, pad);
  packMomField(packedOdd,  (Float*)mom, 1, Vh, pad);
    
  cudaMemcpy(even, packedEven, bytes/2, cudaMemcpyHostToDevice);
  cudaMemcpy(odd,  packedOdd, bytes/2, cudaMemcpyHostToDevice); 
  
  cudaFreeHost(packedEven);
  cudaFreeHost(packedOdd);
}
Developer: mchengcit | Project: quda | Lines: 15 | Source: cuda_gauge_field.cpp


Example 11: __startOperation

    /**
     * destructor
     */
    virtual ~MappedBufferIntern()
    {
        __startOperation(ITask::TASK_CUDA);
        __startOperation(ITask::TASK_HOST);

        if (pointer && ownPointer)
        {
#if( PMACC_CUDA_ENABLED == 1 )
/* cupla 0.1.0 does not support the function cudaHostAlloc to create mapped memory.
 * Therefore we need to call the native CUDA function cudaFreeHost to free memory.
 * Due to the renaming of cuda functions with cupla via macros we need to remove
 * the renaming to get access to the native cuda function.
 * @todo this is a workaround please fix me. We need to investigate if
 * it is possible to have mapped/unified memory in alpaka.
 *
 * corresponding alpaka issues:
 *   https://github.com/ComputationalRadiationPhysics/alpaka/issues/296
 *   https://github.com/ComputationalRadiationPhysics/alpaka/issues/612
 */
#   undef cudaFreeHost
            CUDA_CHECK((cuplaError_t)cudaFreeHost(pointer));
// re-introduce the cupla macro
#   define cudaFreeHost(...) cuplaFreeHost(__VA_ARGS__)
#else
            __deleteArray(pointer);
#endif
        }
    }
Developer: ALaDyn | Project: picongpu | Lines: 31 | Source: MappedBufferIntern.hpp


Example 12: TEST_P

TEST_P(MemcpyAsync, H2DTransfers) {
    const size_t param = GetParam();
    const size_t alloc = 1 << param;

    cudaError_t ret;
    void *d1, *h1;
    ret = cudaMalloc(&d1, alloc);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaHostAlloc(&h1, alloc, cudaHostAllocMapped);
    ASSERT_EQ(cudaSuccess, ret);

    cudaStream_t stream;
    ret = cudaStreamCreate(&stream);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaMemcpyAsync(d1, h1, alloc, cudaMemcpyHostToDevice, stream);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaStreamSynchronize(stream);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaFree(d1);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaFreeHost(h1);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaStreamDestroy(stream);
    ASSERT_EQ(cudaSuccess, ret);
}
Developer: ckennelly | Project: panoptes | Lines: 31 | Source: vtest_memcpyasync.cpp


Example 13: throw

 /**
  * destructor
  */
 virtual ~HostBufferIntern() throw (std::runtime_error)
 {
     if (pointer && ownPointer)
     {
         CUDA_CHECK(cudaFreeHost(pointer));
     }
 }
Developer: Heikman | Project: picongpu | Lines: 10 | Source: HostBufferIntern.hpp


Example 14: allocate

      void allocate(std::size_t size_,std::size_t nstreams , nt2::host_ &)
      {
        if(size_ > size)
        {
          if(size != 0)
          {
            for(std::size_t i =0; i < device.size(); ++i)
            {
              CUDA_ERROR(cudaFreeHost(host_pinned[i]));
              CUDA_ERROR(cudaFree(device[i]));
            }
          }
          ns = nstreams;
          size = size_;
          std::size_t sizeof_ = size*sizeof(T);
          host_pinned.resize(nstreams);
          device.resize(nstreams);
          for(std::size_t i =0; i < nstreams; ++i)
          {
            CUDA_ERROR(cudaMallocHost( (void**)&host_pinned[i] , sizeof_ ));
            CUDA_ERROR(cudaMalloc((void**)&device[i] , sizeof_  ));

          }
        }
      }
Developer: psiha | Project: nt2 | Lines: 25 | Source: specific_cuda.hpp


Example 15: Destroy_LU

/*! \brief Destroy distributed L & U matrices. */
void
Destroy_LU(int_t n, gridinfo_t *grid, LUstruct_t *LUstruct)
{
    int_t i, nb, nsupers;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;

#if ( DEBUGlevel>=1 )
    int iam;
    MPI_Comm_rank( MPI_COMM_WORLD, &iam );
    CHECK_MALLOC(iam, "Enter Destroy_LU()");
#endif

    nsupers = Glu_persist->supno[n-1] + 1;

    nb = CEILING(nsupers, grid->npcol);
    for (i = 0; i < nb; ++i) 
	if ( Llu->Lrowind_bc_ptr[i] ) {
	    SUPERLU_FREE (Llu->Lrowind_bc_ptr[i]);
#ifdef GPU_ACC
	    checkCuda(cudaFreeHost(Llu->Lnzval_bc_ptr[i]));
#else
	    SUPERLU_FREE (Llu->Lnzval_bc_ptr[i]);
#endif
	}
    SUPERLU_FREE (Llu->Lrowind_bc_ptr);
    SUPERLU_FREE (Llu->Lnzval_bc_ptr);

    nb = CEILING(nsupers, grid->nprow);
    for (i = 0; i < nb; ++i)
	if ( Llu->Ufstnz_br_ptr[i] ) {
	    SUPERLU_FREE (Llu->Ufstnz_br_ptr[i]);
	    SUPERLU_FREE (Llu->Unzval_br_ptr[i]);
	}
    SUPERLU_FREE (Llu->Ufstnz_br_ptr);
    SUPERLU_FREE (Llu->Unzval_br_ptr);

    /* The following can be freed after factorization. */
    SUPERLU_FREE(Llu->ToRecv);
    SUPERLU_FREE(Llu->ToSendD);
    SUPERLU_FREE(Llu->ToSendR[0]);
    SUPERLU_FREE(Llu->ToSendR);

    /* The following can be freed only after iterative refinement. */
    SUPERLU_FREE(Llu->ilsum);
    SUPERLU_FREE(Llu->fmod);
    SUPERLU_FREE(Llu->fsendx_plist[0]);
    SUPERLU_FREE(Llu->fsendx_plist);
    SUPERLU_FREE(Llu->bmod);
    SUPERLU_FREE(Llu->bsendx_plist[0]);
    SUPERLU_FREE(Llu->bsendx_plist);
    SUPERLU_FREE(Llu->mod_bit);

    SUPERLU_FREE(Glu_persist->xsup);
    SUPERLU_FREE(Glu_persist->supno);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit Destroy_LU()");
#endif
}
Developer: DBorello | Project: OpenSeesDev | Lines: 61 | Source: util.c


Example 16: cudaFreeHost

pinned_mem_pool::~pinned_mem_pool()
{
        if (mem_) {
                cudaFreeHost(mem_);
                mem_ = NULL;
        }
}
Developer: PickXu | Project: pantry | Lines: 7 | Source: pinned_mem_pool.cpp


Example 17: CaffeFreeHost

inline void CaffeFreeHost(void* ptr, bool use_cuda) {
#ifndef CPU_ONLY
  if (use_cuda) {
    CUDA_CHECK(cudaFreeHost(ptr));
    return;
  }
#endif

#ifdef USE_MLSL
  if (mn::is_multinode()) {
    mn::free(ptr);
  } else {
#endif /* !USE_MLSL */

#ifdef USE_MKL
    mkl_free(ptr);
#else
    free(ptr);
#endif

#ifdef USE_MLSL
  }
#endif /* USE_MLSL */

}
Developer: csuhawk | Project: caffe | Lines: 25 | Source: syncedmem.hpp


Example 18: TEST

TEST(HostAlloc, MappedPointer) {
    cudaError_t ret;
    int device;

    ret = cudaGetDevice(&device);
    ASSERT_EQ(cudaSuccess, ret);

    struct cudaDeviceProp prop;
    ret = cudaGetDeviceProperties(&prop, device);
    ASSERT_EQ(cudaSuccess, ret);

    void * ptr;
    ret = cudaHostAlloc(&ptr, 4, cudaHostAllocMapped);
    ASSERT_EQ(cudaSuccess, ret);

    /*
     * Try to retrieve the device pointer, expecting a result according to
     * prop.canMapHostMemory.
     */
    void * device_ptr;
    ret = cudaHostGetDevicePointer(&device_ptr, ptr, 0);
    if (prop.canMapHostMemory) {
        EXPECT_EQ(cudaSuccess, ret);
        EXPECT_FALSE(device_ptr == NULL);
    } else {
        EXPECT_EQ(cudaErrorMemoryAllocation, ret);
    }

    ret = cudaFreeHost(ptr);
    ASSERT_EQ(cudaSuccess, ret);
}
Developer: ckennelly | Project: panoptes | Lines: 31 | Source: vtest_hostalloc.cpp


Example 19: gpujpeg_image_destroy

/** Documented at declaration */
int
gpujpeg_image_destroy(uint8_t* image)
{
    cudaFreeHost(image);

    return 0;
}
Developer: zzilla | Project: media-streamer | Lines: 8 | Source: gpujpeg_common.c


Example 20: DumpIntegerMemoryDataSet

bool DumpIntegerMemoryDataSet(char *name, unsigned int* device_values, int nb_data ){

    //
    // Allocate host memory to receive the data coming back from the GPU
    //
    printf("(II) DumpIntegerMemoryDataSet(%s, %p, %d)\n", name, device_values, nb_data);
    cudaError_t Status;
    unsigned int* host_values;
    CUDA_MALLOC_HOST(&host_values, nb_data, __FILE__, __LINE__);

    Status = cudaMemcpy(host_values, device_values, nb_data * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    if(Status != cudaSuccess)
    {
    	printf("\n1 %s\n", cudaGetErrorString(Status));
    }

    PrintIntegerMatrix(name, host_values, nb_data, 8);
    // PrintIntegerMatrix(name, host_values, nb_data);
    Status = cudaFreeHost(host_values);
    if(Status != cudaSuccess)
    {
    	printf("\n1 %s\n", cudaGetErrorString(Status));
    }
    return true;
}
Developer: blegal | Project: Fast_LDPC_decoder_for_GPU_fixed | Lines: 25 | Source: debug_fx.cpp



Note: The cudaFreeHost examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not republish without permission.

