
C++ H5Pset_chunk Function Code Examples


This article collects typical usage examples of the C++ function H5Pset_chunk. If you are wondering what H5Pset_chunk does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the H5Pset_chunk function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better C++ code examples.
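
Before looking at the project excerpts, here is a minimal, self-contained sketch of the basic call sequence (not drawn from any of the examples below; the file name chunk_example.h5 and dataset name chunked_data are placeholders chosen for illustration): create a dataset creation property list, set the chunk shape with H5Pset_chunk, and pass the property list to H5Dcreate2.

#include "hdf5.h"

int main(void)
{
    hsize_t dims[2]       = {100, 200};    /* overall dataset extent       */
    hsize_t chunk_dims[2] = {10, 20};      /* shape of each storage chunk  */
    static int data[100][200] = {{0}};     /* dummy data to write          */

    hid_t file  = H5Fcreate("chunk_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, NULL);

    /* Enable chunked layout on the dataset creation property list;
     * the rank passed here (2) must match the dataspace rank. */
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk_dims);

    hid_t dset = H5Dcreate2(file, "chunked_data", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    /* Release handles in reverse order of creation */
    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}

Chunked layout is what makes extendible datasets (H5S_UNLIMITED maxdims) and filters such as deflate, shuffle, or N-bit possible, which is why most of the examples below pair H5Pset_chunk with H5Pset_deflate, H5Pset_fill_value, or an unlimited dataspace.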

Example 1: create_chunked_dataset

/*
 * This creates a dataset serially with 'nchunks' chunks, each of CHUNKSIZE
 * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
 * routine will open this in parallel for extension test.
 */
void
create_chunked_dataset(const char *filename, int nchunks, write_type write_pattern)
{
    hid_t       file_id, dataset;                          /* handles */
    hid_t       dataspace,memspace;  
    hid_t       cparms;
    hsize_t      dims[1];
    hsize_t      maxdims[1] = {H5S_UNLIMITED};
    
    hsize_t      chunk_dims[1] ={CHUNKSIZE};
    hsize_t     count[1];
    hsize_t     stride[1];
    hsize_t     block[1];
    hsize_t     offset[1];            /* Selection offset within dataspace */
    /* Variables used in reading data back */
    char         buffer[CHUNKSIZE];
    int           i;

    herr_t       hrc;                             

    MPI_Offset  filesize,	    /* actual file size */
		est_filesize;	    /* estimated file size */

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Only MAINPROCESS should create the file.  Others just wait. */
    if (MAINPROCESS){

	dims[0]=nchunks*CHUNKSIZE;
	/* Create the data space with unlimited dimensions. */
	dataspace = H5Screate_simple (1, dims, maxdims); 
	VRFY((dataspace >= 0), "");

	memspace = H5Screate_simple(1, chunk_dims, NULL);
	VRFY((memspace >= 0), "");
 
	/* Create a new file. If file exists its contents will be overwritten. */
	file_id = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT,
		    H5P_DEFAULT);
	VRFY((file_id >= 0), "H5Fcreate");

	/* Modify dataset creation properties, i.e. enable chunking  */
	cparms = H5Pcreate (H5P_DATASET_CREATE);
	VRFY((cparms >= 0), "");

	hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
	VRFY((hrc >= 0), "");

	hrc = H5Pset_chunk ( cparms, 1, chunk_dims);
	VRFY((hrc >= 0), "");

	/* Create a new dataset within the file using cparms creation properties. */
	dataset = H5Dcreate (file_id, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, cparms);
	VRFY((dataset >= 0), "");

	switch (write_pattern) {

	    /* writes only the second to last chunk */
	    case sec_last:

		memset(buffer, 100, CHUNKSIZE);

		count[0] = 1;
		stride[0] = 1;
		block[0] = chunk_dims[0];
		offset[0] = (nchunks-2)*chunk_dims[0];

		hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
		    VRFY((hrc >= 0), "");

		/* Write sec_last chunk */
		hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
		VRFY((hrc >= 0), "H5Dwrite");

		break;


	    /* doesn't write anything */
	    case none:

		break;
	}

	/* Close resources */
	hrc = H5Dclose (dataset);
	VRFY((hrc >= 0), "");
	dataset = -1;

	hrc = H5Sclose (dataspace);
	VRFY((hrc >= 0), "");

	hrc = H5Sclose (memspace);
	VRFY((hrc >= 0), "");
//......... (some code omitted here) .........
Developer: einon, Project: affymetrix-power-tools, Lines: 101, Source: t_chunk_alloc.c


Example 2: WARNING

int PHDF5fileClass::WritePHDF5dataset(string grpname, string datasetname, double ***data, int nx, int ny, int nz){

  /* -------------------------- */
  /* Local variables and arrays */
  /* -------------------------- */

  string dname;
  double *buffer;

  hid_t const h5type = H5T_NATIVE_DOUBLE;

  hid_t glob_dspace;
  hid_t locl_dspace;
  hid_t dataset_prop;
  hid_t dataset;
  hid_t dataspace;
  hid_t dataset_xfer;

  /* --------------------------------- */
  /* Check that dimensions are correct */
  /* --------------------------------- */

  if (bparticles && grpname=="Particles"){  /* compare the std::string, not the c_str() pointer */
    cout << " WARNING(phdf5): Particle data is not going to be written as the 'bparticles' flag is currently turned to FALSE" << endl;
    return (2);
  }

  for (int i=0; i<ndim; i++){
    if (dim[i]%chdim[i]!=0){
      cout << " ERROR(phdf5): Grid size is not a multiple of the chunk size in the " << i << " dimension." << endl;
      cout << "         Glob: " << dim[0] << " " << dim[1] << " " << dim[2] << endl;
      cout << "         Locl: " << chdim[0] << " " << chdim[1] << " " << chdim[2] << endl;
      return 1;
    }
  }

  /* ----------------------- */
  /* Copy raw data to buffer */
  /* ----------------------- */

  if (nx!=chdim[0] || ny!=chdim[1] || nz!=chdim[2]){
    cout << " ERROR(phdf5): data size is not equal to HDF5 chunk size " << endl;
    return 1;
  }

  buffer = new double[nx*ny*nz];
  int l = 0;
  for (int i = 0; i < nx; i++)
    for (int j = 0; j < ny; j++)
      for (int k = 0; k < nz; k++)
        buffer[l++] = data[i][j][k];

  /* -------------------------------------------------------- */
  /* 5- Set the stride, count and block values for each chunk */
  /*    And set the offset for each chunk                     */
  /* -------------------------------------------------------- */

  hsize_t *stride = new hsize_t[ndim];
  hsize_t *count  = new hsize_t[ndim];
  hsize_t *block  = new hsize_t[ndim];
  hsize_t *offset = new hsize_t[ndim];

  for (int i=0; i<ndim; i++){
    stride[i] = 1;
    count[i]  = 1;
    block[i]  = chdim[i];
    offset[i] = mpicoord[i]*chdim[i];
  }

  /* ---------------------------------- */
  /* 6- Create data spaces for our data */
  /* ---------------------------------- */

  glob_dspace = H5Screate_simple(ndim, dim,   NULL);
  locl_dspace = H5Screate_simple(ndim, chdim, NULL);

  /* --------------------------------------- */
  /* 7- Create the dataset for the HDF5 file */
  /* --------------------------------------- */

  dataset_prop = H5Pcreate(H5P_DATASET_CREATE);

  H5Pset_chunk(dataset_prop, ndim, chdim);

  dname   = "/"+grpname+"/"+datasetname;
  dataset = H5Dcreate2(file_id, dname.c_str(), h5type, glob_dspace, H5P_DEFAULT, dataset_prop, H5P_DEFAULT);

  H5Pclose(dataset_prop);

  dataspace = H5Dget_space(dataset);
  H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);

  /* --------------------------------- */
  /* 8- Set the parallel transfer mode */
  /* --------------------------------- */

  dataset_xfer = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(dataset_xfer, H5FD_MPIO_COLLECTIVE);

  /* ---------------------------- */
//......... (some code omitted here) .........
Developer: FusionPlasma, Project: iPic3D_VR, Lines: 101, Source: phdf5.cpp


Example 3: main

int main(int argc, char *argv[])
{
  hid_t fid           = -1;
  hid_t access_plist  = -1;
  hid_t create_plist  = -1;
  hid_t cparm         = -1;
  hid_t datatype      = -1;
  hid_t dataspace     = -1;
  hid_t dataset       = -1;
  hid_t memspace      = -1;
  hid_t groupDetector = -1;
  int rank = 1;
  hsize_t chunk[2] = {10,10};
  hsize_t dims[2] = {1,1};
  hsize_t elementSize[2] = {1,1};
  hsize_t maxdims[2] = {H5S_UNLIMITED,H5S_UNLIMITED};
  int ivalue[2];
  int fillValue = 0;
  
  /* Create the output file */
  /* All SWMR files need to use the latest file format */
  access_plist = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fclose_degree(access_plist, H5F_CLOSE_STRONG);
#if H5_VERSION_GE(1,9,178)
  H5Pset_object_flush_cb(access_plist, cFlushCallback, NULL);
#endif
  H5Pset_libver_bounds(access_plist, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
  create_plist = H5Pcreate(H5P_FILE_CREATE);
  fid = H5Fcreate("test_string_swmr.h5", H5F_ACC_TRUNC, create_plist, access_plist);

  /* Data */
  rank = 2;
  dims[0] = 10;
  dims[1] = 10;
  dataspace = H5Screate_simple(rank, dims, dims);
  cparm = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_chunk(cparm, rank, chunk);
  datatype = H5Tcopy(H5T_NATIVE_INT8);
  H5Pset_fill_value(cparm, datatype, &fillValue);
  groupDetector = H5Gcreate(fid, "detector", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

  access_plist = H5Pcreate(H5P_DATASET_ACCESS);
  H5Pset_chunk_cache(access_plist, 503, 100, 1.0);
  dataset = H5Dcreate2(groupDetector, "data1",
                       datatype, dataspace,
                       H5P_DEFAULT, cparm, access_plist);
  ivalue[0] = 1;
  ivalue[1] = 1;
  writeInt32Attribute(dataset, "NDArrayDimBinning", 2, ivalue);
  ivalue[0] = 0;
  ivalue[1] = 0;
  writeInt32Attribute(dataset, "NDArrayDimOffset", 2, ivalue);
  writeInt32Attribute(dataset, "NDArrayDimReverse", 2, ivalue);
  ivalue[0] = 2;
  writeInt32Attribute(dataset, "NDArrayNumDims", 1, ivalue);
  H5Gclose(groupDetector);

  dims[0] = 1;
  dims[1] = 1;
  chunk[0] = 1;
  rank = 1;

  /* Unique ID */
  datatype = H5T_NATIVE_INT32;
  cparm = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_fill_value(cparm, datatype, &fillValue);
  H5Pset_chunk(cparm, rank, chunk);
  dataspace = H5Screate_simple(rank, dims, maxdims);
  dataset = H5Dcreate2(fid, "NDArrayUniqueId",
                       datatype, dataspace,
                       H5P_DEFAULT, cparm, H5P_DEFAULT);
  memspace = H5Screate_simple(rank, elementSize, NULL);
  writeStringAttribute(dataset, "NDAttrName",        "NDArrayUniqueId");
  writeStringAttribute(dataset, "NDAttrDescription", "The unique ID of the NDArray");
  writeStringAttribute(dataset, "NDAttrSourceType",  "NDAttrSourceDriver");
  writeStringAttribute(dataset, "NDAttrSource",      "Driver");

  /* EPICS Timestamp */
  datatype = H5T_NATIVE_DOUBLE;
  cparm = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_fill_value(cparm, datatype, &fillValue);
  H5Pset_chunk(cparm, rank, chunk);
  dataspace = H5Screate_simple(rank, dims, maxdims);
  dataset = H5Dcreate2(fid, "NDArrayTimeStamp",
                       datatype, dataspace,
                       H5P_DEFAULT, cparm, H5P_DEFAULT);
  memspace = H5Screate_simple(rank, elementSize, NULL);
  writeStringAttribute(dataset, "NDAttrName",        "NDArrayTimeStamp");
  writeStringAttribute(dataset, "NDAttrDescription", "The timestamp of the NDArray as float64");
  writeStringAttribute(dataset, "NDAttrSourceType",  "NDAttrSourceDriver");
  writeStringAttribute(dataset, "NDAttrSource",      "Driver");

  /* EPICS TS sec */
  datatype = H5T_NATIVE_UINT32;
  cparm = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_fill_value(cparm, datatype, &fillValue);
  H5Pset_chunk(cparm, rank, chunk);
  dataspace = H5Screate_simple(rank, dims, maxdims);
  dataset = H5Dcreate2(fid, "NDArrayEpicsTSSec",
                       datatype, dataspace,
//......... (some code omitted here) .........
Developer: areaDetector, Project: ADCore, Lines: 101, Source: test_SWMR_fail.c


Example 4: _io_write_prim_h5mpi

void _io_write_prim_h5mpi(const char *fname, const char **pnames, const double *data)
// -----------------------------------------------------------------------------
// This function uses a collective MPI-IO procedure to write the contents of
// 'data' to the HDF5 file named 'fname', which is assumed to have been created
// already. The dataset with name 'dname', which is being written to, must not
// exist already. Chunking is enabled as per the module-wide ChunkSize variable,
// and is disabled by default. Recommended chunk size is local subdomain
// size. This will result in optimized read/write on the same decomposition
// layout, but poor performance for different access patterns, for example the
// slabs used by cluster-FFT functions.
//
//                                   WARNING!
//
// All processors must define the same chunk size, the behavior of this function
// is not defined otherwise. This implies that chunking should be disabled when
// running on a strange number of cores, and subdomain sizes are non-uniform.
// -----------------------------------------------------------------------------
{
  hsize_t ndp1 = n_dims + 1;
  hsize_t *a_nint = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_ntot = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_strt = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *stride = (hsize_t*) malloc(ndp1*sizeof(hsize_t));

  int i;
  for (i=0; i<n_dims; ++i) {
    a_nint[i] = A_nint[i]; // Selection size, target and destination
    l_ntot[i] = L_ntot[i]; // Memory space total size
    l_strt[i] = L_strt[i]; // Memory space selection start
    stride[i] = 1;
  }
  a_nint[ndp1 - 1] = 1;
  l_ntot[ndp1 - 1] = n_prim;
  stride[ndp1 - 1] = n_prim;

  // Here we create the following property lists:
  //
  // file access property list   ........ for the call to H5Fopen
  // dset creation property list ........ for the call to H5Dcreate
  // dset transfer property list ........ for the call to H5Dwrite
  // ---------------------------------------------------------------------------
  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
  hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
  hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

  // Here we define collective (MPI) access to the file with alignment
  // properties optimized for the local file system, according to DiskBlockSize.
  // ---------------------------------------------------------------------------
  if (EnableChunking) {
    H5Pset_chunk(dcpl, n_dims, ChunkSize);
  }
  if (EnableAlignment) {
    H5Pset_alignment(fapl, AlignThreshold, DiskBlockSize);
  }

  H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
  H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

  hid_t file = H5Fopen(fname, H5F_ACC_RDWR, fapl);
  const int overwrite = H5Lexists(file, "prim", H5P_DEFAULT);
  hid_t prim = overwrite ? H5Gopen(file, "prim", H5P_DEFAULT) :
    H5Gcreate(file, "prim", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  hid_t mspc = H5Screate_simple(ndp1  , l_ntot, NULL);
  hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);
  // Call signature to H5Sselect_hyperslab is (start, stride, count, block)
  // ---------------------------------------------------------------------------
  const clock_t start_all = clock();

  for (i=0; i<n_prim; ++i) {

    hid_t dset = overwrite ? H5Dopen(prim, pnames[i], H5P_DEFAULT) : 
      H5Dcreate(prim, pnames[i], H5T_NATIVE_DOUBLE, fspc,
		H5P_DEFAULT, dcpl, H5P_DEFAULT);

    l_strt[ndp1 - 1] = i;
    H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, a_nint, NULL);
    H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt,   NULL, A_nint, NULL);
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspc, fspc, dxpl, data);
    H5Dclose(dset);
  }
  if (iolog) {
    const double sec = (double)(clock() - start_all) / CLOCKS_PER_SEC;
    fprintf(iolog, "[h5mpi] write to %s took %f minutes\n", fname, sec/60.0);
    fflush(iolog);
  }

  free(a_nint);
  free(l_ntot);
  free(l_strt);

  // Always close the hid_t handles in the reverse order they were opened in.
  // ---------------------------------------------------------------------------
  H5Sclose(fspc);
  H5Sclose(mspc);
  H5Gclose(prim);
  H5Fclose(file);
  H5Pclose(dxpl);
  H5Pclose(dcpl);
  H5Pclose(fapl);
}
Developer: darien0, Project: Mara, Lines: 100, Source: h5mpi.c


Example 5: FTI_WriteHDF5Var

int FTI_WriteHDF5Var(FTIT_dataset *FTI_DataVar)
{
    int j;
    hsize_t dimLength[32];
    char str[FTI_BUFS];
    int res;
    hid_t dcpl;

    for (j = 0; j < FTI_DataVar->rank; j++) {
        dimLength[j] = FTI_DataVar->dimLength[j];
    }

    dcpl = H5Pcreate (H5P_DATASET_CREATE);
    res = H5Pset_fletcher32 (dcpl);
    res = H5Pset_chunk (dcpl, FTI_DataVar->rank, dimLength);

    hid_t dataspace = H5Screate_simple( FTI_DataVar->rank, dimLength, NULL);
    hid_t dataset = H5Dcreate2 ( FTI_DataVar->h5group->h5groupID, FTI_DataVar->name,FTI_DataVar->type->h5datatype, dataspace,  H5P_DEFAULT, dcpl , H5P_DEFAULT);

    // If my data are stored in the CPU side
    // Just store the data to the file and return;
#ifdef GPUSUPPORT    
    if ( !FTI_DataVar->isDevicePtr ){
#endif
        res = H5Dwrite(dataset,FTI_DataVar->type->h5datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, FTI_DataVar->ptr);  
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }

        res = H5Pclose (dcpl);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }

        res = H5Dclose(dataset);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        res = H5Sclose(dataspace);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        return FTI_SCES;
#ifdef GPUSUPPORT        
    }

    // This code is only executed in the GPU case.

    hsize_t *count = (hsize_t*) malloc (sizeof(hsize_t)*FTI_DataVar->rank); 
    hsize_t *offset= (hsize_t*) calloc (FTI_DataVar->rank,sizeof(hsize_t)); 

    if ( !count|| !offset){
        sprintf(str, "Could Not allocate count and offset regions");
        FTI_Print(str, FTI_EROR);
        return FTI_NSCS;
    }


    hsize_t seperator;
    hsize_t fetchBytes = FTI_getHostBuffSize();
    fetchBytes = FTI_calculateCountDim(FTI_DataVar->eleSize, fetchBytes ,count, FTI_DataVar->rank, dimLength, &seperator);
    sprintf(str,"GPU-Device Message: I Will Fetch %lld Bytes Per Stream Request", fetchBytes);
    FTI_Print(str,FTI_DBUG);


    FTIT_data_prefetch prefetcher;
    prefetcher.fetchSize = fetchBytes;
    prefetcher.totalBytesToFetch = FTI_DataVar->size;
    prefetcher.isDevice = FTI_DataVar->isDevicePtr;
    prefetcher.dptr = FTI_DataVar->devicePtr;
    size_t bytesToWrite;
    FTI_InitPrefetcher(&prefetcher);
    unsigned char *basePtr = NULL;


    if ( FTI_Try(FTI_getPrefetchedData(&prefetcher, &bytesToWrite, &basePtr), "Fetch next memory block from GPU to write to HDF5") !=  FTI_SCES){
        return FTI_NSCS;
    }

    while( basePtr  ){
        res = FTI_WriteElements( dataspace, FTI_DataVar->type->h5datatype, dataset, count, offset, FTI_DataVar->rank , basePtr);
        if (res != FTI_SCES ) {
            free(offset);
            free(count);
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        FTI_AdvanceOffset(seperator, offset,count, dimLength, FTI_DataVar->rank);

        if ( FTI_Try(FTI_getPrefetchedData(&prefetcher, &bytesToWrite, &basePtr), 
                    "Fetch next memory block from GPU to write to HDF5") !=  FTI_SCES){
//......... (some code omitted here) .........
Developer: leobago, Project: fti, Lines: 101, Source: hdf5.c


Example 6: create_deflate_dsets_float

/*-------------------------------------------------------------------------
 * Function:    create_deflate_dsets_float
 *
 * Purpose:     Create a dataset of FLOAT datatype with deflate filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Raymond Lu
 *              29 March 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_deflate_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
#ifdef H5_HAVE_FILTER_DEFLATE
    hid_t       dataset         = -1;   /* dataset handles */
    hid_t       dcpl            = -1;
    float       data[NX][NY];           /* data to write */
    float       fillvalue = -2.2f;
    hsize_t     chunk[RANK] = {CHUNK0, CHUNK1};
    int         i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = ((float)(i + j + 1))/3;
    }

    /*
     * Create the dataset creation property list, add the deflate
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_deflate (dcpl, 6) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace, little
     * endian datatype and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME16, H5T_IEEE_F32LE, fsid,
            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if((dataset = H5Dcreate2(fid, DATASETNAME17, H5T_IEEE_F32BE, fsid,
            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR

#else /* H5_HAVE_FILTER_DEFLATE */
    const char          *not_supported= "Deflate filter is not enabled. Can't create the dataset.";

    puts(not_supported);
#endif /* H5_HAVE_FILTER_DEFLATE */

    return 0;

#ifdef H5_HAVE_FILTER_DEFLATE
error:
    H5E_BEGIN_TRY {
        H5Pclose(dcpl);
        H5Dclose(dataset);
    } H5E_END_TRY;

    return -1;
#endif /* H5_HAVE_FILTER_DEFLATE */
}
Developer: Starlink, Project: hdf5, Lines: 98, Source: gen_cross.c


Example 7: main


//......... (some code omitted here) .........
	 int ncid, grpid, nvars, ngatts, ndims, unlimdimid, ngrps;
	 char name_in[NC_MAX_NAME + 1];
	 nc_type xtype_in;
	 int ndims_in, natts_in, dimid_in[NDIMS];

/*	 nc_set_log_level(5);*/
	 if (nc_open(FILE_NAME, NC_NOWRITE, &ncid)) ERR;
	 if (nc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid)) ERR;
	 if (ndims != 2 || nvars != 0 || ngatts != 0 || unlimdimid != -1) ERR;
	 if (nc_inq_grps(ncid, &ngrps, &grpid)) ERR;
	 if (ngrps != 1) ERR;
	 if (nc_inq(grpid, &ndims, &nvars, &ngatts, &unlimdimid)) ERR;
	 if (ndims != 0 || nvars != 1 || ngatts != 0 || unlimdimid != -1) ERR;

	 if (nc_inq_var(grpid, 0, name_in, &xtype_in, &ndims_in, dimid_in,
			&natts_in)) ERR;
	 if (strcmp(name_in, VAR_NAME) || xtype_in != NC_SHORT || ndims_in != NDIMS ||
	     dimid_in[0] != 0 || dimid_in[1] != 1 || natts_in != 0) ERR;

	 if (nc_close(ncid)) ERR;
      }
   }
   SUMMARIZE_ERR;
#ifdef USE_SZIP
   printf("*** testing HDF5 compatibility with szip...");
   {

#define DEFLATE_LEVEL 9
#define MAX_NAME 100
#define NUM_CD_ELEM 10
/* HDF5 defines this... */
#define DEFLATE_NAME "deflate"
#define DIM1_LEN 3000
#define GRP_NAME "George_Washington"
#define BATTLE_RECORD "Battle_Record"

      hid_t fileid, grpid, spaceid, datasetid;
      int data_out[DIM1_LEN], data_in[DIM1_LEN];
      hsize_t dims[1] = {DIM1_LEN};
      hid_t propid;
      char name_in[MAX_NAME + 1];
      int ncid, ndims_in, nvars_in, ngatts_in, unlimdimid_in, ngrps_in;
      int nc_grpid;
      int dimid_in[1], natts_in;

      nc_type xtype_in;
      int i;

      for (i = 0; i < DIM1_LEN; i++)
	 data_out[i] = i;

      /* Open file and create group. */
      if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT,
			      H5P_DEFAULT)) < 0) ERR;
      if ((grpid = H5Gcreate(fileid, GRP_NAME, 0)) < 0) ERR;

      /* Write an array of ints, with szip compression. */
      if ((propid = H5Pcreate(H5P_DATASET_CREATE)) < 0) ERR;
      if (H5Pset_layout(propid, H5D_CHUNKED)) ERR;
      if (H5Pset_chunk(propid, 1, dims)) ERR;
      if (H5Pset_szip(propid, H5_SZIP_EC_OPTION_MASK, 32)) ERR;
      if ((spaceid = H5Screate_simple(1, dims, dims)) < 0) ERR;
      if ((datasetid = H5Dcreate(grpid, BATTLE_RECORD, H5T_NATIVE_INT,
				 spaceid, propid)) < 0) ERR;
      if (H5Dwrite(datasetid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
		   data_out) < 0) ERR;
      if (H5Dclose(datasetid) < 0 ||
	  H5Pclose(propid) < 0 ||
	  H5Sclose(spaceid) < 0 ||
	  H5Gclose(grpid) < 0 ||
	  H5Fclose(fileid) < 0)
	 ERR;

      /* Open the file with netCDF and check it. */
      if (nc_open(FILE_NAME, NC_NOWRITE, &ncid)) ERR;
      if (nc_inq(ncid, &ndims_in, &nvars_in, &ngatts_in, &unlimdimid_in)) ERR;
      if (ndims_in != 0 || nvars_in != 0 || ngatts_in != 0 || unlimdimid_in != -1) ERR;
      if (nc_inq_grps(ncid, &ngrps_in, &nc_grpid)) ERR;
      if (ngrps_in != 1) ERR;
      if (nc_inq(nc_grpid, &ndims_in, &nvars_in, &ngatts_in, &unlimdimid_in)) ERR;
      if (ndims_in != 1 || nvars_in != 1 || ngatts_in != 0 || unlimdimid_in != -1) ERR;

      /* Check the variable. */
      if (nc_inq_var(nc_grpid, 0, name_in, &xtype_in, &ndims_in, dimid_in,
      		     &natts_in)) ERR;
      if (strcmp(name_in, BATTLE_RECORD) || xtype_in != NC_INT || ndims_in != 1 ||
      	  dimid_in[0] != 0 || natts_in != 0) ERR;

      /* Check the data. */
      if (nc_get_var(nc_grpid, 0, data_in)) ERR;
      for (i = 0; i < DIM1_LEN; i++)
	 if (data_in[i] != data_out[i]) ERR;

      if (nc_close(ncid)) ERR;

   }
   SUMMARIZE_ERR;
#endif /* USE_SZIP */
   FINAL_RESULTS;
}
Developer: Unidata, Project: netcdf-c, Lines: 101, Source: tst_interops5.c


Example 8: fname

AccessTraceWriter::AccessTraceWriter(g_string _fname, uint32_t numChildren) : fname(_fname) {
    // Create record structure
    hid_t accType = H5Tenum_create(H5T_NATIVE_USHORT);
    uint16_t val;
    H5Tenum_insert(accType, "GETS", (val=GETS,&val));
    H5Tenum_insert(accType, "GETX", (val=GETX,&val));
    H5Tenum_insert(accType, "PUTS", (val=PUTS,&val));
    H5Tenum_insert(accType, "PUTX", (val=PUTX,&val));

    size_t offset = 0;
    size_t size = H5Tget_size(H5T_NATIVE_ULONG)*2 + H5Tget_size(H5T_NATIVE_UINT) + H5Tget_size(H5T_NATIVE_USHORT) + H5Tget_size(accType);
    hid_t recType = H5Tcreate(H5T_COMPOUND, size);
    auto insertType = [&](const char* name, hid_t type) {
        H5Tinsert(recType, name, offset, type);
        offset += H5Tget_size(type);
    };

    insertType("lineAddr", H5T_NATIVE_ULONG);
    insertType("cycle", H5T_NATIVE_ULONG);
    insertType("lat", H5T_NATIVE_UINT);
    insertType("childId", H5T_NATIVE_USHORT);
    insertType("accType", accType);

    hid_t fid = H5Fcreate(fname.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if (fid == H5I_INVALID_HID) panic("Could not create HDF5 file %s", fname.c_str());
    
    // HACK: We want to use the SHUF filter... create the raw dataset instead of the packet table
    // hid_t table = H5PTcreate_fl(fid, "accs", recType, PT_CHUNKSIZE, 9);
    // if (table == H5I_INVALID_HID) panic("Could not create HDF5 packet table");
    hsize_t dims[1] = {0};
    hsize_t dims_chunk[1] = {PT_CHUNKSIZE};
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    hid_t space_id = H5Screate_simple(1, dims, maxdims);

    hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(plist_id, 1, dims_chunk);
    H5Pset_shuffle(plist_id);
    H5Pset_deflate(plist_id, 9);

    hid_t table = H5Dcreate2(fid, "accs", recType, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT);
    if (table == H5I_INVALID_HID) panic("Could not create HDF5 dataset");
    H5Dclose(table);

    // info("%ld %ld %ld %ld", sizeof(PackedAccessRecord), size, offset, H5Tget_size(recType));
    assert(offset == size);
    assert(size == sizeof(PackedAccessRecord));

    hid_t ncAttr = H5Acreate2(fid, "numChildren", H5T_NATIVE_UINT, H5Screate(H5S_SCALAR), H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(ncAttr, H5T_NATIVE_UINT, &numChildren);
    H5Aclose(ncAttr);

    hid_t fAttr = H5Acreate2(fid, "finished", H5T_NATIVE_UINT, H5Screate(H5S_SCALAR), H5P_DEFAULT, H5P_DEFAULT);
    uint32_t finished = 0;
    H5Awrite(fAttr, H5T_NATIVE_UINT, &finished);
    H5Aclose(fAttr);

    H5Fclose(fid);

    // Initialize buffer
    buf = gm_calloc<PackedAccessRecord>(PT_CHUNKSIZE);
    cur = 0;
    max = PT_CHUNKSIZE;
    assert((uint32_t)(((char*) &buf[1]) - ((char*) &buf[0])) == sizeof(PackedAccessRecord));
}
Developer: Luffy0011, Project: zsim, Lines: 64, Source: access_tracing.cpp


Example 9: PYTABLE_make_array

/*+++++++++++++++++++++++++
.IDENTifer   PYTABLE_make_array
.PURPOSE     create extensible HDF5 dataset
.INPUT/OUTPUT
  call as    stat = PYTABLE_make_array( locID, dset_name, title, rank, dims,
			                extdim, typeID, dims_chunk, fill_data,
			                compress, shuffle, fletcher32, buff );
     input:
            hid_t locID           :  HDF5 identifier of file or group
	    char *dset_name       :  name of dataset
	    char *title           :
	    int rank              :  number of dimensions
	    hsize_t *dims         :  size of each dimension
	    int extdim            :  index of extendable dimension
	    hid_t typeID          :  data type (HDF5 identifier)
	    hsize_t *dims_chunk   :  chunk sizes
	    void *fill_data       :  Fill value for data
	    unsigned int compress :  compression level (zero for no compression)
	    bool shuffle          :  shuffle data for better compression
	    bool fletcher32       :  
	    void *buffer          :  buffer with data to write (or NULL)
	    
.RETURNS     A negative value is returned on failure. 
.COMMENTS    none
-------------------------*/
herr_t PYTABLE_make_array( hid_t locID, const char *dset_name, 
			   const char *title, const int rank, 
			   const hsize_t *dims, int extdim, hid_t typeID,
			   const hsize_t *dims_chunk, void *fill_data,
			   unsigned int compress, bool shuffle, 
			   bool fletcher32, const void *buffer )
{
     register int ni;

     hid_t   dataID = -1, spaceID = -1;
     herr_t  stat;

/* check if the array has to be chunked or not */
     if ( dims_chunk != NULL ) {
	  hid_t   plistID;

	  hsize_t *maxdims = (hsize_t *) malloc( rank * sizeof(hsize_t) );
	  if ( maxdims == NULL )
	       NADC_GOTO_ERROR( NADC_ERR_ALLOC, "maxdims" );

	  for ( ni = 0; ni < rank; ni++ ) {
	       if ( ni == extdim )
		    maxdims[ni] = H5S_UNLIMITED;
	       else
		    maxdims[ni] = 
			 dims[ni] < dims_chunk[ni] ? dims_chunk[ni] : dims[ni];
	  }
	  spaceID = H5Screate_simple( rank, dims, maxdims );
	  free( maxdims );
	  if ( spaceID < 0 ) NADC_GOTO_ERROR( NADC_ERR_HDF_SPACE, "" );

	  /* Modify dataset creation properties, i.e. enable chunking  */
	  plistID = H5Pcreate( H5P_DATASET_CREATE );
	  if ( H5Pset_chunk( plistID, rank, dims_chunk ) < 0 ) goto done;

          /* set the fill value using a struct as the data type */
	  if ( fill_data != NULL 
	       && H5Pset_fill_value( plistID, typeID, fill_data ) < 0 )
	       goto done;

          /* dataset creation property list is modified to use */
          /* fletcher must be first */
	  if ( fletcher32 ) {
	       if ( H5Pset_fletcher32( plistID ) < 0 ) goto done;
	  }
          /* then shuffle */
	  if ( shuffle ) {
	       if ( H5Pset_shuffle( plistID ) < 0 ) goto done;
	  }
          /* finally compression */
	  if ( compress > 0 ) {
	       if ( H5Pset_deflate( plistID, compress ) < 0 ) goto done;
	  }
          /* create the (chunked) dataset */
	  dataID = H5Dcreate( locID, dset_name, typeID, spaceID, 
			      H5P_DEFAULT, plistID, H5P_DEFAULT );
	  if ( dataID < 0 ) 
	       NADC_GOTO_ERROR( NADC_ERR_HDF_DATA, dset_name );

          /* end access to the property list */
	  if ( H5Pclose( plistID ) < 0 ) goto done;
     } else {
	  spaceID = H5Screate_simple( rank, dims, NULL );
	  if ( spaceID < 0 ) return -1;

          /* create the dataset (not chunked) */
	  dataID = H5Dcreate( locID, dset_name, typeID, spaceID, 
			      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT );
	  if ( dataID < 0 ) 
	       NADC_GOTO_ERROR( NADC_ERR_HDF_DATA, dset_name );
     }
/*
 * write the data
 */
     stat = H5Dwrite( dataID, typeID, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer );
//......... (some code omitted here) .........
Developer: rmvanhees, Project: nadc_tools, Lines: 101, Source: nadc_pytable_api.c


Example 10: H5CreateOrOpenGroup

void RegionalSummary::writeH5(hid_t &file_id, string group_name) {
  herr_t status;

  hid_t group_id = H5CreateOrOpenGroup(file_id, group_name);
  hid_t dataset_id;
  hid_t dataspace_id;

  hsize_t  h5_dims[1];

  // region_origin
  vector<unsigned int> coord_buf(2,0);
  coord_buf[0] = origin_.first;
  coord_buf[1] = origin_.second;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "region_origin", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // region_dim
  coord_buf[0] = dim_.first;
  coord_buf[1] = dim_.second;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "region_dim", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // n_err_
  h5_dims[0] = 1;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "n_err", H5T_NATIVE_UINT_LEAST64, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &n_err_);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // n_aligned_
  h5_dims[0] = 1;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "n_aligned", H5T_NATIVE_UINT_LEAST64, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &n_aligned_);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // data_dim
  AssertDims();
  coord_buf[0] = n_flow_;
  coord_buf[1] = max_hp_;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "data_dim", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // hp_count_ and hp_err_
  hsize_t  dims[2];
  dims[0] = n_flow_;
  dims[1] = max_hp_;
  vector<uint64_t> data_buf;
  hsize_t  cdims[2];
  hid_t plist_id;

  // hp_count_
  LoadDataBuffer(max_hp_,n_flow_,data_buf,hp_count_);
  dataspace_id = H5Screate_simple (2, dims, NULL);
  plist_id  = H5Pcreate (H5P_DATASET_CREATE);
  cdims[0] = min(n_flow_,(unsigned int) 20);
  cdims[1] = min(max_hp_,(unsigned int) 20);
  status = H5Pset_chunk (plist_id, 2, cdims);
  status = H5Pset_deflate (plist_id, 9); 
  dataset_id = H5Dcreate2 (group_id, "hp_count", H5T_NATIVE_UINT_LEAST64, dataspace_id, H5P_DEFAULT, plist_id, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Pclose (plist_id);
  status = H5Sclose (dataspace_id);

  // hp_err_
  LoadDataBuffer(max_hp_,n_flow_,data_buf,hp_err_);
  dataspace_id = H5Screate_simple (2, dims, NULL);
  plist_id  = H5Pcreate (H5P_DATASET_CREATE);
  cdims[0] = min(n_flow_,(unsigned int) 20);
  cdims[1] = min(max_hp_,(unsigned int) 20);
  status = H5Pset_chunk (plist_id, 2, cdims);
  status = H5Pset_deflate (plist_id, 9); 
  dataset_id = H5Dcreate2 (group_id, "hp_err", H5T_NATIVE_UINT_LEAST64, dataspace_id, H5P_DEFAULT, plist_id, H5P_DEFAULT); 
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Pclose (plist_id);
  status = H5Sclose (dataspace_id);

  status = H5Gclose (group_id);
}
Developer: Brainiarc7, Project: TS, Lines: 95, Source: ionstats_data.cpp


Example 11: H5Eset_auto2


//......... (some code omitted here) .........
  for (int n = 0; n != nuc_size; n++) {
    if (0 < comp.count(nuclides[n]))
      (*mat_data).comp[n] = comp[nuclides[n]];
    else
      (*mat_data).comp[n] = 0.0;
  };

  // get / make the data set
  bool datapath_exists = h5wrap::path_exists(db, datapath);
  if (datapath_exists) {
    data_set = H5Dopen2(db, datapath.c_str(), H5P_DEFAULT);
    data_space = H5Dget_space(data_set);
    data_rank = H5Sget_simple_extent_dims(data_space, data_dims, data_max_dims);

    // Determine the row size.
    if (std::signbit(row))
      row_num = data_dims[0] + row;  // careful, row is negative

    if (data_dims[0] <= row_num) {
      // row == -0, extend to data set so that we can append, or
      // row_num is larger than current dimension, resize to accommodate.
      data_dims[0] = row_num + 1;
      H5Dset_extent(data_set, data_dims);
    }

    data_offset[0] = row_num;
  } else {
    // Get full space
    data_space = H5Screate_simple(1, data_dims, data_max_dims);

    // Make data set properties to enable chunking
    hid_t data_set_params = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t chunk_dims[1] ={chunksize}; 
    H5Pset_chunk(data_set_params, 1, chunk_dims);
    H5Pset_deflate(data_set_params, 1);

    material_struct * data_fill_value  = new material_struct[material_struct_size];
    (*data_fill_value).mass = -1.0;
    (*data_fill_value).density= -1.0;
    (*data_fill_value).atoms_per_mol = -1.0;
    for (int n = 0; n != nuc_size; n++)
      (*data_fill_value).comp[n] = 0.0;
    H5Pset_fill_value(data_set_params, desc, &data_fill_value);

    // Create the data set
    data_set = H5Dcreate2(db, datapath.c_str(), desc, data_space, H5P_DEFAULT, 
                            data_set_params, H5P_DEFAULT);
    H5Dset_extent(data_set, data_dims);

    // Add attribute pointing to nuc path
    hid_t nuc_attr_type = H5Tcopy(H5T_C_S1);
    H5Tset_size(nuc_attr_type, nucpath.length());
    hid_t nuc_attr_space = H5Screate(H5S_SCALAR);
    hid_t nuc_attr = H5Acreate2(data_set, "nucpath", nuc_attr_type, nuc_attr_space, 
                                H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(nuc_attr, nuc_attr_type, nucpath.c_str());
    H5Fflush(db, H5F_SCOPE_GLOBAL);

    // Remember to de-allocate
    delete[] data_fill_value;
  };

  // Get the data hyperslab
  data_hyperslab = H5Dget_space(data_set);
  hsize_t data_count[1] = {1};
  H5Sselect_hyperslab(data_hyperslab, H5S_SELECT_SET, data_offset, NULL, data_count, NULL);
Developer: crbates, Project: pyne, Lines: 67, Source: material.cpp


Example 12: main

int
main(void)
{
    hid_t faplid        = -1;   /* file access property list ID (all files) */

    hid_t src_sid       = -1;   /* source dataset's dataspace ID            */
    hid_t src_dcplid    = -1;   /* source dataset property list ID          */

    hid_t vds_sid       = -1;   /* VDS dataspace ID                         */
    hid_t vds_dcplid    = -1;   /* VDS dataset property list ID             */

    hid_t fid           = -1;   /* HDF5 file ID                             */
    hid_t did           = -1;   /* dataset ID                               */

    hsize_t start[RANK];        /* starting point for hyperslab             */
    int map_start       = -1;   /* starting point in the VDS map            */

    int i;                      /* iterator                         */


    /* Start by creating the virtual dataset (VDS) dataspace and creation
     * property list. The individual source datasets are then created
     * and the VDS map (stored in the VDS property list) is updated.
     */

    /* Create VDS dcpl */
    if((vds_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(vds_dcplid, VDS_DATATYPE,
                &VDS_FILL_VALUE) < 0)
        TEST_ERROR

    /* Create VDS dataspace */
    if((vds_sid = H5Screate_simple(RANK, VDS_DIMS,
                    VDS_MAX_DIMS)) < 0)
        TEST_ERROR

    /************************************
     * Create source files and datasets *
     ************************************/

    start[0] = 0;
    start[1] = 0;
    start[2] = 0;
    map_start = 0;

    /* All SWMR files need to use the latest file format */
    if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        TEST_ERROR
    if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        TEST_ERROR

    for(i = 0; i < N_SOURCES; i++) {

        /* source dataset dcpl */
        if((src_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
            TEST_ERROR
        if(H5Pset_chunk(src_dcplid, RANK, PLANES[i]) < 0)
            TEST_ERROR
        if(H5Pset_fill_value(src_dcplid, SOURCE_DATATYPE,
                    &FILL_VALUES[i]) < 0)
            TEST_ERROR

        /* Use a mix of compressed and uncompressed datasets */
        if(0 != i % 2)
            if(H5Pset_deflate(src_dcplid, COMPRESSION_LEVEL) < 0)
                TEST_ERROR

        /* Create source file, dataspace, and dataset */
        if((fid = H5Fcreate(FILE_NAMES[i], H5F_ACC_TRUNC,
                        H5P_DEFAULT, faplid)) < 0)
            TEST_ERROR
        if((src_sid = H5Screate_simple(RANK, DIMS[i],
                        MAX_DIMS[i])) < 0)
            TEST_ERROR
        if((did = H5Dcreate2(fid, SOURCE_DSET_NAME,
                        SOURCE_DATATYPE, src_sid,
                        H5P_DEFAULT, src_dcplid, H5P_DEFAULT)) < 0)
            TEST_ERROR

        /* set up hyperslabs for source and destination datasets */
        start[1] = 0;
        if(H5Sselect_hyperslab(src_sid, H5S_SELECT_SET, start, NULL,
                    MAX_DIMS[i], NULL) < 0)
            TEST_ERROR
        start[1] = map_start;
        if(H5Sselect_hyperslab(vds_sid, H5S_SELECT_SET, start, NULL,
                    MAX_DIMS[i], NULL) < 0)
            TEST_ERROR
        map_start += PLANES[i][1];

        /* Add VDS mapping */
        if(H5Pset_virtual(vds_dcplid, vds_sid, FILE_NAMES[i],
                    SOURCE_DSET_PATH, src_sid) < 0)
            TEST_ERROR

        /* close */
        if(H5Sclose(src_sid) < 0)
            TEST_ERROR
        if(H5Pclose(src_dcplid) < 0)
//......... (some code omitted here) .........
Developer: Starlink, Project: hdf5, Lines: 101, Source: vds_swmr_gen.c


Example 13: create_nbit_dsets_float

/*-------------------------------------------------------------------------
 * Function:    create_nbit_dsets_float
 *
 * Purpose:     Create a dataset of FLOAT datatype with nbit filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Raymond Lu
 *              29 March 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_nbit_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
    hid_t       dataset         = -1;   /* dataset handles */
    hid_t       datatype        = -1;
    hid_t       dcpl            = -1;
    size_t      precision, offset;
    float       data[NX][NY];           /* data to write */
    float       fillvalue = -2.2f;
    hsize_t     chunk[RANK] = {CHUNK0, CHUNK1};
    int         i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = ((float)(i + j + 1))/3;
    }

    /*
     * Create the dataset creation property list, add the N-bit
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_nbit(dcpl) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
        TEST_ERROR

    /* Define user-defined single-precision floating-point type for dataset.
     * A 20-bit little-endian data type. */
    if((datatype = H5Tcopy(H5T_IEEE_F32LE)) < 0)
        TEST_ERROR
    if(H5Tset_fields(datatype, (size_t)26, (size_t)20, (size_t)6, (size_t)7, (size_t)13) < 0)
        TEST_ERROR
    offset = 7;
    if(H5Tset_offset(datatype,offset) < 0)
        TEST_ERROR
    precision = 20;
    if(H5Tset_precision(datatype,precision) < 0)
        TEST_ERROR
    if(H5Tset_size(datatype, (size_t)4) < 0)
        TEST_ERROR
    if(H5Tset_ebias(datatype, (size_t)31) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace,
     * user-defined datatype, and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME22, datatype, fsid,
            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if(H5Tset_order(datatype, H5T_ORDER_BE) < 0)
        TEST_ERROR
    if((dataset = H5Dcreate2(fid, DATASETNAME23, datatype, fsid,
            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR

//......... (some code omitted here) .........
Developer: Starlink, Project: hdf5, Lines: 101, Source: gen_cross.c


Example 14: H5Fcreate
