
📄 matrixio.c

📁 Photonic-crystal computation software developed at MIT
💻 C
📖 Page 1 of 2
#else
     return 0;
#endif
}

void matrixio_close_sub(matrixio_id id)
{
#if defined(HAVE_HDF5)
     CHECK(H5Gclose(id) >= 0, "error closing HDF group");
#endif
}

/*****************************************************************************/

matrixio_id matrixio_open_dataset(matrixio_id id,
                                  const char *name,
                                  int rank, const int *dims)
{
#if defined(HAVE_HDF5)
     int i, rank_copy;
     hid_t space_id, data_id;
     hsize_t *dims_copy, *maxdims;

     CHECK((data_id = H5Dopen(id, name)) >= 0, "error in H5Dopen");
     CHECK((space_id = H5Dget_space(data_id)) >= 0,
           "error in H5Dget_space");

     rank_copy = H5Sget_simple_extent_ndims(space_id);
     CHECK(rank == rank_copy, "rank in HDF5 file doesn't match expected rank");

     CHK_MALLOC(dims_copy, hsize_t, rank);
     CHK_MALLOC(maxdims, hsize_t, rank);
     H5Sget_simple_extent_dims(space_id, dims_copy, maxdims);
     free(maxdims);

     for (i = 0; i < rank; ++i) {
          CHECK(dims_copy[i] == dims[i],
                "array size in HDF5 file doesn't match expected size");
     }
     free(dims_copy);

     H5Sclose(space_id);

     return data_id;
#else
     return 0;
#endif
}

/*****************************************************************************/

matrixio_id matrixio_create_dataset(matrixio_id id,
                                    const char *name, const char *description,
                                    int rank, const int *dims)
{
#if defined(HAVE_HDF5)
     int i;
     hid_t space_id, type_id, data_id;
     hsize_t *dims_copy;

     /* delete pre-existing datasets, or we'll have an error; I think
        we can only do this on the master process. (?) */
     if (matrixio_dataset_exists(id, name)) {
          if (mpi_is_master()) {
               matrixio_dataset_delete(id, name);
               H5Fflush(id, H5F_SCOPE_GLOBAL);
          }
          IF_EXCLUSIVE(0, MPI_Barrier(MPI_COMM_WORLD));
     }

     CHECK(rank > 0, "non-positive rank");

     CHK_MALLOC(dims_copy, hsize_t, rank);
     for (i = 0; i < rank; ++i)
          dims_copy[i] = dims[i];

     space_id = H5Screate_simple(rank, dims_copy, NULL);
     free(dims_copy);

#ifdef SCALAR_SINGLE_PREC
     type_id = H5T_NATIVE_FLOAT;
#else
     type_id = H5T_NATIVE_DOUBLE;
#endif

     /* Create the dataset.  Note that, on parallel machines, H5Dcreate
        should do the right thing; it is supposedly a collective operation. */
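     /* The two IF_EXCLUSIVE(...) branches below behave as follows: in the
        first, only the master process calls H5Dcreate and the other
        processes H5Dopen the resulting dataset; in the second, every
        process calls H5Dcreate.  Which branch is compiled in is chosen by
        the IF_EXCLUSIVE macro defined earlier in this file (not part of
        this excerpt), presumably according to whether the HDF5 file is
        accessed exclusively or collectively. */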
     IF_EXCLUSIVE(
          if (mpi_is_master())
               data_id = H5Dcreate(id, name, type_id, space_id, H5P_DEFAULT);
          else
               data_id = H5Dopen(id, name),
          data_id = H5Dcreate(id, name, type_id, space_id, H5P_DEFAULT));

     H5Sclose(space_id);  /* the dataset should have its own copy now */

     matrixio_write_string_attr(data_id, "description", description);

     return data_id;
#else
     return 0;
#endif
}

void matrixio_close_dataset(matrixio_id data_id)
{
#if defined(HAVE_HDF5)
     CHECK(H5Dclose(data_id) >= 0, "error closing HDF dataset");
#endif
}

int matrixio_dataset_exists(matrixio_id id, const char *name)
{
#if defined(HAVE_HDF5)
     hid_t data_id;

     SUPPRESS_HDF5_ERRORS(data_id = H5Dopen(id, name));
     if (data_id >= 0)
          H5Dclose(data_id);
     return (data_id >= 0);
#else
     return 0;
#endif
}

void matrixio_dataset_delete(matrixio_id id, const char *name)
{
#if defined(HAVE_HDF5)
     H5Gunlink(id, name);
#endif
}

/*****************************************************************************/

void matrixio_write_real_data(matrixio_id data_id,
                              const int *local_dims, const int *local_start,
                              int stride,
                              real *data)
{
#if defined(HAVE_HDF5)
     int rank;
     hsize_t *dims, *maxdims;
     hid_t space_id, type_id, mem_space_id;
     hssize_t *start;
     hsize_t *strides, *count, count_prod;
     int i;
     real *data_copy;
     int data_copy_stride = 1, free_data_copy = 0, do_write = 1;

     /*******************************************************************/
     /* Get dimensions of dataset */

     space_id = H5Dget_space(data_id);
     rank = H5Sget_simple_extent_ndims(space_id);

     CHK_MALLOC(dims, hsize_t, rank);
     CHK_MALLOC(maxdims, hsize_t, rank);
     H5Sget_simple_extent_dims(space_id, dims, maxdims);
     free(maxdims);

#ifdef SCALAR_SINGLE_PREC
     type_id = H5T_NATIVE_FLOAT;
#else
     type_id = H5T_NATIVE_DOUBLE;
#endif

     /*******************************************************************/
     /* if stride > 1, make a contiguous copy; hdf5 is much faster
        in this case. */
     if (stride > 1) {
          int N = 1;
          for (i = 0; i < rank; ++i)
               N *= local_dims[i];
          CHK_MALLOC(data_copy, real, N);
          if (data_copy) {
               free_data_copy = 1;
               for (i = 0; i < (N & 3); ++i)
                    data_copy[i] = data[i * stride];
               for (; i < N; i += 4) {
                    real d0 = data[i * stride];
                    real d1 = data[(i + 1) * stride];
                    real d2 = data[(i + 2) * stride];
                    real d3 = data[(i + 3) * stride];
                    data_copy[i] = d0;
                    data_copy[i+1] = d1;
                    data_copy[i+2] = d2;
                    data_copy[i+3] = d3;
               }
               CHECK(i == N, "bug in matrixio copy routine");
          }
          else {
               data_copy = data;
               data_copy_stride = stride;
          }
     }
     else
          data_copy = data;

     /*******************************************************************/
     /* Before we can write the data to the data set, we must define
        the dimensions and "selections" of the arrays to be read & written: */

     CHK_MALLOC(start, hssize_t, rank);
     CHK_MALLOC(strides, hsize_t, rank);
     CHK_MALLOC(count, hsize_t, rank);

     count_prod = 1;
     for (i = 0; i < rank; ++i) {
          start[i] = local_start[i];
          count[i] = local_dims[i];
          strides[i] = 1;
          count_prod *= count[i];
     }

     if (count_prod > 0) {
          H5Sselect_hyperslab(space_id, H5S_SELECT_SET,
                              start, NULL, count, NULL);
          for (i = 0; i < rank; ++i)
               start[i] = 0;
          strides[rank - 1] = data_copy_stride;
          count[rank - 1] *= data_copy_stride;
          mem_space_id = H5Screate_simple(rank, count, NULL);
          count[rank - 1] = local_dims[rank - 1];
          H5Sselect_hyperslab(mem_space_id, H5S_SELECT_SET,
                              start, data_copy_stride <= 1 ? NULL : strides,
                              count, NULL);
     }
     else { /* this can happen on leftover processes in MPI */
          H5Sselect_none(space_id);
          mem_space_id = H5Scopy(space_id); /* can't create an empty space */
          H5Sselect_none(mem_space_id);
          do_write = 0;  /* HDF5 complains about empty dataspaces otherwise */
     }

     /*******************************************************************/
     /* Write the data, then free all the stuff we've allocated. */

     if (do_write)
          H5Dwrite(data_id, type_id, mem_space_id, space_id, H5P_DEFAULT,
                   (void*) data_copy);

     if (free_data_copy)
          free(data_copy);
     H5Sclose(mem_space_id);
     free(count);
     free(strides);
     free(start);
     free(dims);
     H5Sclose(space_id);
#endif
}

#if defined(HAVE_HDF5)
/* check if the given name is a dataset in group_id, and if so set d
   to point to a char** with a copy of name. */
static herr_t find_dataset(hid_t group_id, const char *name, void *d)
{
     char **dname = (char **) d;
     H5G_stat_t info;

     H5Gget_objinfo(group_id, name, 1, &info);
     if (info.type == H5G_DATASET) {
          CHK_MALLOC(*dname, char, strlen(name) + 1);
          strcpy(*dname, name);
          return 1;
     }
     return 0;
}
#endif

/*****************************************************************************/

/* Read real data from the file/group 'id', from the dataset 'name'.
   If name is NULL, reads from the first dataset in 'id'.

   If data is non-NULL, then data must have dimensions given in rank
   and dims (* stride); actually, what is read in is the hyperslab given by the
   local_dim0* parameters.  The dataset is read into 'data' with the
   given 'stride'.  Returns the data pointer.

   If data is NULL, then upon output rank and dims point to the size
   of the array, and a pointer to the (malloc'ed) data is returned.
   On input, *rank should point to the maximum allowed rank (e.g. the
   length of the dims array)!
   The local_dim* and stride parameters are ignored here.

   Returns NULL if the dataset could not be found in id. */
real *matrixio_read_real_data(matrixio_id id,
                              const char *name,
                              int *rank, int *dims,
                              int local_dim0, int local_dim0_start,
                              int stride,
                              real *data)
{
#if defined(HAVE_HDF5)
     hid_t space_id, type_id, data_id, mem_space_id;
     hsize_t *dims_copy, *maxdims;
     char *dname;
     int i;

     CHECK(*rank > 0, "non-positive rank");

     /*******************************************************************/
     /* Open the data set and check the dimensions: */

     if (name) {
          CHK_MALLOC(dname, char, strlen(name) + 1);
          strcpy(dname, name);
     }
     else {
          if (H5Giterate(id, "/", NULL, find_dataset, &dname) < 0)
               return NULL;
     }

     SUPPRESS_HDF5_ERRORS(data_id = H5Dopen(id, dname));
     free(dname);
     if (data_id < 0)
          return NULL;

     CHECK((space_id = H5Dget_space(data_id)) >= 0,
           "error in H5Dget_space");

     {
          int filerank = H5Sget_simple_extent_ndims(space_id);
          if (data) {
               CHECK(*rank == filerank,
                     "rank in HDF5 file doesn't match expected rank");
          }
          else {
               CHECK(*rank >= filerank,
                     "rank in HDF5 file is too big");
               *rank = filerank;
          }
     }

     CHK_MALLOC(dims_copy, hsize_t, *rank);
     CHK_MALLOC(maxdims, hsize_t, *rank);
     H5Sget_simple_extent_dims(space_id, dims_copy, maxdims);
     free(maxdims);

     if (data)
          for (i = 0; i < *rank; ++i) {
               CHECK(dims_copy[i] == dims[i],
                     "array size in HDF5 file doesn't match expected size");
          }
     else
          for (i = 0; i < *rank; ++i)
               dims[i] = dims_copy[i];

#ifdef SCALAR_SINGLE_PREC
     type_id = H5T_NATIVE_FLOAT;
#else
     type_id = H5T_NATIVE_DOUBLE;
#endif

     /*******************************************************************/
     /* Before we can read the data from the data set, we must define
        the dimensions and "selections" of the arrays to be read & written: */

     if (data) {
          hssize_t *start;
          hsize_t *strides, *count;

          CHK_MALLOC(start, hssize_t, *rank);
          CHK_MALLOC(strides, hsize_t, *rank);
          CHK_MALLOC(count, hsize_t, *rank);

          for (i = 0; i < *rank; ++i) {
               start[i] = 0;
               strides[i] = 1;
               count[i] = dims[i];
          }

          dims_copy[0] = local_dim0;
          dims_copy[*rank - 1] *= stride;
          start[0] = 0;
          strides[*rank - 1] = stride;
          count[0] = local_dim0;
          mem_space_id = H5Screate_simple(*rank, dims_copy, NULL);
          H5Sselect_hyperslab(mem_space_id, H5S_SELECT_SET,
                              start, strides, count, NULL);

          start[0] = local_dim0_start;
          count[0] = local_dim0;
          H5Sselect_hyperslab(space_id, H5S_SELECT_SET,
                              start, NULL, count, NULL);

          free(count);
          free(strides);
          free(start);
     }
     else {
          int N = 1;
          for (i = 0; i < *rank; ++i)
               N *= dims[i];
          CHK_MALLOC(data, real, N);
          mem_space_id = H5S_ALL;
          H5Sclose(space_id);
          space_id = H5S_ALL;
     }

     /*******************************************************************/
     /* Read the data, then free all the H5 identifiers. */

     CHECK(H5Dread(data_id, type_id, mem_space_id, space_id, H5P_DEFAULT,
                   (void*) data) >= 0,
           "error reading HDF5 dataset");

     if (mem_space_id != H5S_ALL)
          H5Sclose(mem_space_id);
     free(dims_copy);
     if (space_id != H5S_ALL)
          H5Sclose(space_id);
     H5Dclose(data_id);

     return data;
#else
     CHECK(0, "no matrixio implementation is linked");
     return NULL;
#endif
}
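A minimal usage sketch, not part of the original matrixio.c: the routine below writes a small 2-D real array into a new dataset and reads it back, using only the functions shown in this excerpt. It assumes HAVE_HDF5 is defined, that `real` is double (SCALAR_SINGLE_PREC unset), that it runs on a single process so the local hyperslab covers the whole dataset, and that the caller already holds a file/group handle obtained from the creation/opening routines in the part of the file not shown here. The function name example_roundtrip and the dataset name "example" are illustrative only.

/* Hedged usage sketch -- not part of the original file.  Assumes
   HAVE_HDF5, real == double, a single process, and a file_id obtained
   from the file-creation/opening routines elsewhere in matrixio.c. */
void example_roundtrip(matrixio_id file_id)
{
     int dims[2] = {4, 3};          /* dataset is 4 x 3 */
     int start[2] = {0, 0};         /* this process writes the whole array */
     int rank = 2, rdims[2], i;     /* on input, rank = maximum allowed rank */
     real out[12], *in;
     matrixio_id data_id;

     for (i = 0; i < 4 * 3; ++i)
          out[i] = (real) i;        /* row-major 4 x 3 test data */

     /* create the dataset, write contiguous data (stride 1), close it */
     data_id = matrixio_create_dataset(file_id, "example", "demo array",
                                       2, dims);
     matrixio_write_real_data(data_id, dims, start, 1, out);
     matrixio_close_dataset(data_id);

     /* read it back, letting matrixio_read_real_data allocate the buffer
        (data == NULL); the local_dim0/stride arguments are ignored here */
     in = matrixio_read_real_data(file_id, "example", &rank, rdims,
                                  0, 0, 1, NULL);
     if (in) {
          /* rank is now 2, rdims holds {4, 3}, in holds the 12 values */
          free(in);
     }
}

Note that the write path takes the dataset handle returned by matrixio_create_dataset, while the read path takes the file/group handle and looks the dataset up by name (or finds the first dataset when name is NULL).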
