
📄 h5file.cpp

📁 MIT's photonic-crystal computation program (Meep)
💻 CPP
📖 Page 1 of 2
    for (; cur && strcmp(cur->dataname, dataname); cur = (prev = cur)->next)
      ;
    if (!cur) abort("bug in remove_data: inconsistent get_extending");
    if (prev)
      prev->next = cur->next;
    else
      extending = cur->next;
    delete[] cur->dataname; // allocated with new char[...], so delete[]
    delete cur;
  }

  if (dataset_exists(file_id, dataname)) {
    /* this is hackish ...need to pester HDF5 developers to make
       H5Gunlink a collective operation for parallel mode */
    if (!parallel || am_master()) {
      H5Gunlink(file_id, dataname); /* delete it */
      H5Fflush(file_id, H5F_SCOPE_GLOBAL);
    }
    IF_EXCLUSIVE((void) 0, if (parallel) all_wait());
  }
#endif
}

/* Create a dataset, for writing chunks etc.  Note that, in parallel mode,
   this should be called by *all* processors, even those not writing any
   data. */
void h5file::create_data(const char *dataname, int rank, const int *dims,
                         bool append_data, bool single_precision)
{
#ifdef HAVE_HDF5
  int i;
  hid_t file_id = HID(get_id()), space_id, data_id;
  int rank1;

  CHECK(rank >= 0, "negative rank");

  // stupid HDF5 has problems with rank 0
  rank1 = (rank == 0 && !append_data) ? 1 : rank;

  CHECK(file_id >= 0, "error opening HDF5 output file");

  unset_cur();
  remove_data(dataname); // HDF5 gives error if we H5Dcreate existing dataset

  if (IF_EXCLUSIVE(!parallel || am_master(), 1)) {
    hsize_t *dims_copy = new hsize_t[rank1 + append_data];
    hsize_t *maxdims = new hsize_t[rank1 + append_data];
    hsize_t N = 1;
    for (i = 0; i < rank; ++i)
      N *= (maxdims[i] = dims_copy[i] = dims[i]);
    if (!rank)
      maxdims[0] = dims_copy[0] = 1;
    if (append_data) {
      dims_copy[rank1] = 1;
      maxdims[rank1] = H5S_UNLIMITED;
    }
    space_id = H5Screate_simple(rank1 + append_data, dims_copy, maxdims);
    delete[] maxdims;

    /* For unlimited datasets, we need to specify the size of the
       "chunks" in which the file data is allocated. */
    hid_t prop_id = H5Pcreate(H5P_DATASET_CREATE);
    if (append_data) {
      const int blocksize = 128;
      // make a chunk at least blocksize elements for efficiency
      dims_copy[rank1] = (blocksize + (N - 1)) / N;
      H5Pset_chunk(prop_id, rank1 + 1, dims_copy);
      dims_copy[rank1] = 1;
    }

    delete[] dims_copy;

    hid_t type_id = single_precision ? H5T_NATIVE_FLOAT : H5T_NATIVE_DOUBLE;

    data_id = H5Dcreate(file_id, dataname, type_id, space_id, prop_id);

    H5Pclose(prop_id);
  }
  else {
    data_id = H5Dopen(file_id, dataname);
    CHECK(data_id >= 0, "missing dataset for subsequent processor");
    space_id = H5Dget_space(data_id);

    CHECK(rank1 + append_data == H5Sget_simple_extent_ndims(space_id),
          "file data is inconsistent rank for subsequent processor");

    hsize_t *dims_copy = new hsize_t[rank1 + append_data];
    hsize_t *maxdims = new hsize_t[rank1 + append_data];
    H5Sget_simple_extent_dims(space_id, dims_copy, maxdims);
    CHECK(!append_data || maxdims[rank1] == H5S_UNLIMITED,
          "file data is missing unlimited dimension for append_data");
    delete[] maxdims;
    for (i = 0; i < rank; ++i)
      CHECK(dims[i] == (int) dims_copy[i],
            "file data is inconsistent size for subsequent processor");
    if (rank < rank1)
      CHECK(dims_copy[0] == 1, "rank-0 data is incorrect size");

    delete[] dims_copy;
  }

  set_cur(dataname, &data_id);
  H5Sclose(space_id);

  if (append_data) {
    extending_s *cur = new extending_s;
    cur->dataname = new char[strlen(dataname) + 1];
    strcpy(cur->dataname, dataname);
    cur->dindex = 0;
    cur->next = extending;
    extending = cur;
  }
#else
  abort("not compiled with HDF5, required for HDF5 output");
#endif
}
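/* Editor's usage sketch (not part of the original source): create_data is
   collective, so in parallel mode every process calls it with the same
   global dims, even processes that will write nothing.  The names below
   (file, nx, ny, my_start, my_dims, my_data) are hypothetical; only the
   call sequence is taken from this file.

     int dims[2] = {nx, ny};                            // global dataset size
     file.create_data("ez", 2, dims, false, false);     // all processes
     file.write_chunk(2, my_start, my_dims, my_data);   // writers only
     file.done_writing_chunks();                        // collective
*/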
/* Assumed data already created with append_data == true, and is
   already open; extends it and increments cur_dindex.  Like
   create_data, this is a collective operation and must be called from
   all processes. */
void h5file::extend_data(const char *dataname, int rank, const int *dims)
{
#ifdef HAVE_HDF5
  extending_s *cur = get_extending(dataname);
  CHECK(cur, "extend_data can only be called on extensible data");

  hid_t file_id = HID(get_id()), data_id;
  if (is_cur(dataname))
    data_id = HID(cur_id);
  else {
    data_id = H5Dopen(file_id, dataname);
    set_cur(dataname, &data_id);
  }
  hid_t space_id = H5Dget_space(data_id);

  CHECK(rank + 1 == H5Sget_simple_extent_ndims(space_id),
        "file data is inconsistent rank for subsequent extend_data");
  hsize_t *dims_copy = new hsize_t[rank + 1];
  hsize_t *maxdims = new hsize_t[rank + 1];
  H5Sget_simple_extent_dims(space_id, dims_copy, maxdims);
  CHECK(maxdims[rank] == H5S_UNLIMITED,
        "file data is missing unlimited dimension for extend_data");
  delete[] maxdims;
  for (int i = 0; i < rank; ++i)
    CHECK(dims[i] == (int) dims_copy[i],
          "file data is inconsistent size for subsequent extend_data");

  H5Sclose(space_id);

  // Allocate more space along unlimited direction
  cur->dindex++;
  dims_copy[rank] = cur->dindex + 1;
  H5Dextend(data_id, dims_copy);

  delete[] dims_copy;
#else
  abort("not compiled with HDF5, required for HDF5 output");
#endif
}

/* If append_data is true, dataname is the current dataset, and is
   extensible, then as extend_data; otherwise as create_data. */
void h5file::create_or_extend_data(const char *dataname, int rank,
                                   const int *dims,
                                   bool append_data, bool single_precision)
{
  if (get_extending(dataname))
    extend_data(dataname, rank, dims);
  else
    create_data(dataname, rank, dims, append_data, single_precision);
}
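/* Editor's usage sketch (not part of the original source): for a time
   series, call create_or_extend_data with append_data == true once per
   timestep.  The first call creates the dataset with an extra unlimited
   dimension; each later call extends it by one slice along that
   dimension.  Variable names here are hypothetical.

     for (int t = 0; t < nsteps; ++t) {
       file.create_or_extend_data("ez", 2, dims, true, false);
       file.write_chunk(2, my_start, my_dims, slice);  // slice at step t
       file.done_writing_chunks();
     }
*/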
/*****************************************************************************/

/* Write a chunk of data to dataset <dataname> in HDF5 file.
   The dataset has dimension dims[rank], and we are
   writing a chunk stored at <data> (row-major order) of size
   chunk_dims[rank], starting at chunk_start[rank].

   You *must* have already called create_data for the same dimensions
   (and extend_data, if necessary).

   In the special case of rank == 0 (writing a single datum), chunk_dims[0]
   should still be initialized to 1 (if the given process is writing data)
   or 0 (if it is not).

   This function does *not* need to be called on all CPUs (e.g. those
   that have no data can be skipped). */
void h5file::write_chunk(int rank,
                         const int *chunk_start, const int *chunk_dims,
                         double *data)
{
#ifdef HAVE_HDF5
  int i;
  bool do_write = true;
  hid_t space_id, mem_space_id, data_id = HID(cur_id);
  int rank1;
  extending_s *cur = get_extending(cur_dataname);
  bool append_data = cur != NULL;
  int dindex = cur ? cur->dindex : 0;

  CHECK(data_id >= 0, "create_data must be called before write_chunk");
  CHECK(rank >= 0, "negative rank");
  CHECK(rank > 0 || chunk_dims[0] == 0 || chunk_dims[0] == 1,
        "invalid chunk_dims[0] for rank 0");

  // stupid HDF5 has problems with rank 0
  rank1 = (rank == 0 && !append_data) ? 1 : rank;

  space_id = H5Dget_space(data_id);

  /*******************************************************************/
  /* Before we can write the data to the data set, we must define
     the dimensions and "selections" of the arrays to be read & written: */

  start_t *start = new start_t[rank1 + append_data];
  hsize_t *count = new hsize_t[rank1 + append_data];

  int count_prod = 1;
  for (i = 0; i < rank; ++i) {
    start[i] = chunk_start[i];
    count[i] = chunk_dims[i];
    count_prod *= count[i];
  }
  if (!rank) {
    start[0] = 0;
    count[0] = chunk_dims[0]; // see comment at top
    count_prod *= count[0];
  }
  if (append_data) {
    start[rank1] = dindex;
    count[rank1] = 1;
  }

  if (count_prod > 0) {
    H5Sselect_hyperslab(space_id, H5S_SELECT_SET,
                        start, NULL, count, NULL);
    mem_space_id = H5Screate_simple(!rank1 ? 1 : rank1, count, NULL);
    H5Sselect_all(mem_space_id);
  }
  else { /* this can happen on leftover processes in MPI */
    H5Sselect_none(space_id);
    mem_space_id = H5Scopy(space_id); /* can't create an empty space */
    H5Sselect_none(mem_space_id);
    do_write = false; /* HDF5 complains about empty dataspaces */
  }

  delete[] start;
  delete[] count;

  /*******************************************************************/
  /* Write the data, then free all the stuff we've allocated. */

  if (do_write)
    H5Dwrite(data_id,
             H5T_NATIVE_DOUBLE, mem_space_id, space_id, H5P_DEFAULT,
             (void *) data);

  H5Sclose(mem_space_id);
  H5Sclose(space_id);
#else
  abort("not compiled with HDF5, required for HDF5 output");
#endif
}

// collective call after completing all write_chunk calls
void h5file::done_writing_chunks()
{
  /* hackery: in order to not deadlock when writing extensible datasets
     with a non-parallel version of HDF5, we need to close the file
     and release the lock after writing extensible chunks ...here,
     I'm assuming(?) that non-extensible datasets will use different
     files, etcetera, for different timesteps.  All of this hackery
     goes away if we just use an MPI-compiled version of HDF5. */
  if (parallel && cur_dataname && get_extending(cur_dataname))
    prevent_deadlock(); // closes id
}
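/* Editor's usage sketch (not part of the original source): in parallel
   mode each process passes its own slab of the global dataset to
   write_chunk.  A 1d decomposition of an nx x ny array over nproc
   processes might look like this (my_rank, nproc, local_data are
   hypothetical; assume nx is divisible by nproc):

     int chunk_start[2] = {my_rank * (nx / nproc), 0};
     int chunk_dims[2]  = {nx / nproc, ny};    // this process's slab
     file.write_chunk(2, chunk_start, chunk_dims, local_data);
     file.done_writing_chunks();               // collective

   For rank == 0, chunk_dims[0] is 1 on the single writing process and 0
   everywhere else, per the comment above write_chunk. */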
void h5file::write(const char *dataname, int rank, const int *dims,
                   double *data, bool single_precision)
{
  if (parallel || am_master()) {
    int *start = new int[rank + 1];
    for (int i = 0; i < rank; i++) start[i] = 0;
    create_data(dataname, rank, dims, false, single_precision);
    if (am_master())
      write_chunk(rank, start, dims, data);
    done_writing_chunks();
    unset_cur();
    delete[] start;
  }
}

void h5file::write(const char *dataname, const char *data)
{
#ifdef HAVE_HDF5
  if (parallel || am_master()) {
    hid_t file_id = HID(get_id()), type_id, data_id, space_id;

    CHECK(file_id >= 0, "error opening HDF5 output file");

    remove_data(dataname); // HDF5 gives error if we H5Dcreate existing dataset

    type_id = H5Tcopy(H5T_C_S1);
    H5Tset_size(type_id, strlen(data) + 1);
    space_id = H5Screate(H5S_SCALAR);

    data_id = H5Dcreate(file_id, dataname, type_id, space_id, H5P_DEFAULT);
    if (am_master())
      H5Dwrite(data_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    H5Sclose(space_id);
    H5Tclose(type_id);
    H5Dclose(data_id);
  }
#else
  abort("not compiled with HDF5, required for HDF5 output");
#endif
}

/*****************************************************************************/

/* Inverse of write_chunk, above.  The caller must first get the
   dataset's rank and dims by calling read_size, above (which also
   opens the dataset for reading). */
void h5file::read_chunk(int rank,
                        const int *chunk_start, const int *chunk_dims,
                        double *data)
{
#ifdef HAVE_HDF5
  bool do_read = true;
  int rank1;
  hid_t space_id, mem_space_id, data_id = HID(cur_id);

  CHECK(data_id >= 0, "read_size must be called before read_chunk");
  CHECK(rank >= 0, "negative rank");
  CHECK(rank > 0 || chunk_dims[0] == 0 || chunk_dims[0] == 1,
        "invalid chunk_dims[0] for rank 0");

  // stupid HDF5 has problems with rank 0
  rank1 = rank == 0 ? 1 : rank;

  space_id = H5Dget_space(data_id);

  /*******************************************************************/
  /* Before we can read the data from the data set, we must define
     the dimensions and "selections" of the arrays to be read & written: */

  start_t *start = new start_t[rank1];
  hsize_t *count = new hsize_t[rank1];

  int count_prod = 1;
  for (int i = 0; i < rank; ++i) {
    start[i] = chunk_start[i];
    count[i] = chunk_dims[i];
    count_prod *= count[i];
  }
  if (!rank) {
    start[0] = 0;
    count[0] = chunk_dims[0]; // see comment at top
    count_prod *= count[0];
  }

  if (count_prod > 0) {
    H5Sselect_hyperslab(space_id, H5S_SELECT_SET,
                        start, NULL, count, NULL);
    mem_space_id = H5Screate_simple(rank1, count, NULL);
    H5Sselect_all(mem_space_id);
  }
  else { /* this can happen on leftover processes in MPI */
    H5Sselect_none(space_id);
    mem_space_id = H5Scopy(space_id); /* can't create an empty space */
    H5Sselect_none(mem_space_id);
    do_read = false; /* HDF5 complains about empty dataspaces */
  }

  delete[] count;
  delete[] start;

  /*******************************************************************/
  /* Read the data, then free all the stuff we've allocated. */

  if (do_read)
    H5Dread(data_id, H5T_NATIVE_DOUBLE, mem_space_id, space_id, H5P_DEFAULT,
            (void *) data);

  H5Sclose(mem_space_id);
  H5Sclose(space_id);
#else
  abort("not compiled with HDF5, required for HDF5 input");
#endif
}

} // namespace meep
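/* Editor's usage sketch (not part of the original source): reading mirrors
   writing.  read_size (defined earlier in this file, not shown on this
   page) opens the dataset and reports its rank and dims; each process then
   reads its own slab with read_chunk.  The read_size call below uses an
   assumed signature, and the names are hypothetical.

     int rank, dims[2];
     file.read_size("ez", &rank, dims, 2);  // assumed signature
     double *buf = new double[chunk_dims[0] * chunk_dims[1]];
     file.read_chunk(rank, chunk_start, chunk_dims, buf);
*/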
