📄 hash_rec.c
    }
    if ((ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;
    pagep = NULL;

done:
    *lsnp = argp->prev_lsn;
    ret = 0;

out:
    if (pagep != NULL)
        (void)__memp_fput(mpf, pagep, 0);
    REC_CLOSE;
}

/*
 * __ham_splitdata_recover --
 *
 * PUBLIC: int __ham_splitdata_recover
 * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 */
int
__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
    DB_ENV *dbenv;
    DBT *dbtp;
    DB_LSN *lsnp;
    db_recops op;
    void *info;
{
    __ham_splitdata_args *argp;
    DB *file_dbp;
    DBC *dbc;
    DB_MPOOLFILE *mpf;
    PAGE *pagep;
    u_int32_t flags;
    int cmp_n, cmp_p, ret;

    pagep = NULL;
    COMPQUIET(info, NULL);
    REC_PRINT(__ham_splitdata_print);
    REC_INTRO(__ham_splitdata_read, 1);

    if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
        if (DB_UNDO(op)) {
            if (ret == DB_PAGE_NOTFOUND)
                goto done;
            else {
                ret = __db_pgerr(file_dbp, argp->pgno, ret);
                goto out;
            }
        }
#ifdef HAVE_FTRUNCATE
        /* If the page is not here then it was later truncated. */
        if (!IS_ZERO_LSN(argp->pagelsn))
            goto done;
#endif
        /*
         * This page was created by a group allocation and
         * the file may not have been extended yet.
         * Create the page if necessary.
         */
        if ((ret = __memp_fget(mpf,
            &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
            ret = __db_pgerr(file_dbp, argp->pgno, ret);
            goto out;
        }
    }

    cmp_n = log_compare(lsnp, &LSN(pagep));
    cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
    CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);

    /*
     * There are two types of log messages here, one for the old page
     * and one for the new pages created.  The original image in the
     * SPLITOLD record is used for undo.  The image in the SPLITNEW
     * is used for redo.  We should never have a case where there is
     * a redo operation and the SPLITOLD record is on disk, but not
     * the SPLITNEW record.  Therefore, we only have work to do when we
     * redo NEW messages and undo OLD messages, but we have to update
     * LSNs in both cases.
     */
    flags = 0;
    if (cmp_p == 0 && DB_REDO(op)) {
        if (argp->opcode == SPLITNEW)
            /* Need to redo the split described. */
            memcpy(pagep,
                argp->pageimage.data, argp->pageimage.size);
        LSN(pagep) = *lsnp;
        flags = DB_MPOOL_DIRTY;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
        if (argp->opcode == SPLITOLD) {
            /* Put back the old image. */
            memcpy(pagep,
                argp->pageimage.data, argp->pageimage.size);
        } else
            P_INIT(pagep, file_dbp->pgsize, argp->pgno,
                PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
        LSN(pagep) = argp->pagelsn;
        flags = DB_MPOOL_DIRTY;
    }
    if ((ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;
    pagep = NULL;

done:
    *lsnp = argp->prev_lsn;
    ret = 0;

out:
    if (pagep != NULL)
        (void)__memp_fput(mpf, pagep, 0);
    REC_CLOSE;
}

/*
 * __ham_copypage_recover --
 *    Recovery function for copypage.
 *
 * PUBLIC: int __ham_copypage_recover
 * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 */
int
__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
    DB_ENV *dbenv;
    DBT *dbtp;
    DB_LSN *lsnp;
    db_recops op;
    void *info;
{
    __ham_copypage_args *argp;
    DB *file_dbp;
    DBC *dbc;
    DB_MPOOLFILE *mpf;
    PAGE *pagep;
    u_int32_t flags;
    int cmp_n, cmp_p, ret;

    pagep = NULL;
    COMPQUIET(info, NULL);
    REC_PRINT(__ham_copypage_print);
    REC_INTRO(__ham_copypage_read, 1);

    flags = 0;

    /* This is the bucket page. */
    REC_FGET(mpf, argp->pgno, &pagep, donext);

    cmp_n = log_compare(lsnp, &LSN(pagep));
    cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
    CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);

    if (cmp_p == 0 && DB_REDO(op)) {
        /* Need to redo update described. */
        memcpy(pagep, argp->page.data, argp->page.size);
        PGNO(pagep) = argp->pgno;
        PREV_PGNO(pagep) = PGNO_INVALID;
        LSN(pagep) = *lsnp;
        flags = DB_MPOOL_DIRTY;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
        /* Need to undo update described. */
        P_INIT(pagep, file_dbp->pgsize, argp->pgno,
            PGNO_INVALID, argp->next_pgno, 0, P_HASH);
        LSN(pagep) = argp->pagelsn;
        flags = DB_MPOOL_DIRTY;
    }
    if ((ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;
    pagep = NULL;

donext:
    /* Now fix up the "next" page. */
    REC_FGET(mpf, argp->next_pgno, &pagep, do_nn);

    /* For REDO just update the LSN.  For UNDO copy the page back. */
    cmp_n = log_compare(lsnp, &LSN(pagep));
    cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
    CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
    flags = 0;
    if (cmp_p == 0 && DB_REDO(op)) {
        LSN(pagep) = *lsnp;
        flags = DB_MPOOL_DIRTY;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
        /* Need to undo update described. */
        memcpy(pagep, argp->page.data, argp->page.size);
        flags = DB_MPOOL_DIRTY;
    }
    if ((ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;
    pagep = NULL;

    /* Now fix up the next's next page. */
do_nn:
    if (argp->nnext_pgno == PGNO_INVALID)
        goto done;

    REC_FGET(mpf, argp->nnext_pgno, &pagep, done);

    cmp_n = log_compare(lsnp, &LSN(pagep));
    cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn);
    CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nnextlsn);
    flags = 0;
    if (cmp_p == 0 && DB_REDO(op)) {
        /* Need to redo update described. */
        PREV_PGNO(pagep) = argp->pgno;
        LSN(pagep) = *lsnp;
        flags = DB_MPOOL_DIRTY;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
        /* Need to undo update described. */
        PREV_PGNO(pagep) = argp->next_pgno;
        LSN(pagep) = argp->nnextlsn;
        flags = DB_MPOOL_DIRTY;
    }
    if ((ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;
    pagep = NULL;

done:
    *lsnp = argp->prev_lsn;
    ret = 0;

out:
    if (pagep != NULL)
        (void)__memp_fput(mpf, pagep, 0);
    REC_CLOSE;
}

/*
 * __ham_metagroup_recover --
 *    Recovery function for metagroup.
 *
 * PUBLIC: int __ham_metagroup_recover
 * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 */
int
__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
    DB_ENV *dbenv;
    DBT *dbtp;
    DB_LSN *lsnp;
    db_recops op;
    void *info;
{
    __ham_metagroup_args *argp;
    HASH_CURSOR *hcp;
    DB *file_dbp;
    DBMETA *mmeta;
    DBC *dbc;
    DB_MPOOLFILE *mpf;
    PAGE *pagep;
    db_pgno_t pgno;
    u_int32_t flags, mmeta_flags;
    int cmp_n, cmp_p, did_recover, groupgrow, ret;

    COMPQUIET(info, NULL);
    mmeta_flags = 0;
    mmeta = NULL;
    REC_PRINT(__ham_metagroup_print);
    REC_INTRO(__ham_metagroup_read, 1);

    /*
     * This logs the virtual create of pages pgno to pgno + bucket.
     * If HAVE_FTRUNCATE is not supported, the mpool page allocation
     * is not transaction protected, so we can never undo it.  Even in
     * an abort, we have to allocate these pages to the hash table if
     * they were actually created.  In particular, during disaster
     * recovery the metapage may be before this point if we are
     * rolling backward.  If the file has not been extended, then the
     * metapage could not have been updated.
     * The log record contains:
     *    bucket: new bucket being allocated.
     *    pgno: page number of the new bucket.
     * If bucket is a power of 2, then we allocated a whole batch of
     * pages; if it's not, then we simply allocated one new page.
     */
    groupgrow = (u_int32_t)(1 << __db_log2(argp->bucket + 1)) ==
        argp->bucket + 1;
    pgno = argp->pgno;
    if (argp->newalloc)
        pgno += argp->bucket;

    flags = 0;
    pagep = NULL;
#ifndef HAVE_FTRUNCATE
    flags = DB_MPOOL_CREATE;
#endif
    ret = __memp_fget(mpf, &pgno, flags, &pagep);

#ifdef HAVE_FTRUNCATE
    /* If we are undoing, then we don't want to create the page. */
    if (ret != 0 && DB_REDO(op))
        ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep);
    else if (ret == DB_PAGE_NOTFOUND) {
        groupgrow = 0;
        goto do_meta;
    }
#endif
    if (ret != 0) {
        if (ret != ENOSPC)
            goto out;
        pgno = 0;
        groupgrow = 0;
        goto do_meta;
    }

    cmp_n = log_compare(lsnp, &LSN(pagep));
    cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
    CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);

    flags = 0;
    if (cmp_p == 0 && DB_REDO(op)) {
        pagep->lsn = *lsnp;
        flags = DB_MPOOL_DIRTY;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
#ifdef HAVE_FTRUNCATE
        /* If this record allocated the pages, give them back. */
        if (argp->newalloc) {
            if (pagep != NULL && (ret =
                __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0)
                goto out;
            pagep = NULL;
            if ((ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0)
                goto out;
        } else
#endif
        {
            /*
             * Otherwise just roll the page back to its
             * previous state.
             */
            pagep->lsn = argp->pagelsn;
            flags = DB_MPOOL_DIRTY;
        }
    }
    if (pagep != NULL &&
        (ret = __memp_fput(mpf, pagep, flags)) != 0)
        goto out;

do_meta:
    /* Now we have to update the meta-data page. */
    hcp = (HASH_CURSOR *)dbc->internal;
    if ((ret = __ham_get_meta(dbc)) != 0)
        goto out;
    cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
    cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
    CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
    did_recover = 0;
    if (cmp_p == 0 && DB_REDO(op)) {
        /* Redo the actual updating of bucket counts. */
        ++hcp->hdr->max_bucket;
        if (groupgrow) {
            hcp->hdr->low_mask = hcp->hdr->high_mask;
            hcp->hdr->high_mask =
                (argp->bucket + 1) | hcp->hdr->low_mask;
        }
        hcp->hdr->dbmeta.lsn = *lsnp;
        did_recover = 1;
    } else if (cmp_n == 0 && DB_UNDO(op)) {
        /* Undo the actual updating of bucket counts. */
        --hcp->hdr->max_bucket;
        if (groupgrow) {
            hcp->hdr->high_mask = hcp->hdr->low_mask;
            hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
        }
        hcp->hdr->dbmeta.lsn = argp->metalsn;
        did_recover = 1;
    }

    /*
     * Now we need to fix up the spares array.  Each entry in the
     * spares array indicates the beginning page number for the
     * indicated doubling.  We need to fill this in whenever the
     * spares array is invalid; if we never reclaim pages, then we
     * have to allocate the pages to the spares array in both the
     * redo and undo cases.
     */
    if (groupgrow &&
#ifdef HAVE_FTRUNCATE
        !DB_UNDO(op) &&
#endif
        hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] ==
        PGNO_INVALID) {
        hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
            (argp->pgno - argp->bucket) - 1;
        did_recover = 1;
    }
#ifdef HAVE_FTRUNCATE
    if (cmp_n == 0 && groupgrow && DB_UNDO(op)) {
        hcp->hdr->spares[
            __db_log2(argp->bucket + 1) + 1] = PGNO_INVALID;
        did_recover = 1;
    }
#endif

    /*
     * Finally, we need to potentially fix up the last_pgno field
     * in the master meta-data page (which may or may not be the
     * same as the hash header page).
     */
    if (argp->mmpgno != argp->mpgno) {
        if ((ret =
            __memp_fget(mpf, &argp->mmpgno, 0, &mmeta)) != 0)
            goto out;
        mmeta_flags = 0;
        cmp_n = log_compare(lsnp, &mmeta->lsn);
        cmp_p = log_compare(&mmeta->lsn, &argp->mmetalsn);
        if (cmp_p == 0 && DB_REDO(op))
            mmeta->lsn = *lsnp;
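
Every recovery routine above (__ham_splitdata_recover, __ham_copypage_recover, __ham_metagroup_recover) follows the same LSN-comparison idiom: redo the logged change when the page still carries the LSN the record saw before the update (cmp_p == 0), undo it when the page carries this record's own LSN (cmp_n == 0), and otherwise leave the page alone, bumping the page LSN whenever a change is applied. The standalone sketch below, which is not part of hash_rec.c, illustrates that idiom in miniature; the names lsn_t, page_hdr, log_rec, and recover_page are simplified stand-ins invented for this example, not the real DB_LSN, PAGE, or log-record structures.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for DB_LSN. */
typedef struct {
    uint32_t file;
    uint32_t offset;
} lsn_t;

/* Same ordering log_compare() imposes: by file, then by offset. */
static int
lsn_compare(const lsn_t *a, const lsn_t *b)
{
    if (a->file != b->file)
        return (a->file < b->file ? -1 : 1);
    if (a->offset != b->offset)
        return (a->offset < b->offset ? -1 : 1);
    return (0);
}

/* Toy "page": just an LSN and one payload field. */
struct page_hdr {
    lsn_t lsn;
    int value;
};

/* Toy "log record": its own LSN, the page's prior LSN, before/after images. */
struct log_rec {
    lsn_t lsn;      /* LSN of this record (lsnp in the recovery functions) */
    lsn_t pagelsn;  /* page LSN before the update (argp->pagelsn) */
    int before;     /* old image */
    int after;      /* new image */
};

enum recop { OP_REDO, OP_UNDO };

/*
 * Apply or roll back one logged update, mirroring the cmp_p/cmp_n
 * decision used by the recovery functions above.
 */
static void
recover_page(struct page_hdr *pagep, const struct log_rec *argp, enum recop op)
{
    int cmp_n = lsn_compare(&argp->lsn, &pagep->lsn);     /* record vs. page */
    int cmp_p = lsn_compare(&pagep->lsn, &argp->pagelsn); /* page vs. "before" */

    if (cmp_p == 0 && op == OP_REDO) {
        /* Page is exactly as the record first saw it: re-apply the change. */
        pagep->value = argp->after;
        pagep->lsn = argp->lsn;
    } else if (cmp_n == 0 && op == OP_UNDO) {
        /* Page reflects this record: put back the old image. */
        pagep->value = argp->before;
        pagep->lsn = argp->pagelsn;
    }
    /* Otherwise the page is already in the desired state; do nothing. */
}

int
main(void)
{
    struct page_hdr page = { { 1, 100 }, 7 };
    struct log_rec rec = { { 1, 200 }, { 1, 100 }, 7, 42 };

    recover_page(&page, &rec, OP_REDO);
    printf("after redo: value=%d lsn=[%u][%u]\n",
        page.value, (unsigned)page.lsn.file, (unsigned)page.lsn.offset);

    recover_page(&page, &rec, OP_UNDO);
    printf("after undo: value=%d lsn=[%u][%u]\n",
        page.value, (unsigned)page.lsn.file, (unsigned)page.lsn.offset);
    return (0);
}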
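
The groupgrow test in __ham_metagroup_recover, (u_int32_t)(1 << __db_log2(argp->bucket + 1)) == argp->bucket + 1, is true exactly when argp->bucket + 1 is a power of two, i.e. when the logged allocation corresponds to a doubling of the table rather than a single new page. The short standalone program below demonstrates the check; log2_ceil is a hypothetical stand-in for __db_log2, assumed here to return the smallest i such that (1 << i) >= num.

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for __db_log2(): smallest i such that (1 << i) >= num. */
static uint32_t
log2_ceil(uint32_t num)
{
    uint32_t i, limit;

    for (i = 0, limit = 1; limit < num; limit <<= 1)
        ++i;
    return (i);
}

int
main(void)
{
    uint32_t bucket;

    for (bucket = 0; bucket < 9; ++bucket) {
        /* Same expression __ham_metagroup_recover uses for groupgrow. */
        int groupgrow =
            (uint32_t)(1 << log2_ceil(bucket + 1)) == bucket + 1;
        printf("bucket %u: %s\n", (unsigned)bucket,
            groupgrow ? "doubling (batch of pages allocated)"
                      : "single page allocated");
    }
    return (0);
}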