mp_fget.c
     * of control are both doing DB_MPOOL_NEW calls, they won't
     * collide, that is, they won't both get the same page.
     *
     * There's a possibility that another thread allocated the page
     * we were planning to allocate while we were off doing buffer
     * allocation.  We can detect that by making sure the page number
     * we were going to use is still available.  If it's not, then
     * we check to see if the next available page number hashes to
     * the same mpool region as the old one -- if it does, we can
     * continue, otherwise, we have to start over.
     */
    if (flags == DB_MPOOL_NEW && *pgnoaddr != mfp->last_pgno + 1) {
        *pgnoaddr = mfp->last_pgno + 1;
        if (n_cache != NCACHE(mp, mf_offset, *pgnoaddr)) {
            __db_shalloc_free(
                dbmp->reginfo[n_cache].addr, alloc_bhp);

            /*
             * flags == DB_MPOOL_NEW, so extending is set
             * and we're holding the region locked.
             */
            R_UNLOCK(dbenv, dbmp->reginfo);

            alloc_bhp = NULL;
            goto alloc;
        }
    }
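    /*
     * Illustrative sketch, not part of the original source: NCACHE maps
     * a (file, page) pair to one of the mpool cache regions, which is
     * why a changed page number can force a restart.  A minimal model
     * of that kind of striping -- the real macro's arithmetic may
     * differ, and the region-count field name (mp->nreg) is assumed
     * here -- would be:
     *
     *      n_cache = (pgno + (u_int32_t)mf_offset) % mp->nreg;
     *
     * The buffer we already allocated lives in region n_cache, so if
     * the new page number stripes to a different region, that memory
     * can't back this page and has to be freed before retrying.
     */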
    /*
     * We released the region lock, so another thread might have
     * extended the file.  Update the last_pgno and initialize
     * the file, as necessary, if we extended the file.
     */
    if (extending) {
#ifdef HAVE_FILESYSTEM_NOTZERO
        if (*pgnoaddr > mfp->last_pgno &&
            __os_fs_notzero() && F_ISSET(dbmfp->fhp, DB_FH_VALID))
            ret = __memp_fs_notzero(dbenv, dbmfp, mfp, pgnoaddr);
        else
            ret = 0;
#endif
        if (ret == 0 && *pgnoaddr > mfp->last_pgno)
            mfp->last_pgno = *pgnoaddr;

        R_UNLOCK(dbenv, dbmp->reginfo);
        if (ret != 0)
            goto err;
    }
    goto hb_search;

case SECOND_FOUND:
    /*
     * We allocated buffer space for the requested page, but then
     * found the page in the buffer cache on our second check.
     * That's OK -- we can use the page we found in the pool,
     * unless DB_MPOOL_NEW is set.
     *
     * Free the allocated memory, we no longer need it.  Since we
     * can't acquire the region lock while holding the hash bucket
     * lock, we have to release the hash bucket and re-acquire it.
     * That's OK, because we have the buffer pinned down.
     */
    MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
    R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
    __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
    alloc_bhp = NULL;
    R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
    MUTEX_LOCK(dbenv, &hp->hash_mutex);

    /*
     * We can't use the page we found in the pool if DB_MPOOL_NEW
     * was set.  (For details, see the above comment beginning
     * "DB_MPOOL_NEW does not guarantee you a page unreferenced by
     * any other thread of control".)  If DB_MPOOL_NEW is set, we
     * release our pin on this particular buffer, and try to get
     * another one.
     */
    if (flags == DB_MPOOL_NEW) {
        --bhp->ref;
        b_incr = 0;
        goto alloc;
    }
    break;

case SECOND_MISS:
    /*
     * We allocated buffer space for the requested page, and found
     * the page still missing on our second pass through the buffer
     * cache.  Instantiate the page.
     */
    bhp = alloc_bhp;
    alloc_bhp = NULL;

    /*
     * Initialize all the BH and hash bucket fields so we can call
     * __memp_bhfree if an error occurs.
     *
     * Append the buffer to the tail of the bucket list and update
     * the hash bucket's priority.
     */
    b_incr = 1;
    memset(bhp, 0, sizeof(BH));
    bhp->ref = 1;
    bhp->priority = UINT32_T_MAX;
    bhp->pgno = *pgnoaddr;
    bhp->mf_offset = mf_offset;
    SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
    hp->hash_priority =
        SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;

    /* If we extended the file, make sure the page is never lost. */
    if (extending) {
        ++hp->hash_page_dirty;
        F_SET(bhp, BH_DIRTY | BH_DIRTY_CREATE);
    }

    /*
     * If we created the page, zero it out.  If we didn't create
     * the page, read from the backing file.
     *
     * !!!
     * DB_MPOOL_NEW doesn't call the pgin function.
     *
     * If DB_MPOOL_CREATE is used, then the application's pgin
     * function has to be able to handle pages of 0's -- if it
     * uses DB_MPOOL_NEW, it can detect all of its page creates,
     * and not bother.
     *
     * If we're running in diagnostic mode, smash any bytes on the
     * page that are unknown quantities for the caller.
     *
     * Otherwise, read the page into memory, optionally creating it
     * if DB_MPOOL_CREATE is set.
     */
    if (extending) {
        if (mfp->clear_len == 0)
            memset(bhp->buf, 0, mfp->stat.st_pagesize);
        else {
            memset(bhp->buf, 0, mfp->clear_len);
#if defined(DIAGNOSTIC) || defined(UMRW)
            memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
                mfp->stat.st_pagesize - mfp->clear_len);
#endif
        }

        if (flags == DB_MPOOL_CREATE && mfp->ftype != 0)
            F_SET(bhp, BH_CALLPGIN);

        ++mfp->stat.st_page_create;
    } else {
        F_SET(bhp, BH_TRASH);
        ++mfp->stat.st_cache_miss;
    }

    /* Increment buffer count referenced by MPOOLFILE. */
    MUTEX_LOCK(dbenv, &mfp->mutex);
    ++mfp->block_cnt;
    MUTEX_UNLOCK(dbenv, &mfp->mutex);

    /*
     * Initialize the mutex.  This is the last initialization step,
     * because it's the only one that can fail, and everything else
     * must be set up or we can't jump to the err label because it
     * will call __memp_bhfree.
     */
    if ((ret = __db_mutex_setup(dbenv,
        &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
        goto err;
}

DB_ASSERT(bhp->ref != 0);

/*
 * If we're the only reference, update buffer and bucket priorities.
 * We may be about to release the hash bucket lock, and everything
 * should be correct, first.  (We've already done this if we created
 * the buffer, so there is no need to do it again.)
 */
if (state != SECOND_MISS && bhp->ref == 1) {
    bhp->priority = UINT32_T_MAX;
    SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
    SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
    hp->hash_priority =
        SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
}

/*
 * BH_TRASH --
 * The buffer we found may need to be filled from the disk.
 *
 * It's possible for the read function to fail, which means we fail as
 * well.  Note, the __memp_pgread() function discards and reacquires
 * the hash lock, so the buffer must be pinned down so that it cannot
 * move and its contents are unchanged.  Discard the buffer on failure
 * unless another thread is waiting on our I/O to complete.  It's OK to
 * leave the buffer around, as the waiting thread will see the BH_TRASH
 * flag set, and will also attempt to discard it.  If there's a waiter,
 * we need to decrement our reference count.
 */
if (F_ISSET(bhp, BH_TRASH) &&
    (ret = __memp_pgread(dbmfp,
    &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
    goto err;

/*
 * BH_CALLPGIN --
 * The buffer was processed for being written to disk, and now has
 * to be re-converted for use.
 */
if (F_ISSET(bhp, BH_CALLPGIN)) {
    if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
        goto err;
    F_CLR(bhp, BH_CALLPGIN);
}

MUTEX_UNLOCK(dbenv, &hp->hash_mutex);

#ifdef DIAGNOSTIC
/* Update the file's pinned reference count. */
R_LOCK(dbenv, dbmp->reginfo);
++dbmfp->pinref;
R_UNLOCK(dbenv, dbmp->reginfo);

/*
 * We want to switch threads as often as possible, and at awkward
 * times.  Yield every time we get a new page to ensure contention.
 */
if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
    __os_yield(dbenv, 1);
#endif

*(void **)addrp = bhp->buf;
return (0);

err:    /*
         * Discard our reference.  If we're the only reference, discard
         * the buffer entirely.  If we held a reference to a buffer, we
         * are also still holding the hash bucket mutex.
         */
    if (b_incr) {
        if (bhp->ref == 1)
            (void)__memp_bhfree(dbmp, hp, bhp, 1);
        else {
            --bhp->ref;
            MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
        }
    }

    /* If alloc_bhp is set, free the memory. */
    if (alloc_bhp != NULL)
        __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);

    return (ret);
}
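/*
 * The SECOND_FOUND / SECOND_MISS cases above are an instance of the
 * optimistic-allocation idiom: drop the bucket lock, do the slow
 * allocation, retake the lock, and re-check, because another thread
 * may have instantiated the same page in the meantime.  A sketch of
 * the idiom, with hypothetical lookup/insert/allocate helpers that
 * are not mpool API:
 *
 *      lock(bucket);
 *      if ((bhp = lookup(bucket, pgno)) == NULL) {      // first miss
 *              unlock(bucket);
 *              alloc_bhp = allocate();                  // slow path
 *              lock(bucket);
 *              if ((bhp = lookup(bucket, pgno)) != NULL)
 *                      free_buffer(alloc_bhp);          // second found
 *              else
 *                      bhp = insert(bucket, alloc_bhp); // second miss
 *      }
 *      ++bhp->ref;                      // pin before dropping the lock
 *      unlock(bucket);
 */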
#ifdef HAVE_FILESYSTEM_NOTZERO
/*
 * __memp_fs_notzero --
 *	Initialize the underlying allocated pages in the file.
 */
static int
__memp_fs_notzero(dbenv, dbmfp, mfp, pgnoaddr)
    DB_ENV *dbenv;
    DB_MPOOLFILE *dbmfp;
    MPOOLFILE *mfp;
    db_pgno_t *pgnoaddr;
{
    DB_IO db_io;
    u_int32_t i, npages;
    size_t nw;
    int ret;
    u_int8_t *page;
    char *fail;

    /*
     * Pages allocated by writing pages past end-of-file are not zeroed
     * on some systems.  Recovery could theoretically be fooled by a
     * page showing up that contained garbage.  In order to avoid this,
     * we have to write the pages out to disk, and flush them.  The
     * reason for the flush is that if we don't sync, the allocation of
     * another page subsequent to this one might reach the disk first,
     * and if we crashed at the right moment, leave us with this page as
     * the one allocated by writing a page past it in the file.
     *
     * Hash is the only access method that allocates groups of pages.
     * We know that it will use the existence of the last page in a
     * group to signify that the entire group is OK; so, write all the
     * pages but the last one in the group, flush them to disk, and
     * then write the last one to disk and flush it.
     */
    if ((ret =
        __os_calloc(dbenv, 1, mfp->stat.st_pagesize, &page)) != 0)
        return (ret);

    db_io.fhp = dbmfp->fhp;
    db_io.mutexp = dbmfp->mutexp;
    db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
    db_io.buf = page;

    npages = *pgnoaddr - mfp->last_pgno;
    for (i = 1; i < npages; ++i) {
        db_io.pgno = mfp->last_pgno + i;
        if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
            fail = "write";
            goto err;
        }
    }
    if (i != 1 && (ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
        fail = "sync";
        goto err;
    }

    db_io.pgno = mfp->last_pgno + npages;
    if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
        fail = "write";
        goto err;
    }
    if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
        fail = "sync";

err:        __db_err(dbenv, "%s: %s failed for page %lu",
                __memp_fn(dbmfp), fail, (u_long)db_io.pgno);
    }

    __os_free(dbenv, page);
    return (ret);
}
#endif
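A note on the ordering in __memp_fs_notzero: writing every page in the
group except the last, syncing, and only then writing and syncing the
last page guarantees that a durable last page implies the pages before
it also reached disk -- the property Hash recovery depends on, per the
comment above.  The following is a minimal standalone sketch of the
same pattern using POSIX I/O rather than the DB_IO layer;
fill_group_notzero and its parameters are hypothetical names, not
Berkeley DB API.

#include <stdlib.h>
#include <unistd.h>

/*
 * fill_group_notzero --
 *      Hypothetical helper (not Berkeley DB code): append npages zeroed
 *      pages after page number last_pgno, syncing all but the last page
 *      before the last page is written, so that a durable last page
 *      implies the pages before it are durable too.
 */
static int
fill_group_notzero(int fd, unsigned long last_pgno,
    size_t pagesize, unsigned npages)
{
    unsigned i;
    char *page;

    if ((page = calloc(1, pagesize)) == NULL)
        return (-1);

    /* Write every page in the group except the last one. */
    for (i = 1; i < npages; ++i)
        if (pwrite(fd, page, pagesize,
            (off_t)((last_pgno + i) * pagesize)) != (ssize_t)pagesize)
            goto err;

    /* Flush the leading pages before the last page can exist. */
    if (npages > 1 && fsync(fd) != 0)
        goto err;

    /* Now write the last page of the group, and flush it as well. */
    if (pwrite(fd, page, pagesize,
        (off_t)((last_pgno + npages) * pagesize)) != (ssize_t)pagesize ||
        fsync(fd) != 0)
        goto err;

    free(page);
    return (0);

err:
    free(page);
    return (-1);
}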