
📄 mp_fget.c

📁 Source from the foreign resip protocol stack; this file, mp_fget.c, is the Berkeley DB memory-pool (mpool) buffer-get implementation.
💻 C
📖 Page 1 of 2
		extending = ret = 0;
		R_LOCK(dbenv, dbmp->reginfo);
		switch (flags) {
		case DB_MPOOL_NEW:
			extending = 1;
			if (mfp->maxpgno != 0 &&
			    mfp->last_pgno >= mfp->maxpgno) {
				__db_err(dbenv, "%s: file limited to %lu pages",
				    __memp_fn(dbmfp), (u_long)mfp->maxpgno);
				ret = ENOSPC;
			} else
				*pgnoaddr = mfp->last_pgno + 1;
			break;
		case DB_MPOOL_CREATE:
			if (mfp->maxpgno != 0 && *pgnoaddr > mfp->maxpgno) {
				__db_err(dbenv, "%s: file limited to %lu pages",
				    __memp_fn(dbmfp), (u_long)mfp->maxpgno);
				ret = ENOSPC;
			} else
				extending = *pgnoaddr > mfp->last_pgno;
			break;
		default:
			ret = *pgnoaddr > mfp->last_pgno ? DB_PAGE_NOTFOUND : 0;
			break;
		}
		R_UNLOCK(dbenv, dbmp->reginfo);
		if (ret != 0)
			goto err;

		/*
		 * !!!
		 * In the DB_MPOOL_NEW code path, mf_offset and n_cache have
		 * not yet been initialized.
		 */
		mf_offset = R_OFFSET(dbmp->reginfo, mfp);
		n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
		c_mp = dbmp->reginfo[n_cache].primary;

		/* Allocate a new buffer header and data space. */
		if ((ret = __memp_alloc(dbmp,
		    &dbmp->reginfo[n_cache], mfp, 0, NULL, &alloc_bhp)) != 0)
			goto err;

#ifdef DIAGNOSTIC
		if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
			__db_err(dbenv,
		    "DB_MPOOLFILE->get: buffer data is NOT size_t aligned");
			ret = __db_panic(dbenv, EINVAL);
			goto err;
		}
#endif

		/*
		 * If we are extending the file, we'll need the region lock
		 * again.
		 */
		if (extending)
			R_LOCK(dbenv, dbmp->reginfo);

		/*
		 * DB_MPOOL_NEW does not guarantee you a page unreferenced by
		 * any other thread of control.  (That guarantee is interesting
		 * for DB_MPOOL_NEW, unlike DB_MPOOL_CREATE, because the caller
		 * did not specify the page number, and so, may reasonably not
		 * have any way to lock the page outside of mpool.) Regardless,
		 * if we allocate the page, and some other thread of control
		 * requests the page by number, we will not detect that and the
		 * thread of control that allocated using DB_MPOOL_NEW may not
		 * have a chance to initialize the page.  (Note: we *could*
		 * detect this case if we set a flag in the buffer header which
		 * guaranteed that no gets of the page would succeed until the
		 * reference count went to 0, that is, until the creating
		 * thread put the page.)  What we do guarantee is that if two
		 * threads of control are both doing DB_MPOOL_NEW calls, they
		 * won't collide, that is, they won't both get the same page.
		 *
		 * There's a possibility that another thread allocated the page
		 * we were planning to allocate while we were off doing buffer
		 * allocation.  We can check for that by making sure the page
		 * number we were going to use is still available.  If it's
		 * not, then we check to see if the next available page number
		 * hashes to the same mpool region as the old one -- if it
		 * does, we can continue, otherwise, we have to start over.
		 */
		if (flags == DB_MPOOL_NEW && *pgnoaddr != mfp->last_pgno + 1) {
			*pgnoaddr = mfp->last_pgno + 1;
			if (n_cache != NCACHE(mp, mf_offset, *pgnoaddr)) {
				/*
				 * flags == DB_MPOOL_NEW, so extending is set
				 * and we're holding the region locked.
				 */
				R_UNLOCK(dbenv, dbmp->reginfo);

				R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
				__db_shalloc_free(
				    &dbmp->reginfo[n_cache], alloc_bhp);
				c_mp->stat.st_pages--;
				R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);

				alloc_bhp = NULL;
				goto alloc;
			}
		}

		/*
		 * We released the region lock, so another thread might have
		 * extended the file.  Update the last_pgno and initialize
		 * the file, as necessary, if we extended the file.
		 */
		if (extending) {
			if (*pgnoaddr > mfp->last_pgno)
				mfp->last_pgno = *pgnoaddr;

			R_UNLOCK(dbenv, dbmp->reginfo);
			if (ret != 0)
				goto err;
		}
		goto hb_search;
	case SECOND_FOUND:
		/*
		 * We allocated buffer space for the requested page, but then
		 * found the page in the buffer cache on our second check.
		 * That's OK -- we can use the page we found in the pool,
		 * unless DB_MPOOL_NEW is set.
		 *
		 * Free the allocated memory; we no longer need it.  Since we
		 * can't acquire the region lock while holding the hash bucket
		 * lock, we have to release the hash bucket and re-acquire it.
		 * That's OK, because we have the buffer pinned down.
		 */
		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
		R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
		__db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp);
		c_mp->stat.st_pages--;
		alloc_bhp = NULL;
		R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);

		/*
		 * We can't use the page we found in the pool if DB_MPOOL_NEW
		 * was set.  (For details, see the above comment beginning
		 * "DB_MPOOL_NEW does not guarantee you a page unreferenced by
		 * any other thread of control".)  If DB_MPOOL_NEW is set, we
		 * release our pin on this particular buffer, and try to get
		 * another one.
		 */
		if (flags == DB_MPOOL_NEW) {
			--bhp->ref;
			b_incr = 0;
			goto alloc;
		}

		/* We can use the page -- get the bucket lock. */
		MUTEX_LOCK(dbenv, &hp->hash_mutex);
		break;
	case SECOND_MISS:
		/*
		 * We allocated buffer space for the requested page, and found
		 * the page still missing on our second pass through the buffer
		 * cache.  Instantiate the page.
		 */
		bhp = alloc_bhp;
		alloc_bhp = NULL;

		/*
		 * Initialize all the BH and hash bucket fields so we can call
		 * __memp_bhfree if an error occurs.
		 *
		 * Append the buffer to the tail of the bucket list and update
		 * the hash bucket's priority.
		 */
		b_incr = 1;

		/*lint --e{668} (flexelint: bhp cannot be NULL). */
		memset(bhp, 0, sizeof(BH));
		bhp->ref = 1;
		bhp->priority = UINT32_MAX;
		bhp->pgno = *pgnoaddr;
		bhp->mf_offset = mf_offset;
		SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
		hp->hash_priority =
		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;

		/* If we extended the file, make sure the page is never lost. */
		if (extending) {
			++hp->hash_page_dirty;
			F_SET(bhp, BH_DIRTY | BH_DIRTY_CREATE);
		}

		/*
		 * If we created the page, zero it out.  If we didn't create
		 * the page, read from the backing file.
		 *
		 * !!!
		 * DB_MPOOL_NEW doesn't call the pgin function.
		 *
		 * If DB_MPOOL_CREATE is used, then the application's pgin
		 * function has to be able to handle pages of 0's -- if it
		 * uses DB_MPOOL_NEW, it can detect all of its page creates,
		 * and not bother.
		 *
		 * If we're running in diagnostic mode, smash any bytes on the
		 * page that are unknown quantities for the caller.
		 *
		 * Otherwise, read the page into memory, optionally creating it
		 * if DB_MPOOL_CREATE is set.
		 */
		if (extending) {
			if (mfp->clear_len == 0)
				memset(bhp->buf, 0, mfp->stat.st_pagesize);
			else {
				memset(bhp->buf, 0, mfp->clear_len);
#if defined(DIAGNOSTIC) || defined(UMRW)
				memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
				    mfp->stat.st_pagesize - mfp->clear_len);
#endif
			}

			if (flags == DB_MPOOL_CREATE && mfp->ftype != 0)
				F_SET(bhp, BH_CALLPGIN);

			++mfp->stat.st_page_create;
		} else {
			F_SET(bhp, BH_TRASH);
			++mfp->stat.st_cache_miss;
		}

		/* Increment buffer count referenced by MPOOLFILE. */
		MUTEX_LOCK(dbenv, &mfp->mutex);
		++mfp->block_cnt;
		MUTEX_UNLOCK(dbenv, &mfp->mutex);

		/*
		 * Initialize the mutex.  This is the last initialization step,
		 * because it's the only one that can fail, and everything else
		 * must be set up or we can't jump to the err label because it
		 * will call __memp_bhfree.
		 */
		if ((ret = __db_mutex_setup(dbenv,
		    &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
			goto err;
	}

	DB_ASSERT(bhp->ref != 0);

	/*
	 * If we're the only reference, update buffer and bucket priorities.
	 * We may be about to release the hash bucket lock, and everything
	 * should be correct, first.  (We've already done this if we created
	 * the buffer, so there is no need to do it again.)
	 */
	if (state != SECOND_MISS && bhp->ref == 1) {
		bhp->priority = UINT32_MAX;
		SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
		SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
		hp->hash_priority =
		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
	}

	/*
	 * BH_TRASH --
	 * The buffer we found may need to be filled from the disk.
	 *
	 * It's possible for the read function to fail, which means we fail as
	 * well.  Note, the __memp_pgread() function discards and reacquires
	 * the hash lock, so the buffer must be pinned down so that it cannot
	 * move and its contents are unchanged.  Discard the buffer on failure
	 * unless another thread is waiting on our I/O to complete.  It's OK to
	 * leave the buffer around, as the waiting thread will see the BH_TRASH
	 * flag set, and will also attempt to discard it.  If there's a waiter,
	 * we need to decrement our reference count.
	 */
	if (F_ISSET(bhp, BH_TRASH) &&
	    (ret = __memp_pgread(dbmfp,
	    &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
		goto err;

	/*
	 * BH_CALLPGIN --
	 * The buffer was processed for being written to disk, and now has
	 * to be re-converted for use.
	 */
	if (F_ISSET(bhp, BH_CALLPGIN)) {
		if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
			goto err;
		F_CLR(bhp, BH_CALLPGIN);
	}

	MUTEX_UNLOCK(dbenv, &hp->hash_mutex);

#ifdef DIAGNOSTIC
	/* Update the file's pinned reference count. */
	R_LOCK(dbenv, dbmp->reginfo);
	++dbmfp->pinref;
	R_UNLOCK(dbenv, dbmp->reginfo);

	/*
	 * We want to switch threads as often as possible, and at awkward
	 * times.  Yield every time we get a new page to ensure contention.
	 */
	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
		__os_yield(dbenv, 1);
#endif

	*(void **)addrp = bhp->buf;
	return (0);

err:	/*
	 * Discard our reference.  If we're the only reference, discard the
	 * buffer entirely.  If we held a reference to a buffer, we are
	 * also still holding the hash bucket mutex.
	 */
	if (b_incr) {
		if (bhp->ref == 1)
			__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
		else {
			--bhp->ref;
			MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
		}
	}

	/* If alloc_bhp is set, free the memory. */
	if (alloc_bhp != NULL) {
		R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
		__db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp);
		c_mp->stat.st_pages--;
		R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
	}

	return (ret);
}
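The routine above is the allocation half of the Berkeley DB mpool page-get path: DB_MPOOL_NEW lets mpool choose the next page number and extend the file, DB_MPOOL_CREATE instantiates the named page if it is missing, and a plain get past end of file returns DB_PAGE_NOTFOUND. Below is a minimal caller-side sketch, not part of this file. It assumes the Berkeley DB 4.2-era DB_MPOOLFILE handle methods (memp_fcreate, open, get, put, close), where mpf->get() takes no transaction argument and mpf->put() takes a flags argument such as DB_MPOOL_DIRTY; later releases changed these signatures. The function name create_first_page and its parameters are illustrative only.

/*
 * Hedged sketch of an mpool caller, assuming BDB 4.2-era signatures.
 */
#include <string.h>

#include <db.h>

int
create_first_page(DB_ENV *dbenv, const char *path, u_int32_t pagesize)
{
	DB_MPOOLFILE *mpf;
	db_pgno_t pgno;
	void *addr;
	int ret;

	/* Create an mpool file handle and open the backing file in the cache. */
	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
		return (ret);
	if ((ret = mpf->open(mpf, path, 0, 0644, pagesize)) != 0)
		goto err;

	/*
	 * DB_MPOOL_NEW: mpool picks the page number (last_pgno + 1 in the
	 * code above), extends the file, and returns a pinned buffer that
	 * the code above has already cleared and flagged dirty-on-create.
	 */
	if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &addr)) != 0)
		goto err;

	/* Lay down the application's page image here. */
	memset(addr, 0, pagesize);

	/*
	 * Release the pin taken by mpf->get().  DB_MPOOL_DIRTY is how a
	 * caller marks a modified existing page; a freshly created page is
	 * already dirty, so the flag is redundant but harmless here.
	 */
	ret = mpf->put(mpf, addr, DB_MPOOL_DIRTY);

err:	(void)mpf->close(mpf, 0);
	return (ret);
}

Whatever the release, the contract visible in the listing holds: every successful get pins the buffer (bhp->ref is incremented), and the pin must be dropped with put before the buffer can be evicted.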
