
📄 vacuumlazy.c

📁 Source code from PostgreSQL 8.1.4, an open-source database system for Linux
💻 C
📖 Page 1 of 3
        if (pageSpaces[i].blkno < new_rel_pages)
        {
            pageSpaces[j] = pageSpaces[i];
            j++;
        }
    }
    vacrelstats->num_free_pages = j;

    /* We destroyed the heap ordering, so mark array unordered */
    vacrelstats->fs_is_heap = false;

    /* update statistics */
    vacrelstats->rel_pages = new_rel_pages;
    vacrelstats->pages_removed = old_rel_pages - new_rel_pages;

    /*
     * We keep the exclusive lock until commit (perhaps not necessary)?
     */

    ereport(elevel,
            (errmsg("\"%s\": truncated %u to %u pages",
                    RelationGetRelationName(onerel),
                    old_rel_pages, new_rel_pages),
             errdetail("%s.",
                       pg_rusage_show(&ru0))));
}

/*
 * Rescan end pages to verify that they are (still) empty of needed tuples.
 *
 * Returns number of nondeletable pages (last nonempty page + 1).
 */
static BlockNumber
count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
{
    BlockNumber blkno;
    HeapTupleData tuple;

    /* Strange coding of loop control is needed because blkno is unsigned */
    blkno = vacrelstats->rel_pages;
    while (blkno > vacrelstats->nonempty_pages)
    {
        Buffer      buf;
        Page        page;
        OffsetNumber offnum,
                    maxoff;
        bool        tupgone,
                    hastup;

        vacuum_delay_point();

        blkno--;

        buf = ReadBuffer(onerel, blkno);

        /* In this phase we only need shared access to the buffer */
        LockBuffer(buf, BUFFER_LOCK_SHARE);

        page = BufferGetPage(buf);

        if (PageIsNew(page) || PageIsEmpty(page))
        {
            /* PageIsNew probably shouldn't happen... */
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
            ReleaseBuffer(buf);
            continue;
        }

        hastup = false;
        maxoff = PageGetMaxOffsetNumber(page);
        for (offnum = FirstOffsetNumber;
             offnum <= maxoff;
             offnum = OffsetNumberNext(offnum))
        {
            ItemId      itemid;

            itemid = PageGetItemId(page, offnum);

            if (!ItemIdIsUsed(itemid))
                continue;

            tuple.t_datamcxt = NULL;
            tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
            tuple.t_len = ItemIdGetLength(itemid);
            ItemPointerSet(&(tuple.t_self), blkno, offnum);

            tupgone = false;
            switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
            {
                case HEAPTUPLE_DEAD:
                    tupgone = true;     /* we can delete the tuple */
                    break;
                case HEAPTUPLE_LIVE:
                    /* Shouldn't be necessary to re-freeze anything */
                    break;
                case HEAPTUPLE_RECENTLY_DEAD:

                    /*
                     * If tuple is recently deleted then we must not remove it
                     * from relation.
                     */
                    break;
                case HEAPTUPLE_INSERT_IN_PROGRESS:
                    /* This is an expected case during concurrent vacuum */
                    break;
                case HEAPTUPLE_DELETE_IN_PROGRESS:
                    /* This is an expected case during concurrent vacuum */
                    break;
                default:
                    elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                    break;
            }

            if (!tupgone)
            {
                hastup = true;
                break;          /* can stop scanning */
            }
        }                       /* scan along page */

        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(buf);

        /* Done scanning if we found a tuple here */
        if (hastup)
            return blkno + 1;
    }

    /*
     * If we fall out of the loop, all the previously-thought-to-be-empty
     * pages really are; we need not bother to look at the last known-nonempty
     * page.
     */
    return vacrelstats->nonempty_pages;
}

/*
 * lazy_space_alloc - space allocation decisions for lazy vacuum
 *
 * See the comments at the head of this file for rationale.
 */
static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
    long        maxtuples;
    int         maxpages;

    maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
    maxtuples = Min(maxtuples, INT_MAX);
    maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
    /* stay sane if small maintenance_work_mem */
    maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);

    vacrelstats->num_dead_tuples = 0;
    vacrelstats->max_dead_tuples = (int) maxtuples;
    vacrelstats->dead_tuples = (ItemPointer)
        palloc(maxtuples * sizeof(ItemPointerData));

    maxpages = MaxFSMPages;
    maxpages = Min(maxpages, MaxAllocSize / sizeof(PageFreeSpaceInfo));
    /* No need to allocate more pages than the relation has blocks */
    if (relblocks < (BlockNumber) maxpages)
        maxpages = (int) relblocks;

    vacrelstats->fs_is_heap = false;
    vacrelstats->num_free_pages = 0;
    vacrelstats->max_free_pages = maxpages;
    vacrelstats->free_pages = (PageFreeSpaceInfo *)
        palloc(maxpages * sizeof(PageFreeSpaceInfo));
}

/*
 * lazy_record_dead_tuple - remember one deletable tuple
 */
static void
lazy_record_dead_tuple(LVRelStats *vacrelstats,
                       ItemPointer itemptr)
{
    /*
     * The array shouldn't overflow under normal behavior, but perhaps it
     * could if we are given a really small maintenance_work_mem. In that
     * case, just forget the last few tuples.
     */
    if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
    {
        vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
        vacrelstats->num_dead_tuples++;
    }
}

/*
 * lazy_record_free_space - remember free space on one page
 */
static void
lazy_record_free_space(LVRelStats *vacrelstats,
                       BlockNumber page,
                       Size avail)
{
    PageFreeSpaceInfo *pageSpaces;
    int         n;

    /*
     * A page with less than stats->threshold free space will be forgotten
     * immediately, and never passed to the free space map.  Removing the
     * uselessly small entries early saves cycles, and in particular reduces
     * the amount of time we spend holding the FSM lock when we finally call
     * RecordRelationFreeSpace.  Since the FSM will probably drop pages with
     * little free space anyway, there's no point in making this really small.
     *
     * XXX Is it worth trying to measure average tuple size, and using that to
     * adjust the threshold?  Would be worthwhile if FSM has no stats yet for
     * this relation.  But changing the threshold as we scan the rel might
     * lead to bizarre behavior, too.  Also, it's probably better if vacuum.c
     * has the same thresholding behavior as we do here.
     */
    if (avail < vacrelstats->threshold)
        return;

    /* Copy pointers to local variables for notational simplicity */
    pageSpaces = vacrelstats->free_pages;
    n = vacrelstats->max_free_pages;

    /* If we haven't filled the array yet, just keep adding entries */
    if (vacrelstats->num_free_pages < n)
    {
        pageSpaces[vacrelstats->num_free_pages].blkno = page;
        pageSpaces[vacrelstats->num_free_pages].avail = avail;
        vacrelstats->num_free_pages++;
        return;
    }

    /*----------
     * The rest of this routine works with "heap" organization of the
     * free space arrays, wherein we maintain the heap property
     *          avail[(j-1) div 2] <= avail[j]  for 0 < j < n.
     * In particular, the zero'th element always has the smallest available
     * space and can be discarded to make room for a new page with more space.
     * See Knuth's discussion of heap-based priority queues, sec 5.2.3;
     * but note he uses 1-origin array subscripts, not 0-origin.
     *----------
     */

    /* If we haven't yet converted the array to heap organization, do it */
    if (!vacrelstats->fs_is_heap)
    {
        /*
         * Scan backwards through the array, "sift-up" each value into its
         * correct position.  We can start the scan at n/2-1 since each entry
         * above that position has no children to worry about.
         */
        int         l = n / 2;

        while (--l >= 0)
        {
            BlockNumber R = pageSpaces[l].blkno;
            Size        K = pageSpaces[l].avail;
            int         i;      /* i is where the "hole" is */

            i = l;
            for (;;)
            {
                int         j = 2 * i + 1;

                if (j >= n)
                    break;
                if (j + 1 < n && pageSpaces[j].avail > pageSpaces[j + 1].avail)
                    j++;
                if (K <= pageSpaces[j].avail)
                    break;
                pageSpaces[i] = pageSpaces[j];
                i = j;
            }
            pageSpaces[i].blkno = R;
            pageSpaces[i].avail = K;
        }

        vacrelstats->fs_is_heap = true;
    }

    /* If new page has more than zero'th entry, insert it into heap */
    if (avail > pageSpaces[0].avail)
    {
        /*
         * Notionally, we replace the zero'th entry with the new data, and
         * then sift-up to maintain the heap property.  Physically, the new
         * data doesn't get stored into the arrays until we find the right
         * location for it.
         */
        int         i = 0;      /* i is where the "hole" is */

        for (;;)
        {
            int         j = 2 * i + 1;

            if (j >= n)
                break;
            if (j + 1 < n && pageSpaces[j].avail > pageSpaces[j + 1].avail)
                j++;
            if (avail <= pageSpaces[j].avail)
                break;
            pageSpaces[i] = pageSpaces[j];
            i = j;
        }
        pageSpaces[i].blkno = page;
        pageSpaces[i].avail = avail;
    }
}

/*
 *  lazy_tid_reaped() -- is a particular tid deletable?
 *
 *      This has the right signature to be an IndexBulkDeleteCallback.
 *
 *      Assumes dead_tuples array is in sorted order.
 */
static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
    LVRelStats *vacrelstats = (LVRelStats *) state;
    ItemPointer res;

    res = (ItemPointer) bsearch((void *) itemptr,
                                (void *) vacrelstats->dead_tuples,
                                vacrelstats->num_dead_tuples,
                                sizeof(ItemPointerData),
                                vac_cmp_itemptr);
    return (res != NULL);
}

/*
 * Dummy version for lazy_scan_index.
 */
static bool
dummy_tid_reaped(ItemPointer itemptr, void *state)
{
    return false;
}

/*
 * Update the shared Free Space Map with the info we now have about
 * free space in the relation, discarding any old info the map may have.
 */
static void
lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats)
{
    PageFreeSpaceInfo *pageSpaces = vacrelstats->free_pages;
    int         nPages = vacrelstats->num_free_pages;

    /*
     * Sort data into order, as required by RecordRelationFreeSpace.
     */
    if (nPages > 1)
        qsort(pageSpaces, nPages, sizeof(PageFreeSpaceInfo),
              vac_cmp_page_spaces);

    RecordRelationFreeSpace(&onerel->rd_node, nPages, pageSpaces);
}

/*
 * Comparator routines for use with qsort() and bsearch().
 */
static int
vac_cmp_itemptr(const void *left, const void *right)
{
    BlockNumber lblk,
                rblk;
    OffsetNumber loff,
                roff;

    lblk = ItemPointerGetBlockNumber((ItemPointer) left);
    rblk = ItemPointerGetBlockNumber((ItemPointer) right);

    if (lblk < rblk)
        return -1;
    if (lblk > rblk)
        return 1;

    loff = ItemPointerGetOffsetNumber((ItemPointer) left);
    roff = ItemPointerGetOffsetNumber((ItemPointer) right);

    if (loff < roff)
        return -1;
    if (loff > roff)
        return 1;

    return 0;
}

static int
vac_cmp_page_spaces(const void *left, const void *right)
{
    PageFreeSpaceInfo *linfo = (PageFreeSpaceInfo *) left;
    PageFreeSpaceInfo *rinfo = (PageFreeSpaceInfo *) right;

    if (linfo->blkno < rinfo->blkno)
        return -1;
    else if (linfo->blkno > rinfo->blkno)
        return 1;
    return 0;
}
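The free-space tracking above (lazy_space_alloc plus lazy_record_free_space) is a bounded min-heap keyed on available space: once the fixed-size array fills up, the entry with the least free space sits at index 0 and is evicted whenever a page with more free space arrives, so only the best max_free_pages candidates survive to be handed to the FSM. Below is a minimal, standalone sketch of that idea for illustration only; the PageSpace type, the record_free_space helper, and the eager heapify-on-fill are simplified stand-ins, not the PostgreSQL definitions.

/*
 * Standalone sketch of the bounded min-heap used to keep only the N pages
 * with the most free space.  Types and names are simplified stand-ins, not
 * the PostgreSQL definitions; unlike the original, the array is heapified
 * as soon as it fills rather than lazily on first eviction.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct
{
    unsigned    blkno;          /* page number */
    size_t      avail;          /* free bytes on that page */
} PageSpace;

/* Sift the value (blkno, avail) down from slot i, restoring the min-heap
 * property avail[(j-1)/2] <= avail[j] over the avail field. */
static void
sift_down(PageSpace *a, int n, int i, unsigned blkno, size_t avail)
{
    for (;;)
    {
        int         j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n && a[j].avail > a[j + 1].avail)
            j++;                /* follow the smaller child */
        if (avail <= a[j].avail)
            break;
        a[i] = a[j];            /* pull the child up into the hole */
        i = j;
    }
    a[i].blkno = blkno;
    a[i].avail = avail;
}

/* Record one page, keeping at most n entries with the largest avail. */
static void
record_free_space(PageSpace *a, int *num, int n, unsigned blkno, size_t avail)
{
    if (*num < n)
    {
        /* array not full yet: just append */
        a[*num].blkno = blkno;
        a[*num].avail = avail;
        (*num)++;
        if (*num == n)
        {
            /* array just filled: heapify so a[0] holds the smallest avail */
            int         l;

            for (l = n / 2 - 1; l >= 0; l--)
                sift_down(a, n, l, a[l].blkno, a[l].avail);
        }
        return;
    }
    /* array full: replace the root only if the new page is better */
    if (avail > a[0].avail)
        sift_down(a, n, 0, blkno, avail);
}

int
main(void)
{
    PageSpace   a[4];
    int         num = 0;
    size_t      sizes[] = {100, 900, 300, 700, 50, 800, 600};
    int         i;

    for (i = 0; i < 7; i++)
        record_free_space(a, &num, 4, (unsigned) i, sizes[i]);

    /* prints the four pages with the most free space, root first */
    for (i = 0; i < num; i++)
        printf("blkno %u avail %zu\n", a[i].blkno, a[i].avail);
    return 0;
}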

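lazy_tid_reaped depends on the dead_tuples array already being sorted by (block, offset), which holds because the heap is scanned block by block, so the index bulk-delete callback can answer "is this TID dead?" with a plain bsearch over the array using the same ordering as the comparator. A minimal, standalone sketch of that pattern follows; the Tid struct and tid_reaped helper are hypothetical stand-ins for ItemPointerData and the real callback.

/*
 * Standalone sketch of the sorted-array membership test behind
 * lazy_tid_reaped.  Tid is a simplified stand-in for ItemPointerData.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned    blkno;          /* heap block number */
    unsigned short offnum;      /* line pointer offset within the block */
} Tid;

/* Order by block number, then offset: the order the heap scan emits. */
static int
cmp_tid(const void *left, const void *right)
{
    const Tid  *l = (const Tid *) left;
    const Tid  *r = (const Tid *) right;

    if (l->blkno != r->blkno)
        return (l->blkno < r->blkno) ? -1 : 1;
    if (l->offnum != r->offnum)
        return (l->offnum < r->offnum) ? -1 : 1;
    return 0;
}

/* Callback-style membership test: is this tid in the dead-tuple list? */
static bool
tid_reaped(const Tid *tid, const Tid *dead, size_t ndead)
{
    return bsearch(tid, dead, ndead, sizeof(Tid), cmp_tid) != NULL;
}

int
main(void)
{
    /* already sorted because the heap is scanned block by block */
    Tid         dead[] = {{1, 3}, {1, 7}, {4, 2}};
    Tid         probe = {1, 7};

    printf("reaped: %s\n", tid_reaped(&probe, dead, 3) ? "yes" : "no");
    return 0;
}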