rtree.c

Source code of PostgreSQL 8.1.4, the open-source database system for Linux.

Language: C

            if ((i == seed_1) || (i == seed_2))
                continue;
            else if (i == newitemoff)
                item_1 = itup;
            else
                item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
            datum_alpha = IndexTupleGetDatum(item_1);
            union_dl = FunctionCall2(&rtstate->unionFn, datum_l, datum_alpha);
            union_dr = FunctionCall2(&rtstate->unionFn, datum_r, datum_alpha);
            FunctionCall2(&rtstate->sizeFn, union_dl,
                          PointerGetDatum(&size_alpha));
            FunctionCall2(&rtstate->sizeFn, union_dr,
                          PointerGetDatum(&size_beta));
            pfree(DatumGetPointer(union_dl));
            pfree(DatumGetPointer(union_dr));
            diff = (size_alpha - size_l) - (size_beta - size_r);

            cost_vector[n].offset_number = i;
            cost_vector[n].cost_differential = fabs(diff);
            cost_vector[n].choose_left = (diff < 0);

            n++;
        }

        /*
         * Sort the array.  The function qsort_comp_splitcost is set up
         * "backwards", to provide descending order.
         */
        qsort(cost_vector, num_tuples_without_seeds, sizeof(SPLITCOST),
              &qsort_comp_splitcost);
    }

    /*
     * Now make the final decisions about where each tuple will go, and build
     * the vectors to return in the SPLITVEC record.
     *
     * The cost_vector array contains (descriptions of) all the tuples, in the
     * order that we want to consider them, so we just iterate through it and
     * place each tuple in left or right nodes, according to the criteria
     * described below.
     */
    left = v->spl_left;
    v->spl_nleft = 0;
    right = v->spl_right;
    v->spl_nright = 0;

    /*
     * Place the seeds first.  left avail space, left union, right avail
     * space, and right union have already been adjusted for the seeds.
     */
    *left++ = seed_1;
    v->spl_nleft++;

    *right++ = seed_2;
    v->spl_nright++;

    for (n = 0; n < num_tuples_without_seeds; n++)
    {
        bool        left_feasible,
                    right_feasible,
                    choose_left;

        /*
         * We need to figure out which page needs the least enlargement in
         * order to store the item.
         */
        i = cost_vector[n].offset_number;

        /* Compute new union datums and sizes for both possible additions */
        if (i == newitemoff)
        {
            item_1 = itup;
            /* Needn't leave room for new item anymore */
            newitemsz = 0;
        }
        else
            item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
        item_1_sz = IndexTupleTotalSize(item_1);
        datum_alpha = IndexTupleGetDatum(item_1);
        union_dl = FunctionCall2(&rtstate->unionFn, datum_l, datum_alpha);
        union_dr = FunctionCall2(&rtstate->unionFn, datum_r, datum_alpha);
        FunctionCall2(&rtstate->sizeFn, union_dl,
                      PointerGetDatum(&size_alpha));
        FunctionCall2(&rtstate->sizeFn, union_dr,
                      PointerGetDatum(&size_beta));

        /*
         * We prefer the page that shows smaller enlargement of its union area
         * (Guttman's algorithm), but we must take care that at least one page
         * will still have room for the new item after this one is added.
         *
         * (We know that all the old items together can fit on one page, so we
         * need not worry about any other problem than failing to fit the new
         * item.)
         *
         * Guttman's algorithm actually has two factors to consider (in
         * order): 1. if one node has so many tuples already assigned to it
         * that the other needs all the rest in order to satisfy the condition
         * that neither node has fewer than m tuples, then that is decisive;
         * 2. otherwise, choose the page that shows the smaller enlargement of
         * its union area.
         *
         * I have chosen m = M/2, where M is the maximum number of tuples on a
         * page.  (Actually, this is only strictly true for fixed size tuples.
         * For variable size tuples, there still might have to be only one
         * tuple on a page, if it is really big.  But even with variable size
         * tuples we still try to get m as close as possible to M/2.)
         *
         * The question of which page shows the smaller enlargement of its
         * union area has already been answered, and the answer stored in the
         * choose_left field of the SPLITCOST record.
         */
        left_feasible = (left_avail_space >= item_1_sz &&
                         ((left_avail_space - item_1_sz) >= newitemsz ||
                          right_avail_space >= newitemsz));
        right_feasible = (right_avail_space >= item_1_sz &&
                          ((right_avail_space - item_1_sz) >= newitemsz ||
                           left_avail_space >= newitemsz));
        if (left_feasible && right_feasible)
        {
            /*
             * Both feasible, use Guttman's algorithm.  First check the m
             * condition described above, and if that doesn't apply, choose
             * the page with the smaller enlargement of its union area.
             */
            if (v->spl_nleft > max_after_split)
                choose_left = false;
            else if (v->spl_nright > max_after_split)
                choose_left = true;
            else
                choose_left = cost_vector[n].choose_left;
        }
        else if (left_feasible)
            choose_left = true;
        else if (right_feasible)
            choose_left = false;
        else
        {
            elog(ERROR, "failed to find a workable rtree page split");
            choose_left = false;    /* keep compiler quiet */
        }

        if (choose_left)
        {
            pfree(DatumGetPointer(datum_l));
            pfree(DatumGetPointer(union_dr));
            datum_l = union_dl;
            size_l = size_alpha;
            left_avail_space -= item_1_sz;
            *left++ = i;
            v->spl_nleft++;
        }
        else
        {
            pfree(DatumGetPointer(datum_r));
            pfree(DatumGetPointer(union_dl));
            datum_r = union_dr;
            size_r = size_beta;
            right_avail_space -= item_1_sz;
            *right++ = i;
            v->spl_nright++;
        }
    }

    if (num_tuples_without_seeds > 0)
        pfree(cost_vector);

    *left = *right = InvalidOffsetNumber;   /* add ending sentinels */

    v->spl_ldatum = datum_l;
    v->spl_rdatum = datum_r;
}

static void
RTInitBuffer(Buffer b, uint32 f)
{
    RTreePageOpaque opaque;
    Page        page;
    Size        pageSize;

    pageSize = BufferGetPageSize(b);
    page = BufferGetPage(b);
    PageInit(page, pageSize, sizeof(RTreePageOpaqueData));

    opaque = (RTreePageOpaque) PageGetSpecialPointer(page);
    opaque->flags = f;
}

static OffsetNumber
choose(Relation r, Page p, IndexTuple it, RTSTATE *rtstate)
{
    OffsetNumber maxoff;
    OffsetNumber i;
    Datum       ud,
                id;
    Datum       datum;
    float       usize,
                dsize;
    OffsetNumber which;
    float       which_grow;

    id = IndexTupleGetDatum(it);
    maxoff = PageGetMaxOffsetNumber(p);
    which_grow = -1.0;
    which = -1;

    for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
    {
        datum = IndexTupleGetDatum(PageGetItem(p, PageGetItemId(p, i)));
        FunctionCall2(&rtstate->sizeFn, datum,
                      PointerGetDatum(&dsize));
        ud = FunctionCall2(&rtstate->unionFn, datum, id);
        FunctionCall2(&rtstate->sizeFn, ud,
                      PointerGetDatum(&usize));
        pfree(DatumGetPointer(ud));
        if (which_grow < 0 || usize - dsize < which_grow)
        {
            which = i;
            which_grow = usize - dsize;
            if (which_grow == 0)
                break;
        }
    }

    return which;
}

static int
nospace(Page p, IndexTuple it)
{
    return PageGetFreeSpace(p) < IndexTupleSize(it);
}

void
freestack(RTSTACK *s)
{
    RTSTACK    *p;

    while (s != NULL)
    {
        p = s->rts_parent;
        pfree(s);
        s = p;
    }
}
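/*
 * Note (not part of the original rtree.c): choose() above is Guttman's
 * subtree-selection rule -- among the entries on an internal page, pick the
 * one whose bounding box needs the least area enlargement to cover the new
 * key, stopping early on a zero-growth match.  The standalone sketch below
 * applies the same rule to plain float rectangles instead of going through
 * the opclass unionFn/sizeFn callbacks; the Rect type and helper functions
 * are invented for this illustration and do not exist in PostgreSQL.
 */
#include <stdio.h>

typedef struct
{
    float       xlo, ylo, xhi, yhi;
} Rect;

static float
rect_size(Rect r)
{
    return (r.xhi - r.xlo) * (r.yhi - r.ylo);
}

static Rect
rect_union(Rect a, Rect b)
{
    Rect        u;

    u.xlo = (a.xlo < b.xlo) ? a.xlo : b.xlo;
    u.ylo = (a.ylo < b.ylo) ? a.ylo : b.ylo;
    u.xhi = (a.xhi > b.xhi) ? a.xhi : b.xhi;
    u.yhi = (a.yhi > b.yhi) ? a.yhi : b.yhi;
    return u;
}

/* Return the index of the entry whose box grows least when key is added. */
static int
choose_subtree(const Rect *entries, int nentries, Rect key)
{
    int         which = -1;
    float       which_grow = -1.0f;
    int         i;

    for (i = 0; i < nentries; i++)
    {
        float       grow = rect_size(rect_union(entries[i], key)) -
                           rect_size(entries[i]);

        if (which_grow < 0 || grow < which_grow)
        {
            which = i;
            which_grow = grow;
            if (which_grow == 0)
                break;          /* cannot do better than zero enlargement */
        }
    }
    return which;
}

int
main(void)
{
    Rect        entries[] = {{0, 0, 4, 4}, {10, 10, 20, 20}};
    Rect        key = {11, 11, 12, 12};

    /* key lies inside the second box, so it needs no enlargement at all */
    printf("chose entry %d\n", choose_subtree(entries, 2, key));
    return 0;
}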
/*
 * Bulk deletion of all index entries pointing to a set of heap tuples.
 * The set of target tuples is specified via a callback routine that tells
 * whether any given heap tuple (identified by ItemPointer) is being deleted.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
Datum
rtbulkdelete(PG_FUNCTION_ARGS)
{
    Relation    rel = (Relation) PG_GETARG_POINTER(0);
    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
    void       *callback_state = (void *) PG_GETARG_POINTER(2);
    IndexBulkDeleteResult *result;
    BlockNumber num_pages;
    double      tuples_removed;
    double      num_index_tuples;
    IndexScanDesc iscan;

    tuples_removed = 0;
    num_index_tuples = 0;

    /*
     * Since rtree is not marked "amconcurrent" in pg_am, caller should have
     * acquired exclusive lock on index relation.  We need no locking here.
     */

    /*
     * XXX generic implementation --- should be improved!
     */

    /* walk through the entire index */
    iscan = index_beginscan(NULL, rel, SnapshotAny, 0, NULL);
    /* including killed tuples */
    iscan->ignore_killed_tuples = false;

    while (index_getnext_indexitem(iscan, ForwardScanDirection))
    {
        vacuum_delay_point();

        if (callback(&iscan->xs_ctup.t_self, callback_state))
        {
            ItemPointerData indextup = iscan->currentItemData;
            BlockNumber blkno;
            OffsetNumber offnum;
            Buffer      buf;
            Page        page;

            blkno = ItemPointerGetBlockNumber(&indextup);
            offnum = ItemPointerGetOffsetNumber(&indextup);

            /* adjust any scans that will be affected by this deletion */
            /* (namely, my own scan) */
            rtadjscans(rel, RTOP_DEL, blkno, offnum);

            /* delete the index tuple */
            buf = ReadBuffer(rel, blkno);
            page = BufferGetPage(buf);
            PageIndexTupleDelete(page, offnum);

            WriteBuffer(buf);

            tuples_removed += 1;
        }
        else
            num_index_tuples += 1;
    }

    index_endscan(iscan);

    /* return statistics */
    num_pages = RelationGetNumberOfBlocks(rel);

    result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
    result->num_pages = num_pages;
    result->num_index_tuples = num_index_tuples;
    result->tuples_removed = tuples_removed;

    PG_RETURN_POINTER(result);
}

static void
initRtstate(RTSTATE *rtstate, Relation index)
{
    fmgr_info_copy(&rtstate->unionFn,
                   index_getprocinfo(index, 1, RT_UNION_PROC),
                   CurrentMemoryContext);
    fmgr_info_copy(&rtstate->sizeFn,
                   index_getprocinfo(index, 1, RT_SIZE_PROC),
                   CurrentMemoryContext);
    fmgr_info_copy(&rtstate->interFn,
                   index_getprocinfo(index, 1, RT_INTER_PROC),
                   CurrentMemoryContext);
}

/* for sorting SPLITCOST records in descending order */
static int
qsort_comp_splitcost(const void *a, const void *b)
{
    float       diff =
        ((SPLITCOST *) a)->cost_differential -
        ((SPLITCOST *) b)->cost_differential;

    if (diff < 0)
        return 1;
    else if (diff > 0)
        return -1;
    else
        return 0;
}

#ifdef RTDEBUG

void
_rtdump(Relation r)
{
    Buffer      buf;
    Page        page;
    OffsetNumber offnum,
                maxoff;
    BlockNumber blkno;
    BlockNumber nblocks;
    RTreePageOpaque po;
    IndexTuple  itup;
    BlockNumber itblkno;
    OffsetNumber itoffno;
    Datum       datum;
    char       *itkey;

    nblocks = RelationGetNumberOfBlocks(r);
    for (blkno = 0; blkno < nblocks; blkno++)
    {
        buf = ReadBuffer(r, blkno);
        page = BufferGetPage(buf);
        po = (RTreePageOpaque) PageGetSpecialPointer(page);
        maxoff = PageGetMaxOffsetNumber(page);
        printf("Page %d maxoff %d <%s>\n", blkno, maxoff,
               (po->flags & F_LEAF ? "LEAF" : "INTERNAL"));

        if (PageIsEmpty(page))
        {
            ReleaseBuffer(buf);
            continue;
        }

        for (offnum = FirstOffsetNumber;
             offnum <= maxoff;
             offnum = OffsetNumberNext(offnum))
        {
            itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
            itblkno = ItemPointerGetBlockNumber(&(itup->t_tid));
            itoffno = ItemPointerGetOffsetNumber(&(itup->t_tid));
            datum = IndexTupleGetDatum(itup);
            itkey = DatumGetCString(DirectFunctionCall1(box_out,
                                                        datum));
            printf("\t[%d] size %d heap <%d,%d> key:%s\n",
                   offnum, IndexTupleSize(itup), itblkno, itoffno, itkey);
            pfree(itkey);
        }

        ReleaseBuffer(buf);
    }
}

#endif   /* defined RTDEBUG */

void
rtree_redo(XLogRecPtr lsn, XLogRecord *record)
{
    elog(PANIC, "rtree_redo: unimplemented");
}

void
rtree_desc(char *buf, uint8 xl_info, char *rec)
{
}

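A side note on the split-cost ordering used above: qsort_comp_splitcost deliberately returns the opposite of the usual comparator sign, so that qsort() sorts the SPLITCOST array in descending order of cost_differential, i.e. the tuples with the largest difference between the cost of putting them on the left page versus the right page are considered first. The following standalone sketch shows that "backwards" comparator convention in isolation; the DemoSplitCost struct and demo_* names are invented for illustration and are not PostgreSQL APIs.

#include <stdio.h>
#include <stdlib.h>

/* trimmed-down stand-in for the SPLITCOST record used by the split code */
typedef struct
{
    int         offset_number;
    float       cost_differential;
} DemoSplitCost;

/* "backwards" comparator: larger cost_differential sorts first */
static int
demo_comp_splitcost(const void *a, const void *b)
{
    float       diff = ((const DemoSplitCost *) a)->cost_differential -
                       ((const DemoSplitCost *) b)->cost_differential;

    if (diff < 0)
        return 1;               /* a sorts after b  -> descending order */
    else if (diff > 0)
        return -1;              /* a sorts before b -> descending order */
    else
        return 0;
}

int
main(void)
{
    DemoSplitCost cv[] = {{1, 0.5f}, {2, 3.0f}, {3, 1.25f}};
    int         i;

    qsort(cv, 3, sizeof(DemoSplitCost), demo_comp_splitcost);

    /* prints offsets 2, 3, 1: largest cost differential is handled first */
    for (i = 0; i < 3; i++)
        printf("offset %d  diff %.2f\n", cv[i].offset_number,
               cv[i].cost_differential);
    return 0;
}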