/*
 * rtree.c — excerpt from PostgreSQL 7.4.6 (Linux), C source.
 * The full file is 1,365 lines; this excerpt is page 1 of 3.
 */
/*-------------------------------------------------------------------------
 *
 * rtree.c
 *	  interface routines for the postgres rtree indexed access method.
 *
 * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/access/rtree/rtree.c,v 1.80 2003/09/25 06:57:57 petere Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heapam.h"
#include "access/rtree.h"
#include "access/xlogutils.h"
#include "catalog/index.h"
#include "executor/executor.h"
#include "miscadmin.h"

/*
 * XXX We assume that all datatypes indexable in rtrees are pass-by-reference.
 * To fix this, you'd need to improve the IndexTupleGetDatum() macro, and
 * do something with the various datum-pfreeing code.  However, it's not that
 * unreasonable an assumption in practice.
 */
/* Datum starts immediately after the fixed IndexTupleData header. */
#define IndexTupleGetDatum(itup)  \
	PointerGetDatum(((char *) (itup)) + sizeof(IndexTupleData))

/*
 * Space-allocation macros.  Note we count the item's line pointer in its size.
 */
#define RTPageAvailSpace  \
	(BLCKSZ - (sizeof(PageHeaderData) - sizeof(ItemIdData)) \
	 - MAXALIGN(sizeof(RTreePageOpaqueData)))
#define IndexTupleTotalSize(itup)  \
	(MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData))
/* Size of just the key data, excluding the IndexTupleData header. */
#define IndexTupleAttSize(itup)  \
	(IndexTupleSize(itup) - sizeof(IndexTupleData))

/* results of rtpicksplit() */
typedef struct SPLITVEC
{
	OffsetNumber *spl_left;		/* item offsets assigned to the left page */
	int			spl_nleft;
	Datum		spl_ldatum;		/* union key for the left page */
	OffsetNumber *spl_right;	/* item offsets assigned to the right page */
	int			spl_nright;
	Datum		spl_rdatum;		/* union key for the right page */
} SPLITVEC;

/* for sorting tuples by cost, for picking split */
typedef struct SPLITCOST
{
	OffsetNumber offset_number;
	float		cost_differential;
	bool		choose_left;
} SPLITCOST;

/*
 * Cached FmgrInfo lookups for the three opclass support procedures an
 * rtree needs; filled in by initRtstate() and threaded through the
 * insertion routines.
 */
typedef struct RTSTATE
{
	FmgrInfo	unionFn;		/* union function */
	FmgrInfo	sizeFn;			/* size function */
	FmgrInfo	interFn;		/* intersection function */
} RTSTATE;

/* Working state for rtbuild and its callback */
typedef struct
{
	RTSTATE		rtState;
	double		indtuples;		/* running count of tuples indexed */
} RTBuildState;

/*
 * non-export function prototypes
 *
 * NOTE(review): several of these (rtintinsert, rtnewroot, rtpicksplit,
 * RTInitBuffer, choose, nospace, initRtstate, qsort_comp_splitcost) are
 * defined beyond this excerpt (page 1 of 3).
 */
static void rtbuildCallback(Relation index,
				HeapTuple htup,
				Datum *attdata,
				char *nulls,
				bool tupleIsAlive,
				void *state);
static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
		   RTSTATE *rtstate);
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
		  RTSTATE *rtstate);
static InsertIndexResult rtdosplit(Relation r, Buffer buffer, RTSTACK *stack,
		  IndexTuple itup, RTSTATE *rtstate);
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
			IndexTuple rtup, RTSTATE *rtstate);
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
static void rtpicksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
			RTSTATE *rtstate);
static void RTInitBuffer(Buffer b, uint32 f);
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
	   RTSTATE *rtstate);
static int	nospace(Page p, IndexTuple it);
static void initRtstate(RTSTATE *rtstate, Relation index);
static int	qsort_comp_splitcost(const void *a, const void *b);

/*
 * routine to build an index.
*	Basically calls insert over and over
 */
Datum
rtbuild(PG_FUNCTION_ARGS)
{
	Relation	heap = (Relation) PG_GETARG_POINTER(0);
	Relation	index = (Relation) PG_GETARG_POINTER(1);
	IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
	double		reltuples;
	RTBuildState buildstate;
	Buffer		buffer;

	/* no locking is needed */
	initRtstate(&buildstate.rtState, index);

	/*
	 * We expect to be called exactly once for any index relation. If
	 * that's not the case, big trouble's what we have.
	 */
	if (RelationGetNumberOfBlocks(index) != 0)
		elog(ERROR, "index \"%s\" already contains data",
			 RelationGetRelationName(index));

	/* initialize the root page (starts out as a leaf) */
	buffer = ReadBuffer(index, P_NEW);
	RTInitBuffer(buffer, F_LEAF);
	WriteBuffer(buffer);

	/* build the index */
	buildstate.indtuples = 0;

	/* do the heap scan; rtbuildCallback is invoked per live heap tuple */
	reltuples = IndexBuildHeapScan(heap, index, indexInfo,
								   rtbuildCallback, (void *) &buildstate);

	/* okay, all heap tuples are indexed */

	/*
	 * Since we just counted the tuples in the heap, we update its stats
	 * in pg_class to guarantee that the planner takes advantage of the
	 * index we just created.  But, only update statistics during normal
	 * index definitions, not for indices on system catalogs created
	 * during bootstrap processing.  We must close the relations before
	 * updating statistics to guarantee that the relcache entries are
	 * flushed when we increment the command counter in UpdateStats(). But
	 * we do not release any locks on the relations; those will be held
	 * until end of transaction.
	 */
	if (IsNormalProcessingMode())
	{
		Oid			hrelid = RelationGetRelid(heap);
		Oid			irelid = RelationGetRelid(index);

		heap_close(heap, NoLock);
		index_close(index);
		UpdateStats(hrelid, reltuples);
		UpdateStats(irelid, buildstate.indtuples);
	}

	PG_RETURN_VOID();
}

/*
 * Per-tuple callback from IndexBuildHeapScan
 */
static void
rtbuildCallback(Relation index,
				HeapTuple htup,
				Datum *attdata,
				char *nulls,
				bool tupleIsAlive,
				void *state)
{
	RTBuildState *buildstate = (RTBuildState *) state;
	IndexTuple	itup;
	InsertIndexResult res;

	/* form an index tuple and point it at the heap tuple */
	itup = index_formtuple(RelationGetDescr(index), attdata, nulls);
	itup->t_tid = htup->t_self;

	/* rtree indexes don't index nulls, see notes in rtinsert */
	if (IndexTupleHasNulls(itup))
	{
		pfree(itup);
		return;
	}

	/*
	 * Since we already have the index relation locked, we call rtdoinsert
	 * directly.  Normal access method calls dispatch through rtinsert,
	 * which locks the relation for write.	This is the right thing to do
	 * if you're inserting single tups, but not when you're initializing
	 * the whole index at once.
	 */
	res = rtdoinsert(index, itup, &buildstate->rtState);

	if (res)
		pfree(res);

	buildstate->indtuples += 1;

	pfree(itup);
}

/*
 * rtinsert -- wrapper for rtree tuple insertion.
 *
 *	  This is the public interface routine for tuple insertion in rtrees.
 *	  It doesn't do any work; just locks the relation and passes the buck.
*/
Datum
rtinsert(PG_FUNCTION_ARGS)
{
	Relation	r = (Relation) PG_GETARG_POINTER(0);
	Datum	   *datum = (Datum *) PG_GETARG_POINTER(1);
	char	   *nulls = (char *) PG_GETARG_POINTER(2);
	ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
	Relation	heapRel = (Relation) PG_GETARG_POINTER(4);
	bool		checkUnique = PG_GETARG_BOOL(5);
#endif
	InsertIndexResult res;
	IndexTuple	itup;
	RTSTATE		rtState;

	/* generate an index tuple */
	itup = index_formtuple(RelationGetDescr(r), datum, nulls);
	itup->t_tid = *ht_ctid;

	/*
	 * Currently, rtrees do not support indexing NULLs; considerable
	 * infrastructure work would have to be done to do anything reasonable
	 * with a NULL.
	 */
	if (IndexTupleHasNulls(itup))
	{
		pfree(itup);
		PG_RETURN_POINTER((InsertIndexResult) NULL);
	}

	initRtstate(&rtState, r);

	/*
	 * Since rtree is not marked "amconcurrent" in pg_am, caller should
	 * have acquired exclusive lock on index relation.	We need no locking
	 * here.
	 */
	res = rtdoinsert(r, itup, &rtState);

	PG_RETURN_POINTER(res);
}

/*
 * rtdoinsert -- workhorse for insertion: descend from the root to a
 *	leaf page (using choose() to pick a subtree at each internal page),
 *	add the tuple there — splitting the page via rtdosplit() if it does
 *	not fit — and then propagate the enlarged key back up the parent
 *	stack with rttighten().
 */
static InsertIndexResult
rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
{
	Page		page;
	Buffer		buffer;
	BlockNumber blk;
	IndexTuple	which;
	OffsetNumber l;
	RTSTACK    *stack;			/* path of internal pages visited */
	InsertIndexResult res;
	RTreePageOpaque opaque;
	Datum		datum;

	blk = P_ROOT;
	buffer = InvalidBuffer;
	stack = (RTSTACK *) NULL;

	do
	{
		/* let go of current buffer before getting next */
		if (buffer != InvalidBuffer)
			ReleaseBuffer(buffer);

		/* get next buffer */
		buffer = ReadBuffer(r, blk);
		page = (Page) BufferGetPage(buffer);

		opaque = (RTreePageOpaque) PageGetSpecialPointer(page);
		if (!(opaque->flags & F_LEAF))
		{
			RTSTACK    *n;
			ItemId		iid;

			/* remember this internal page and the child slot we took */
			n = (RTSTACK *) palloc(sizeof(RTSTACK));
			n->rts_parent = stack;
			n->rts_blk = blk;
			n->rts_child = choose(r, page, itup, rtstate);
			stack = n;

			iid = PageGetItemId(page, n->rts_child);
			which = (IndexTuple) PageGetItem(page, iid);
			blk = ItemPointerGetBlockNumber(&(which->t_tid));
		}
	} while (!(opaque->flags & F_LEAF));

	if (nospace(page, itup))
	{
		/* need to do a split */
		res = rtdosplit(r, buffer, stack, itup, rtstate);
		freestack(stack);
		WriteBuffer(buffer);	/* don't forget to release buffer! */
		return res;
	}

	/* add the item and write the buffer */
	if (PageIsEmpty(page))
	{
		l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
						FirstOffsetNumber,
						LP_USED);
	}
	else
	{
		l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
						OffsetNumberNext(PageGetMaxOffsetNumber(page)),
						LP_USED);
	}
	if (l == InvalidOffsetNumber)
		elog(ERROR, "failed to add index item to \"%s\"",
			 RelationGetRelationName(r));

	WriteBuffer(buffer);

	datum = IndexTupleGetDatum(itup);

	/* now expand the page boundary in the parent to include the new child */
	rttighten(r, stack, datum, IndexTupleAttSize(itup), rtstate);
	freestack(stack);

	/* build and return an InsertIndexResult for this insertion */
	res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
	ItemPointerSet(&(res->pointerData), blk, l);

	return res;
}

/*
 * rttighten -- after adding an entry below, enlarge the parent's key to
 *	cover the new datum, walking up the stack of visited internal pages.
 *	At each level the stored key is replaced in place by the union of
 *	itself and 'datum' (per the opclass union proc); recursion stops as
 *	soon as a level's key size is unchanged, i.e. it already covered the
 *	new entry.  Consumes (pfrees) 'datum'.
 */
static void
rttighten(Relation r,
		  RTSTACK *stk,
		  Datum datum,
		  int att_size,
		  RTSTATE *rtstate)
{
	Datum		oldud;
	Datum		tdatum;
	Page		p;
	float		old_size,
				newd_size;
	Buffer		b;

	/* reached the top of the path; nothing more to adjust */
	if (stk == (RTSTACK *) NULL)
		return;

	b = ReadBuffer(r, stk->rts_blk);
	p = BufferGetPage(b);

	/* key of the child entry we descended through */
	oldud = IndexTupleGetDatum(PageGetItem(p,
									  PageGetItemId(p, stk->rts_child)));

	/* sizeFn returns its result through the float pointer argument */
	FunctionCall2(&rtstate->sizeFn, oldud,
				  PointerGetDatum(&old_size));

	datum = FunctionCall2(&rtstate->unionFn, oldud, datum);

	FunctionCall2(&rtstate->sizeFn, datum,
				  PointerGetDatum(&newd_size));

	/*
	 * If newd_size == 0 we have degenerate rectangles, so we don't know
	 * if there was any change, so we have to assume there was.
	 */
	if ((newd_size == 0) || (newd_size != old_size))
	{
		TupleDesc	td = RelationGetDescr(r);

		if (td->attrs[0]->attlen < 0)
		{
			/*
			 * This is an internal page, so 'oldud' had better be a union
			 * (constant-length) key, too.	(See comment below.)
			 */
			Assert(VARSIZE(DatumGetPointer(datum)) ==
				   VARSIZE(DatumGetPointer(oldud)));
			memmove(DatumGetPointer(oldud), DatumGetPointer(datum),
					VARSIZE(DatumGetPointer(datum)));
		}
		else
		{
			memmove(DatumGetPointer(oldud), DatumGetPointer(datum),
					att_size);
		}
		WriteBuffer(b);

		/*
		 * The user may be defining an index on variable-sized data (like
		 * polygons).  If so, we need to get a constant-sized datum for
		 * insertion on the internal page.	We do this by calling the
		 * union proc, which is required to return a rectangle.
		 */
		tdatum = FunctionCall2(&rtstate->unionFn, datum, datum);

		rttighten(r, stk->rts_parent, tdatum, att_size, rtstate);
		pfree(DatumGetPointer(tdatum));
	}
	else
		ReleaseBuffer(b);
	pfree(DatumGetPointer(datum));
}

/*
 *	rtdosplit -- split a page in the tree.
 *
 *	  rtpicksplit does the interesting work of choosing the split.
 *	  This routine just does the bit-pushing.
 */
static InsertIndexResult
rtdosplit(Relation r,
		  Buffer buffer,
		  RTSTACK *stack,
		  IndexTuple itup,
		  RTSTATE *rtstate)
{
	Page		p;
	Buffer		leftbuf,
				rightbuf;
	Page		left,
				right;
	ItemId		itemid;
	IndexTuple	item;
	IndexTuple	ltup,
				rtup;
	OffsetNumber maxoff;
	OffsetNumber i;
	OffsetNumber leftoff,
				rightoff;
	BlockNumber lbknum,
				rbknum;
	BlockNumber bufblock;
	RTreePageOpaque opaque;
	int			blank;
	InsertIndexResult res;
	char	   *isnull;
	SPLITVEC	v;
/*
 * [End of page 1 of 3 — the body of rtdosplit() and the remainder of the
 *  file continue beyond this excerpt.]
 */