vacuum.c
From "PostgreSQL 7.4.6 for Linux" · C source · 2,201 lines total · part 1 of 5
	UnlockRelationForSession(&onerelid, lmode);

	return result;
}


/****************************************************************************
 *
 *			Code for VACUUM FULL (only)
 *
 ****************************************************************************
 */

/*
 *	full_vacuum_rel() -- perform FULL VACUUM for one heap relation
 *
 *		This routine vacuums a single heap, cleans out its indexes, and
 *		updates its num_pages and num_tuples statistics.
 *
 *		At entry, we have already established a transaction and opened
 *		and locked the relation.
 */
static void
full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
{
	VacPageListData vacuum_pages;	/* List of pages to vacuum and/or
									 * clean indexes */
	VacPageListData fraged_pages;	/* List of pages with space enough
									 * for re-using */
	Relation   *Irel;
	int			nindexes,
				i;
	VRelStats  *vacrelstats;

	vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
						  &OldestXmin, &FreezeLimit);

	/*
	 * Set up statistics-gathering machinery.
	 */
	vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
	vacrelstats->rel_pages = 0;
	vacrelstats->rel_tuples = 0;
	vacrelstats->hasindex = false;

	/* scan the heap */
	vacuum_pages.num_pages = fraged_pages.num_pages = 0;
	scan_heap(vacrelstats, onerel, &vacuum_pages, &fraged_pages);

	/* Now open all indexes of the relation */
	vac_open_indexes(onerel, &nindexes, &Irel);
	if (nindexes > 0)
		vacrelstats->hasindex = true;

	/* Clean/scan index relation(s) */
	if (Irel != (Relation *) NULL)
	{
		if (vacuum_pages.num_pages > 0)
		{
			for (i = 0; i < nindexes; i++)
				vacuum_index(&vacuum_pages, Irel[i],
							 vacrelstats->rel_tuples, 0);
		}
		else
		{
			/* just scan indexes to update statistic */
			for (i = 0; i < nindexes; i++)
				scan_index(Irel[i], vacrelstats->rel_tuples);
		}
	}

	if (fraged_pages.num_pages > 0)
	{
		/* Try to shrink heap */
		repair_frag(vacrelstats, onerel, &vacuum_pages, &fraged_pages,
					nindexes, Irel);
		vac_close_indexes(nindexes, Irel);
	}
	else
	{
		vac_close_indexes(nindexes, Irel);
		if (vacuum_pages.num_pages > 0)
		{
			/* Clean pages from vacuum_pages list */
			vacuum_heap(vacrelstats, onerel, &vacuum_pages);
		}
		else
		{
			/*
			 * Flush dirty pages out to disk.  We must do this even if we
			 * didn't do anything else, because we want to ensure that all
			 * tuples have correct on-row commit status on disk (see
			 * bufmgr.c's comments for FlushRelationBuffers()).
			 */
			i = FlushRelationBuffers(onerel, vacrelstats->rel_pages);
			if (i < 0)
				elog(ERROR, "FlushRelationBuffers returned %d", i);
		}
	}

	/* update shared free space map with final free space info */
	vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);

	/* update statistics in pg_class */
	vac_update_relstats(RelationGetRelid(onerel), vacrelstats->rel_pages,
						vacrelstats->rel_tuples, vacrelstats->hasindex);
}
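/*
 * Note on the flow above: scan_heap fills vacuum_pages (pages whose dead
 * tuples and stale line pointers need cleaning out) and fraged_pages (pages
 * with enough free space to receive moved tuples).  Indexes are then either
 * vacuumed or merely rescanned for statistics, and repair_frag compacts the
 * heap by moving tuples from the tail of the relation into fraged_pages so
 * trailing pages can be truncated away.  Only VACUUM FULL performs this
 * tuple-moving step; plain (lazy) VACUUM reclaims space in place.
 */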
/*
 *	scan_heap() -- scan an open heap relation
 *
 *		This routine sets commit status bits, constructs vacuum_pages (list
 *		of pages we need to compact free space on and/or clean indexes of
 *		deleted tuples), constructs fraged_pages (list of pages with free
 *		space that tuples could be moved into), and calculates statistics
 *		on the number of live tuples in the heap.
 */
static void
scan_heap(VRelStats *vacrelstats, Relation onerel,
		  VacPageList vacuum_pages, VacPageList fraged_pages)
{
	BlockNumber nblocks,
				blkno;
	ItemId		itemid;
	Buffer		buf;
	HeapTupleData tuple;
	OffsetNumber offnum,
				maxoff;
	bool		pgchanged,
				tupgone,
				notup;
	char	   *relname;
	VacPage		vacpage,
				vacpagecopy;
	BlockNumber empty_pages,
				empty_end_pages;
	double		num_tuples,
				tups_vacuumed,
				nkeep,
				nunused;
	double		free_space,
				usable_free_space;
	Size		min_tlen = MaxTupleSize;
	Size		max_tlen = 0;
	int			i;
	bool		do_shrinking = true;
	VTupleLink	vtlinks = (VTupleLink) palloc(100 * sizeof(VTupleLinkData));
	int			num_vtlinks = 0;
	int			free_vtlinks = 100;
	VacRUsage	ru0;

	vac_init_rusage(&ru0);

	relname = RelationGetRelationName(onerel);
	ereport(elevel,
			(errmsg("vacuuming \"%s.%s\"",
					get_namespace_name(RelationGetNamespace(onerel)),
					relname)));

	empty_pages = empty_end_pages = 0;
	num_tuples = tups_vacuumed = nkeep = nunused = 0;
	free_space = 0;

	nblocks = RelationGetNumberOfBlocks(onerel);

	/*
	 * We initially create each VacPage item in a maximal-sized workspace,
	 * then copy the workspace into a just-large-enough copy.
	 */
	vacpage = (VacPage) palloc(sizeof(VacPageData) +
							   MaxOffsetNumber * sizeof(OffsetNumber));

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Page		page,
					tempPage = NULL;
		bool		do_reap,
					do_frag;

		CHECK_FOR_INTERRUPTS();

		buf = ReadBuffer(onerel, blkno);
		page = BufferGetPage(buf);

		vacpage->blkno = blkno;
		vacpage->offsets_used = 0;
		vacpage->offsets_free = 0;

		if (PageIsNew(page))
		{
			ereport(WARNING,
					(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
							relname, blkno)));
			PageInit(page, BufferGetPageSize(buf), 0);
			vacpage->free = ((PageHeader) page)->pd_upper -
				((PageHeader) page)->pd_lower;
			free_space += vacpage->free;
			empty_pages++;
			empty_end_pages++;
			vacpagecopy = copy_vac_page(vacpage);
			vpage_insert(vacuum_pages, vacpagecopy);
			vpage_insert(fraged_pages, vacpagecopy);
			WriteBuffer(buf);
			continue;
		}

		if (PageIsEmpty(page))
		{
			vacpage->free = ((PageHeader) page)->pd_upper -
				((PageHeader) page)->pd_lower;
			free_space += vacpage->free;
			empty_pages++;
			empty_end_pages++;
			vacpagecopy = copy_vac_page(vacpage);
			vpage_insert(vacuum_pages, vacpagecopy);
			vpage_insert(fraged_pages, vacpagecopy);
			ReleaseBuffer(buf);
			continue;
		}

		pgchanged = false;
		notup = true;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			uint16		sv_infomask;

			itemid = PageGetItemId(page, offnum);

			/*
			 * Collect un-used items too - it's possible to have indexes
			 * pointing here after crash.
			 */
			if (!ItemIdIsUsed(itemid))
			{
				vacpage->offsets[vacpage->offsets_free++] = offnum;
				nunused += 1;
				continue;
			}

			tuple.t_datamcxt = NULL;
			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple.t_len = ItemIdGetLength(itemid);
			ItemPointerSet(&(tuple.t_self), blkno, offnum);

			tupgone = false;
			sv_infomask = tuple.t_data->t_infomask;

			switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin))
			{
				case HEAPTUPLE_DEAD:
					tupgone = true;		/* we can delete the tuple */
					break;
				case HEAPTUPLE_LIVE:

					/*
					 * Tuple is good.  Consider whether to replace its
					 * xmin value with FrozenTransactionId.
					 */
					if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
						TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
											  FreezeLimit))
					{
						HeapTupleHeaderSetXmin(tuple.t_data, FrozenTransactionId);
						/* infomask should be okay already */
						Assert(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED);
						pgchanged = true;
					}
					break;
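
				/*
				 * Freezing, illustrated: FrozenTransactionId is 2, which
				 * every snapshot treats as committed in the infinite past.
				 * With FreezeLimit = 1000, say, a live tuple whose xmin is
				 * 500 gets its xmin overwritten with 2, so it stays visible
				 * even after the 32-bit XID counter wraps past the original
				 * value.  Only normal XIDs are candidates, hence the
				 * TransactionIdIsNormal() test above.
				 */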
				case HEAPTUPLE_RECENTLY_DEAD:

					/*
					 * If tuple is recently deleted then we must not
					 * remove it from relation.
					 */
					nkeep += 1;

					/*
					 * If we do shrinking and this tuple is updated one
					 * then remember it to construct updated tuple
					 * dependencies.
					 */
					if (do_shrinking &&
						!(ItemPointerEquals(&(tuple.t_self),
											&(tuple.t_data->t_ctid))))
					{
						if (free_vtlinks == 0)
						{
							free_vtlinks = 1000;
							vtlinks = (VTupleLink) repalloc(vtlinks,
											(free_vtlinks + num_vtlinks) *
											sizeof(VTupleLinkData));
						}
						vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
						vtlinks[num_vtlinks].this_tid = tuple.t_self;
						free_vtlinks--;
						num_vtlinks++;
					}
					break;
				case HEAPTUPLE_INSERT_IN_PROGRESS:

					/*
					 * This should not happen, since we hold exclusive
					 * lock on the relation; shouldn't we raise an error?
					 * (Actually, it can happen in system catalogs, since
					 * we tend to release write lock before commit there.)
					 */
					ereport(NOTICE,
							(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
									relname, blkno, offnum,
									HeapTupleHeaderGetXmin(tuple.t_data))));
					do_shrinking = false;
					break;
				case HEAPTUPLE_DELETE_IN_PROGRESS:

					/*
					 * This should not happen, since we hold exclusive
					 * lock on the relation; shouldn't we raise an error?
					 * (Actually, it can happen in system catalogs, since
					 * we tend to release write lock before commit there.)
					 */
					ereport(NOTICE,
							(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
									relname, blkno, offnum,
									HeapTupleHeaderGetXmax(tuple.t_data))));
					do_shrinking = false;
					break;
				default:
					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
					break;
			}

			/* check for hint-bit update by HeapTupleSatisfiesVacuum */
			if (sv_infomask != tuple.t_data->t_infomask)
				pgchanged = true;

			/*
			 * Other checks...
			 */
			if (onerel->rd_rel->relhasoids &&
				!OidIsValid(HeapTupleGetOid(&tuple)))
				elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
					 relname, blkno, offnum);

			if (tupgone)
			{
				ItemId		lpp;

				/*
				 * Here we are building a temporary copy of the page with
				 * dead tuples removed.  Below we will apply
				 * PageRepairFragmentation to the copy, so that we can
				 * determine how much space will be available after
				 * removal of dead tuples.  But note we are NOT changing
				 * the real page yet...
				 */
				if (tempPage == (Page) NULL)
				{
					Size		pageSize;

					pageSize = PageGetPageSize(page);
					tempPage = (Page) palloc(pageSize);
					memcpy(tempPage, page, pageSize);
				}

				/* mark it unused on the temp page */
				lpp = PageGetItemId(tempPage, offnum);
				lpp->lp_flags &= ~LP_USED;

				vacpage->offsets[vacpage->offsets_free++] = offnum;
				tups_vacuumed += 1;
			}
			else
			{
				num_tuples += 1;
				notup = false;
				if (tuple.t_len < min_tlen)
					min_tlen = tuple.t_len;
				if (tuple.t_len > max_tlen)
					max_tlen = tuple.t_len;
			}
		}						/* scan along page */

		if (tempPage != (Page) NULL)
		{
			/* Some tuples are removable; figure free space after removal */
			PageRepairFragmentation(tempPage, NULL);
			vacpage->free = ((PageHeader) tempPage)->pd_upper -
				((PageHeader) tempPage)->pd_lower;
			pfree(tempPage);
			do_reap = true;
		}
		else
		{
			/* Just use current available space */
			vacpage->free = ((PageHeader) page)->pd_upper -
				((PageHeader) page)->pd_lower;
			/* Need to reap the page if it has ~LP_USED line pointers */
			do_reap = (vacpage->offsets_free > 0);
		}

		free_space += vacpage->free;
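
		/*
		 * pd_upper - pd_lower is the unallocated "hole" in the middle of
		 * the page: pd_lower points just past the line-pointer array and
		 * pd_upper to the start of the stored tuples.  Because
		 * PageRepairFragmentation squeezed out the tuples we marked
		 * ~LP_USED on the temp copy, the subtraction there predicts the
		 * free space the real page will have once it is actually vacuumed.
		 */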
		/*
		 * Add the page to fraged_pages if it has a useful amount of free
		 * space.  "Useful" means enough for a minimal-sized tuple.  But we
		 * don't know that accurately near the start of the relation, so
		 * add pages unconditionally if they have >= BLCKSZ/10 free space.
		 */
		do_frag = (vacpage->free >= min_tlen ||
				   vacpage->free >= BLCKSZ / 10);

		if (do_reap || do_frag)
		{
			vacpagecopy = copy_vac_page(vacpage);
			if (do_reap)
				vpage_insert(vacuum_pages, vacpagecopy);
			if (do_frag)
				vpage_insert(fraged_pages, vacpagecopy);
		}

		/*
		 * Include the page in empty_end_pages if it will be empty after
		 * vacuuming; this is to keep us from using it as a move
		 * destination.
		 */
		if (notup)
		{
			empty_pages++;
			empty_end_pages++;
		}
		else
			empty_end_pages = 0;

		if (pgchanged)
			WriteBuffer(buf);
		else
			ReleaseBuffer(buf);
	}

	pfree(vacpage);

	/* save stats in the rel list for use later */
	vacrelstats->rel_tuples = num_tuples;
	vacrelstats->rel_pages = nblocks;