📄 vacuumlazy.c
            lazy_record_free_space(vacrelstats, blkno,
                                   PageGetFreeSpace(page));
        }

        /* Remember the location of the last page with nonremovable tuples */
        if (hastup)
            vacrelstats->nonempty_pages = blkno + 1;

        LockBuffer(buf, BUFFER_LOCK_UNLOCK);

        if (pgchanged)
            SetBufferCommitInfoNeedsSave(buf);

        ReleaseBuffer(buf);
    }

    /* save stats for use later */
    vacrelstats->rel_tuples = num_tuples;

    /* If any tuples need to be deleted, perform final vacuum cycle */
    /* XXX put a threshold on min number of tuples here? */
    if (vacrelstats->num_dead_tuples > 0)
    {
        /* Remove index entries */
        for (i = 0; i < nindexes; i++)
            lazy_vacuum_index(Irel[i], vacrelstats);

        /* Remove tuples from heap */
        lazy_vacuum_heap(onerel, vacrelstats);
    }
    else
    {
        /* Must do post-vacuum cleanup and statistics update anyway */
        for (i = 0; i < nindexes; i++)
            lazy_scan_index(Irel[i], vacrelstats);
    }

    ereport(elevel,
            (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",
                    RelationGetRelationName(onerel),
                    tups_vacuumed, num_tuples, nblocks),
             errdetail("%.0f dead row versions cannot be removed yet.\n"
                       "There were %.0f unused item pointers.\n"
                       "%u pages are entirely empty.\n"
                       "%s",
                       nkeep, nunused, empty_pages,
                       vac_show_rusage(&ru0))));
}

/*
 * lazy_vacuum_heap() -- second pass over the heap
 *
 * This routine marks dead tuples as unused and compacts out free
 * space on their pages.  Pages not having dead tuples recorded from
 * lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
    int         tupindex;
    int         npages;
    VacRUsage   ru0;

    vac_init_rusage(&ru0);

    npages = 0;
    tupindex = 0;
    while (tupindex < vacrelstats->num_dead_tuples)
    {
        BlockNumber tblk;
        Buffer      buf;
        Page        page;

        CHECK_FOR_INTERRUPTS();

        tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
        buf = ReadBuffer(onerel, tblk);
        LockBufferForCleanup(buf);
        tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);

        /* Now that we've compacted the page, record its available space */
        page = BufferGetPage(buf);
        lazy_record_free_space(vacrelstats, tblk,
                               PageGetFreeSpace(page));

        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        WriteBuffer(buf);
        npages++;
    }

    ereport(elevel,
            (errmsg("\"%s\": removed %d row versions in %d pages",
                    RelationGetRelationName(onerel),
                    tupindex, npages),
             errdetail("%s",
                       vac_show_rusage(&ru0))));
}

/*
 * lazy_vacuum_page() -- free dead tuples on a page
 *                       and repair its fragmentation.
 *
 * Caller is expected to handle reading, locking, and writing the buffer.
 *
 * tupindex is the index in vacrelstats->dead_tuples of the first dead
 * tuple for this page.  We assume the rest follow sequentially.
 * The return value is the first tupindex after the tuples of this page.
 */
static int
lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                 int tupindex, LVRelStats *vacrelstats)
{
    OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
    int         uncnt;
    Page        page = BufferGetPage(buffer);
    ItemId      itemid;

    START_CRIT_SECTION();

    for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
    {
        BlockNumber tblk;
        OffsetNumber toff;

        tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
        if (tblk != blkno)
            break;              /* past end of tuples for this block */
        toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
        itemid = PageGetItemId(page, toff);
        itemid->lp_flags &= ~LP_USED;
    }

    uncnt = PageRepairFragmentation(page, unused);

    /* XLOG stuff */
    if (!onerel->rd_istemp)
    {
        XLogRecPtr  recptr;

        recptr = log_heap_clean(onerel, buffer, unused, uncnt);
        PageSetLSN(page, recptr);
        PageSetSUI(page, ThisStartUpID);
    }
    else
    {
        /* No XLOG record, but still need to flag that XID exists on disk */
        MyXactMadeTempRelUpdate = true;
    }

    END_CRIT_SECTION();

    return tupindex;
}

/*
 * lazy_scan_index() -- scan one index relation to update pg_class statistic.
 *
 * We use this when we have no deletions to do.
 */
static void
lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
{
    IndexBulkDeleteResult *stats;
    IndexVacuumCleanupInfo vcinfo;
    VacRUsage   ru0;

    vac_init_rusage(&ru0);

    /*
     * If index is unsafe for concurrent access, must lock it.
     */
    if (!indrel->rd_am->amconcurrent)
        LockRelation(indrel, AccessExclusiveLock);

    /*
     * Even though we're not planning to delete anything, we use the
     * ambulkdelete call, because (a) the scan happens within the index AM
     * for more speed, and (b) it may want to pass private statistics to
     * the amvacuumcleanup call.
     */
    stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);

    /* Do post-VACUUM cleanup, even though we deleted nothing */
    vcinfo.vacuum_full = false;
    vcinfo.message_level = elevel;

    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);

    /*
     * Release lock acquired above.
     */
    if (!indrel->rd_am->amconcurrent)
        UnlockRelation(indrel, AccessExclusiveLock);

    if (!stats)
        return;

    /* now update statistics in pg_class */
    vac_update_relstats(RelationGetRelid(indrel),
                        stats->num_pages,
                        stats->num_index_tuples,
                        false);

    ereport(elevel,
            (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
                    RelationGetRelationName(indrel),
                    stats->num_index_tuples,
                    stats->num_pages),
             errdetail("%u index pages have been deleted, %u are currently reusable.\n"
                       "%s",
                       stats->pages_deleted, stats->pages_free,
                       vac_show_rusage(&ru0))));

    pfree(stats);
}

/*
 * lazy_vacuum_index() -- vacuum one index relation.
 *
 * Delete all the index entries pointing to tuples listed in
 * vacrelstats->dead_tuples.
 *
 * Finally, we arrange to update the index relation's statistics in
 * pg_class.
 */
static void
lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
{
    IndexBulkDeleteResult *stats;
    IndexVacuumCleanupInfo vcinfo;
    VacRUsage   ru0;

    vac_init_rusage(&ru0);

    /*
     * If index is unsafe for concurrent access, must lock it.
     */
    if (!indrel->rd_am->amconcurrent)
        LockRelation(indrel, AccessExclusiveLock);

    /* Do bulk deletion */
    stats = index_bulk_delete(indrel, lazy_tid_reaped, (void *) vacrelstats);

    /* Do post-VACUUM cleanup */
    vcinfo.vacuum_full = false;
    vcinfo.message_level = elevel;

    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);

    /*
     * Release lock acquired above.
     */
    if (!indrel->rd_am->amconcurrent)
        UnlockRelation(indrel, AccessExclusiveLock);

    if (!stats)
        return;

    /* now update statistics in pg_class */
    vac_update_relstats(RelationGetRelid(indrel),
                        stats->num_pages,
                        stats->num_index_tuples,
                        false);

    ereport(elevel,
            (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
                    RelationGetRelationName(indrel),
                    stats->num_index_tuples,
                    stats->num_pages),
             errdetail("%.0f index row versions were removed.\n"
                       "%u index pages have been deleted, %u are currently reusable.\n"
                       "%s",
                       stats->tuples_removed,
                       stats->pages_deleted, stats->pages_free,
                       vac_show_rusage(&ru0))));

    pfree(stats);
}

/*
 * lazy_truncate_heap - try to truncate off any empty pages at the end
 */
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
    BlockNumber old_rel_pages = vacrelstats->rel_pages;
    BlockNumber new_rel_pages;
    PageFreeSpaceInfo *pageSpaces;
    int         n;
    int         i,
                j;
    VacRUsage   ru0;

    vac_init_rusage(&ru0);

    /*
     * We need full exclusive lock on the relation in order to do
     * truncation.  If we can't get it, give up rather than waiting --- we
     * don't want to block other backends, and we don't want to deadlock
     * (which is quite possible considering we already hold a lower-grade
     * lock).
     */
    if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
        return;

    /*
     * Now that we have exclusive lock, look to see if the rel has grown
     * whilst we were vacuuming with non-exclusive lock.  If so, give up;
     * the newly added pages presumably contain non-deletable tuples.
     */
    new_rel_pages = RelationGetNumberOfBlocks(onerel);
    if (new_rel_pages != old_rel_pages)
    {
        /* might as well use the latest news when we update pg_class stats */
        vacrelstats->rel_pages = new_rel_pages;
        UnlockRelation(onerel, AccessExclusiveLock);
        return;
    }

    /*
     * Scan backwards from the end to verify that the end pages actually
     * contain nothing we need to keep.  This is *necessary*, not
     * optional, because other backends could have added tuples to these
     * pages whilst we were vacuuming.
     */
    new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);

    if (new_rel_pages >= old_rel_pages)
    {
        /* can't do anything after all */
        UnlockRelation(onerel, AccessExclusiveLock);
        return;
    }

    /*
     * Okay to truncate.
     *
     * First, flush any shared buffers for the blocks we intend to delete.
     * FlushRelationBuffers is a bit more than we need for this, since it
     * will also write out dirty buffers for blocks we aren't deleting,
     * but it's the closest thing in bufmgr's API.
     */
    i = FlushRelationBuffers(onerel, new_rel_pages);
    if (i < 0)
        elog(ERROR, "FlushRelationBuffers returned %d", i);

    /*
     * Do the physical truncation.
     */
    new_rel_pages = smgrtruncate(DEFAULT_SMGR, onerel, new_rel_pages);
    onerel->rd_nblocks = new_rel_pages;     /* update relcache immediately */
    onerel->rd_targblock = InvalidBlockNumber;
    vacrelstats->rel_pages = new_rel_pages; /* save new number of blocks */

    /*
     * Drop free-space info for removed blocks; these must not get entered
     * into the FSM!
     */
    pageSpaces = vacrelstats->free_pages;
    n = vacrelstats->num_free_pages;
    j = 0;
    for (i = 0; i < n; i++)
    {
        if (pageSpaces[i].blkno < new_rel_pages)
        {
            pageSpaces[j] = pageSpaces[i];
            j++;
        }
    }
    vacrelstats->num_free_pages = j;
    /* We destroyed the heap ordering, so mark array unordered */
    vacrelstats->fs_is_heap = false;

    /*
     * We keep the exclusive lock until commit (perhaps not necessary)?
     */

    ereport(elevel,
            (errmsg("\"%s\": truncated %u to %u pages",
                    RelationGetRelationName(onerel),
                    old_rel_pages, new_rel_pages),
             errdetail("%s",
                       vac_show_rusage(&ru0))));
}

/*
 * Rescan end pages to verify that they are (still) empty of needed tuples.
 *
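The index passes above hand index_bulk_delete two callbacks, dummy_tid_reaped and lazy_tid_reaped, whose definitions fall outside this excerpt. The sketch below illustrates what such callbacks could look like under the assumption that dead_tuples is built in ascending TID order by lazy_scan_heap; the helper vac_cmp_itemptr and the exact bodies here are illustrative, not the file's own text, and they rely on the usual PostgreSQL headers already included by vacuumlazy.c.

/*
 * Illustrative sketch only (not part of the original excerpt):
 * TID comparison in block-then-offset order, suitable for bsearch().
 */
static int
vac_cmp_itemptr(const void *left, const void *right)
{
    BlockNumber lblk,
                rblk;
    OffsetNumber loff,
                roff;

    lblk = ItemPointerGetBlockNumber((ItemPointer) left);
    rblk = ItemPointerGetBlockNumber((ItemPointer) right);
    if (lblk < rblk)
        return -1;
    if (lblk > rblk)
        return 1;

    loff = ItemPointerGetOffsetNumber((ItemPointer) left);
    roff = ItemPointerGetOffsetNumber((ItemPointer) right);
    if (loff < roff)
        return -1;
    if (loff > roff)
        return 1;
    return 0;
}

/*
 * Sketch of the callback used by lazy_vacuum_index: an index entry is
 * deletable iff its heap TID appears in the (sorted) dead_tuples array.
 */
static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
    LVRelStats *vacrelstats = (LVRelStats *) state;
    ItemPointer res;

    res = (ItemPointer) bsearch((void *) itemptr,
                                (void *) vacrelstats->dead_tuples,
                                vacrelstats->num_dead_tuples,
                                sizeof(ItemPointerData),
                                vac_cmp_itemptr);
    return (res != NULL);
}

/*
 * Sketch of the callback used by lazy_scan_index: report nothing as dead,
 * so the bulk-delete scan only gathers statistics.
 */
static bool
dummy_tid_reaped(ItemPointer itemptr, void *state)
{
    return false;
}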