📄 vacuumlazy.c
					/* This is an expected case during concurrent vacuum */
					break;
				case HEAPTUPLE_DELETE_IN_PROGRESS:
					/* This is an expected case during concurrent vacuum */
					break;
				default:
					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
					break;
			}

			if (tupgone)
			{
				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
				tups_vacuumed += 1;
			}
			else
			{
				num_tuples += 1;
				hastup = true;
			}
		}						/* scan along page */

		/*
		 * If we remembered any tuples for deletion, then the page will be
		 * visited again by lazy_vacuum_heap, which will compute and record
		 * its post-compaction free space.  If not, then we're done with this
		 * page, so remember its free space as-is.
		 */
		if (vacrelstats->num_dead_tuples == prev_dead_count)
		{
			lazy_record_free_space(vacrelstats, blkno,
								   PageGetFreeSpace(page));
		}

		/* Remember the location of the last page with nonremovable tuples */
		if (hastup)
			vacrelstats->nonempty_pages = blkno + 1;

		LockBuffer(buf, BUFFER_LOCK_UNLOCK);

		if (pgchanged)
			WriteBuffer(buf);
		else
			ReleaseBuffer(buf);
	}

	/* save stats for use later */
	vacrelstats->rel_tuples = num_tuples;
	vacrelstats->tuples_deleted = tups_vacuumed;

	/* If any tuples need to be deleted, perform final vacuum cycle */
	/* XXX put a threshold on min number of tuples here? */
	if (vacrelstats->num_dead_tuples > 0)
	{
		/* Remove index entries */
		for (i = 0; i < nindexes; i++)
			lazy_vacuum_index(Irel[i],
							  &index_tups_vacuumed[i],
							  &index_pages_removed[i],
							  vacrelstats);
		/* Remove tuples from heap */
		lazy_vacuum_heap(onerel, vacrelstats);
	}
	else if (!did_vacuum_index)
	{
		/* Must do post-vacuum cleanup and statistics update anyway */
		for (i = 0; i < nindexes; i++)
			lazy_scan_index(Irel[i], vacrelstats);
	}

	ereport(elevel,
			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",
					RelationGetRelationName(onerel),
					tups_vacuumed, num_tuples, nblocks),
			 errdetail("%.0f dead row versions cannot be removed yet.\n"
					   "There were %.0f unused item pointers.\n"
					   "%u pages are entirely empty.\n"
					   "%s.",
					   nkeep,
					   nunused,
					   empty_pages,
					   pg_rusage_show(&ru0))));
}


/*
 *	lazy_vacuum_heap() -- second pass over the heap
 *
 *		This routine marks dead tuples as unused and compacts out free
 *		space on their pages.  Pages not having dead tuples recorded from
 *		lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
	int			tupindex;
	int			npages;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);
	npages = 0;

	tupindex = 0;
	while (tupindex < vacrelstats->num_dead_tuples)
	{
		BlockNumber tblk;
		Buffer		buf;
		Page		page;

		vacuum_delay_point();

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		buf = ReadBuffer(onerel, tblk);
		LockBufferForCleanup(buf);
		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
		/* Now that we've compacted the page, record its available space */
		page = BufferGetPage(buf);
		lazy_record_free_space(vacrelstats, tblk,
							   PageGetFreeSpace(page));
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		WriteBuffer(buf);
		npages++;
	}

	ereport(elevel,
			(errmsg("\"%s\": removed %d row versions in %d pages",
					RelationGetRelationName(onerel),
					tupindex, npages),
			 errdetail("%s.",
					   pg_rusage_show(&ru0))));
}

/*
 *	lazy_vacuum_page() -- free dead tuples on a page
 *						  and repair its fragmentation.
 *
 * Caller is expected to handle reading, locking, and writing the buffer.
 *
 * tupindex is the index in vacrelstats->dead_tuples of the first dead
 * tuple for this page.  We assume the rest follow sequentially.
 * The return value is the first tupindex after the tuples of this page.
 */
static int
lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
				 int tupindex, LVRelStats *vacrelstats)
{
	OffsetNumber unused[MaxOffsetNumber];
	int			uncnt;
	Page		page = BufferGetPage(buffer);
	ItemId		itemid;

	START_CRIT_SECTION();

	for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
	{
		BlockNumber tblk;
		OffsetNumber toff;

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		if (tblk != blkno)
			break;				/* past end of tuples for this block */
		toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
		itemid = PageGetItemId(page, toff);
		itemid->lp_flags &= ~LP_USED;
	}

	uncnt = PageRepairFragmentation(page, unused);

	/* XLOG stuff */
	if (!onerel->rd_istemp)
	{
		XLogRecPtr	recptr;

		recptr = log_heap_clean(onerel, buffer, unused, uncnt);
		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}
	else
	{
		/* No XLOG record, but still need to flag that XID exists on disk */
		MyXactMadeTempRelUpdate = true;
	}

	END_CRIT_SECTION();

	return tupindex;
}

/*
 *	lazy_scan_index() -- scan one index relation to update pg_class statistic.
 *
 * We use this when we have no deletions to do.
 */
static void
lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
{
	IndexBulkDeleteResult *stats;
	IndexVacuumCleanupInfo vcinfo;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	/*
	 * Acquire appropriate type of lock on index: must be exclusive if index
	 * AM isn't concurrent-safe.
	 */
	if (indrel->rd_am->amconcurrent)
		LockRelation(indrel, RowExclusiveLock);
	else
		LockRelation(indrel, AccessExclusiveLock);

	/*
	 * Even though we're not planning to delete anything, we use the
	 * ambulkdelete call, because (a) the scan happens within the index AM
	 * for more speed, and (b) it may want to pass private statistics to the
	 * amvacuumcleanup call.
	 */
	stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);

	/* Do post-VACUUM cleanup, even though we deleted nothing */
	vcinfo.vacuum_full = false;
	vcinfo.message_level = elevel;

	stats = index_vacuum_cleanup(indrel, &vcinfo, stats);

	/*
	 * Release lock acquired above.
	 */
	if (indrel->rd_am->amconcurrent)
		UnlockRelation(indrel, RowExclusiveLock);
	else
		UnlockRelation(indrel, AccessExclusiveLock);

	if (!stats)
		return;

	/* now update statistics in pg_class */
	vac_update_relstats(RelationGetRelid(indrel),
						stats->num_pages,
						stats->num_index_tuples,
						false);

	ereport(elevel,
			(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
					RelationGetRelationName(indrel),
					stats->num_index_tuples,
					stats->num_pages),
			 errdetail("%u index pages have been deleted, %u are currently reusable.\n"
					   "%s.",
					   stats->pages_deleted, stats->pages_free,
					   pg_rusage_show(&ru0))));

	pfree(stats);
}

/*
 *	lazy_vacuum_index() -- vacuum one index relation.
 *
 *		Delete all the index entries pointing to tuples listed in
 *		vacrelstats->dead_tuples.
 *
 *		Increment *index_tups_vacuumed by the number of index entries
 *		removed, and *index_pages_removed by the number of pages removed.
 *
 *		Finally, we arrange to update the index relation's statistics in
 *		pg_class.
 */
static void
lazy_vacuum_index(Relation indrel,
				  double *index_tups_vacuumed,
				  BlockNumber *index_pages_removed,
				  LVRelStats *vacrelstats)
{
	IndexBulkDeleteResult *stats;
	IndexVacuumCleanupInfo vcinfo;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	/*
	 * Acquire appropriate type of lock on index: must be exclusive if index
	 * AM isn't concurrent-safe.
	 */
	if (indrel->rd_am->amconcurrent)
		LockRelation(indrel, RowExclusiveLock);
	else
		LockRelation(indrel, AccessExclusiveLock);

	/* Do bulk deletion */
	stats = index_bulk_delete(indrel, lazy_tid_reaped, (void *) vacrelstats);

	/* Do post-VACUUM cleanup */
	vcinfo.vacuum_full = false;
	vcinfo.message_level = elevel;

	stats = index_vacuum_cleanup(indrel, &vcinfo, stats);

	/*
	 * Release lock acquired above.
	 */
	if (indrel->rd_am->amconcurrent)
		UnlockRelation(indrel, RowExclusiveLock);
	else
		UnlockRelation(indrel, AccessExclusiveLock);

	if (!stats)
		return;

	/* accumulate total removed over multiple index-cleaning cycles */
	*index_tups_vacuumed += stats->tuples_removed;
	*index_pages_removed += stats->pages_removed;

	/* now update statistics in pg_class */
	vac_update_relstats(RelationGetRelid(indrel),
						stats->num_pages,
						stats->num_index_tuples,
						false);

	ereport(elevel,
			(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
					RelationGetRelationName(indrel),
					stats->num_index_tuples,
					stats->num_pages),
			 errdetail("%.0f index row versions were removed.\n"
					   "%u index pages have been deleted, %u are currently reusable.\n"
					   "%s.",
					   stats->tuples_removed,
					   stats->pages_deleted, stats->pages_free,
					   pg_rusage_show(&ru0))));

	pfree(stats);
}

/*
 * lazy_truncate_heap - try to truncate off any empty pages at the end
 */
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
	BlockNumber old_rel_pages = vacrelstats->rel_pages;
	BlockNumber new_rel_pages;
	PageFreeSpaceInfo *pageSpaces;
	int			n;
	int			i,
				j;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	/*
	 * We need full exclusive lock on the relation in order to do truncation.
	 * If we can't get it, give up rather than waiting --- we don't want to
	 * block other backends, and we don't want to deadlock (which is quite
	 * possible considering we already hold a lower-grade lock).
	 */
	if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
		return;

	/*
	 * Now that we have exclusive lock, look to see if the rel has grown
	 * whilst we were vacuuming with non-exclusive lock.  If so, give up; the
	 * newly added pages presumably contain non-deletable tuples.
	 */
	new_rel_pages = RelationGetNumberOfBlocks(onerel);
	if (new_rel_pages != old_rel_pages)
	{
		/* might as well use the latest news when we update pg_class stats */
		vacrelstats->rel_pages = new_rel_pages;
		UnlockRelation(onerel, AccessExclusiveLock);
		return;
	}

	/*
	 * Scan backwards from the end to verify that the end pages actually
	 * contain nothing we need to keep.  This is *necessary*, not optional,
	 * because other backends could have added tuples to these pages whilst
	 * we were vacuuming.
	 */
	new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);

	if (new_rel_pages >= old_rel_pages)
	{
		/* can't do anything after all */
		UnlockRelation(onerel, AccessExclusiveLock);
		return;
	}

	/*
	 * Okay to truncate.
	 */
	RelationTruncate(onerel, new_rel_pages);

	/*
	 * Drop free-space info for removed blocks; these must not get entered
	 * into the FSM!
	 */
	pageSpaces = vacrelstats->free_pages;
	n = vacrelstats->num_free_pages;
	j = 0;
	for (i = 0; i < n; i++)
	{
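The fragment above passes two IndexBulkDeleteCallback routines to index_bulk_delete: dummy_tid_reaped (from lazy_scan_index) and lazy_tid_reaped (from lazy_vacuum_index). Their definitions are not part of this excerpt, so the following is only a minimal sketch of what such callbacks can look like, assuming dead_tuples is kept sorted in block/offset order, the same ordering lazy_vacuum_page's sequential scan relies on; it is an illustration, not the file's original code.

/*
 * Sketch only: one plausible shape for the TID-reaped callbacks referenced
 * above, assuming vacrelstats->dead_tuples is sorted by (block, offset).
 */
static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
	LVRelStats *vacrelstats = (LVRelStats *) state;
	int			low = 0;
	int			high = vacrelstats->num_dead_tuples - 1;

	/* binary-search the sorted dead-tuple TID array */
	while (low <= high)
	{
		int			mid = low + (high - low) / 2;
		ItemPointer dead = &vacrelstats->dead_tuples[mid];
		int			cmp;

		if (ItemPointerGetBlockNumber(dead) < ItemPointerGetBlockNumber(itemptr))
			cmp = -1;
		else if (ItemPointerGetBlockNumber(dead) > ItemPointerGetBlockNumber(itemptr))
			cmp = 1;
		else if (ItemPointerGetOffsetNumber(dead) < ItemPointerGetOffsetNumber(itemptr))
			cmp = -1;
		else if (ItemPointerGetOffsetNumber(dead) > ItemPointerGetOffsetNumber(itemptr))
			cmp = 1;
		else
			cmp = 0;

		if (cmp == 0)
			return true;		/* this index entry points to a dead tuple */
		if (cmp < 0)
			low = mid + 1;
		else
			high = mid - 1;
	}
	return false;
}

/* For lazy_scan_index: reap nothing, just let the AM walk the index. */
static bool
dummy_tid_reaped(ItemPointer itemptr, void *state)
{
	return false;
}

Keeping the probe logarithmic in the size of the dead-TID array matters because index_bulk_delete invokes the callback once per index entry, so a linear scan here would make each index pass quadratic in the worst case.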