vacuum.c
                            Ptp.t_data->t_infomask &=
                                ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
                            Ptp.t_data->t_infomask |= HEAP_MOVED_OFF;
                            WriteBuffer(Pbuf);
                            continue;
                        }
#endif
                        tp.t_data = Ptp.t_data;
                        tlen = tp.t_len = ItemIdGetLength(Pitemid);
                        if (freeCbuf)
                            ReleaseBuffer(Cbuf);
                        Cbuf = Pbuf;
                        freeCbuf = true;
                        break;
                    }
                    if (num_vtmove == 0)
                        break;
                }
                if (freeCbuf)
                    ReleaseBuffer(Cbuf);
                if (num_vtmove == 0)    /* chain can't be moved */
                {
                    pfree(vtmove);
                    break;
                }
                ItemPointerSetInvalid(&Ctid);
                for (ti = 0; ti < num_vtmove; ti++)
                {
                    /* Get tuple from chain */
                    tuple.t_self = vtmove[ti].tid;
                    Cbuf = ReadBuffer(onerel,
                                      ItemPointerGetBlockNumber(&(tuple.t_self)));
                    Cpage = BufferGetPage(Cbuf);
                    Citemid = PageGetItemId(Cpage,
                                            ItemPointerGetOffsetNumber(&(tuple.t_self)));
                    tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
                    tuple_len = tuple.t_len = ItemIdGetLength(Citemid);

                    /* Get page to move in */
                    cur_buffer = ReadBuffer(onerel, vtmove[ti].vpd->vpd_blkno);

                    /*
                     * We should LockBuffer(cur_buffer) here, but don't, at
                     * the moment.  If you do LockBuffer, then UNLOCK it
                     * before index_insert: unique btrees call heap_fetch
                     * to get t_infomask of the inserted heap tuple!
                     */
                    ToPage = BufferGetPage(cur_buffer);

                    /* if this page was not used before - clean it */
                    if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
                        vc_vacpage(ToPage, vtmove[ti].vpd);
                    heap_copytuple_with_tuple(&tuple, &newtup);
                    RelationInvalidateHeapTuple(onerel, &tuple);
                    TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
                    newtup.t_data->t_infomask &=
                        ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
                    newtup.t_data->t_infomask |= HEAP_MOVED_IN;
                    newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
                                         InvalidOffsetNumber, LP_USED);
                    if (newoff == InvalidOffsetNumber)
                    {
                        elog(ERROR, "moving chain: failed to add item with len = %u to page %u",
                             tuple_len, vtmove[ti].vpd->vpd_blkno);
                    }
                    newitemid = PageGetItemId(ToPage, newoff);
                    pfree(newtup.t_data);
                    newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
                    ItemPointerSet(&(newtup.t_self), vtmove[ti].vpd->vpd_blkno, newoff);

                    /*
                     * Set t_ctid pointing to itself for the last tuple in
                     * the chain, and to the next tuple in the chain
                     * otherwise.
                     */
                    if (!ItemPointerIsValid(&Ctid))
                        newtup.t_data->t_ctid = newtup.t_self;
                    else
                        newtup.t_data->t_ctid = Ctid;
                    Ctid = newtup.t_self;

                    TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
                    tuple.t_data->t_infomask &=
                        ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
                    tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
                    num_moved++;

                    /*
                     * Remember that we moved a tuple from the current page
                     * (the corresponding index tuple will be cleaned).
                     */
                    if (Cbuf == buf)
                        vpc->vpd_offsets[vpc->vpd_offsets_free++] =
                            ItemPointerGetOffsetNumber(&(tuple.t_self));
                    else
                        keep_tuples++;

                    if (Irel != (Relation *) NULL)
                    {
                        for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
                        {
                            FormIndexDatum(idcur->natts,
                                           (AttrNumber *) &(idcur->tform->indkey[0]),
                                           &newtup, tupdesc, idatum, inulls,
                                           idcur->finfoP);
                            iresult = index_insert(Irel[i], idatum, inulls,
                                                   &newtup.t_self, onerel);
                            if (iresult)
                                pfree(iresult);
                        }
                    }
                    WriteBuffer(cur_buffer);
                    if (Cbuf == buf)
                        ReleaseBuffer(Cbuf);
                    else
                        WriteBuffer(Cbuf);
                }
                cur_buffer = InvalidBuffer;
                pfree(vtmove);
                chain_tuple_moved = true;
                continue;
            }
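            /*
             * At this point the whole update chain has been copied: every
             * tuple in vtmove[] now exists in two versions, the new copy
             * flagged HEAP_MOVED_IN and the original flagged HEAP_MOVED_OFF,
             * with this vacuum's XID stored in t_cmin.  The cleanup pass
             * below uses exactly these flags to commit the copies and
             * invalidate the originals.  What follows is the ordinary,
             * non-chain case: pick a target page from fraged_pages with
             * enough free space for the current tuple.
             */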
            /* try to find new page for this tuple */
            if (cur_buffer == InvalidBuffer ||
                !vc_enough_space(cur_page, tuple_len))
            {
                if (cur_buffer != InvalidBuffer)
                {
                    WriteBuffer(cur_buffer);
                    cur_buffer = InvalidBuffer;

                    /*
                     * If no tuple can be added to this page, remove the
                     * page from fraged_pages. - vadim 11/27/96
                     *
                     * But we can't remove the last page - this is our
                     * "show-stopper"! - vadim 02/25/98
                     */
                    if (cur_page != last_fraged_page &&
                        !vc_enough_space(cur_page, vacrelstats->min_tlen))
                    {
                        Assert(num_fraged_pages > cur_item + 1);
                        memmove(fraged_pages->vpl_pagedesc + cur_item,
                                fraged_pages->vpl_pagedesc + cur_item + 1,
                                sizeof(VPageDescr *) * (num_fraged_pages - cur_item - 1));
                        num_fraged_pages--;
                        Assert(last_fraged_page ==
                               fraged_pages->vpl_pagedesc[num_fraged_pages - 1]);
                    }
                }
                for (i = 0; i < num_fraged_pages; i++)
                {
                    if (vc_enough_space(fraged_pages->vpl_pagedesc[i], tuple_len))
                        break;
                }
                if (i == num_fraged_pages)
                    break;      /* can't move item anywhere */
                cur_item = i;
                cur_page = fraged_pages->vpl_pagedesc[cur_item];
                cur_buffer = ReadBuffer(onerel, cur_page->vpd_blkno);
                ToPage = BufferGetPage(cur_buffer);
                /* if this page was not used before - clean it */
                if (!PageIsEmpty(ToPage) && cur_page->vpd_offsets_used == 0)
                    vc_vacpage(ToPage, cur_page);
            }

            /* copy tuple */
            heap_copytuple_with_tuple(&tuple, &newtup);
            RelationInvalidateHeapTuple(onerel, &tuple);

            /*
             * Mark the new tuple as moved_in by vacuum and store the
             * vacuum XID in t_cmin!
             */
            TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
            newtup.t_data->t_infomask &=
                ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
            newtup.t_data->t_infomask |= HEAP_MOVED_IN;

            /* add tuple to the page */
            newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
                                 InvalidOffsetNumber, LP_USED);
            if (newoff == InvalidOffsetNumber)
            {
                elog(ERROR, "failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
                     tuple_len, cur_page->vpd_blkno, cur_page->vpd_free,
                     cur_page->vpd_offsets_used, cur_page->vpd_offsets_free);
            }
            newitemid = PageGetItemId(ToPage, newoff);
            pfree(newtup.t_data);
            newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
            ItemPointerSet(&(newtup.t_data->t_ctid), cur_page->vpd_blkno, newoff);
            newtup.t_self = newtup.t_data->t_ctid;
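            /*
             * As in the chain case above, the move is recorded by
             * overwriting t_cmin with vacuum's own XID and setting
             * HEAP_MOVED_IN on the copy and HEAP_MOVED_OFF on the original;
             * every later pass re-checks t_cmin against myXID before
             * trusting either flag.
             */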
            /*
             * Mark the old tuple as moved_off by vacuum and store the
             * vacuum XID in t_cmin!
             */
            TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
            tuple.t_data->t_infomask &=
                ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
            tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
            cur_page->vpd_offsets_used++;
            num_moved++;
            cur_page->vpd_free = ((PageHeader) ToPage)->pd_upper -
                ((PageHeader) ToPage)->pd_lower;
            vpc->vpd_offsets[vpc->vpd_offsets_free++] = offnum;

            /* insert index tuples if needed */
            if (Irel != (Relation *) NULL)
            {
                for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
                {
                    FormIndexDatum(idcur->natts,
                                   (AttrNumber *) &(idcur->tform->indkey[0]),
                                   &newtup, tupdesc, idatum, inulls,
                                   idcur->finfoP);
                    iresult = index_insert(Irel[i], idatum, inulls,
                                           &newtup.t_self, onerel);
                    if (iresult)
                        pfree(iresult);
                }
            }
        }                       /* walk along page */

        if (offnum < maxoff && keep_tuples > 0)
        {
            OffsetNumber off;

            for (off = OffsetNumberNext(offnum);
                 off <= maxoff;
                 off = OffsetNumberNext(off))
            {
                itemid = PageGetItemId(page, off);
                if (!ItemIdIsUsed(itemid))
                    continue;
                tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
                if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
                    continue;
                if ((TransactionId) tuple.t_data->t_cmin != myXID)
                    elog(ERROR, "Invalid XID in t_cmin (4)");
                if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
                    elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
                if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                {
                    if (chain_tuple_moved)
                    {
                        /* some chains were moved while cleaning this page */
                        Assert(vpc->vpd_offsets_free > 0);
                        for (i = 0; i < vpc->vpd_offsets_free; i++)
                        {
                            if (vpc->vpd_offsets[i] == off)
                                break;
                        }
                        if (i >= vpc->vpd_offsets_free)     /* not found */
                        {
                            vpc->vpd_offsets[vpc->vpd_offsets_free++] = off;
                            Assert(keep_tuples > 0);
                            keep_tuples--;
                        }
                    }
                    else
                    {
                        vpc->vpd_offsets[vpc->vpd_offsets_free++] = off;
                        Assert(keep_tuples > 0);
                        keep_tuples--;
                    }
                }
            }
        }

        if (vpc->vpd_offsets_free > 0)  /* some tuples were moved */
        {
            if (chain_tuple_moved)      /* else - they are ordered */
            {
                qsort((char *) (vpc->vpd_offsets), vpc->vpd_offsets_free,
                      sizeof(OffsetNumber), vc_cmp_offno);
            }
            vc_reappage(&Nvpl, vpc);
            WriteBuffer(buf);
        }
        else if (dowrite)
            WriteBuffer(buf);
        else
            ReleaseBuffer(buf);

        if (offnum <= maxoff)
            break;              /* some item(s) left */
    }                           /* walk along relation */

    blkno++;                    /* new number of blocks */

    if (cur_buffer != InvalidBuffer)
    {
        Assert(num_moved > 0);
        WriteBuffer(cur_buffer);
    }
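    /*
     * The relation walk is complete: every movable tuple has been copied
     * into one of the fraged pages, and blkno is now the number of blocks
     * the relation will keep after truncation.
     */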
    if (num_moved > 0)
    {
        /*
         * We have to commit our tuple moves before we truncate the
         * relation, but we must not lose our locks.  So - quick hack:
         * flush dirty buffers, record the status of the current
         * transaction as committed, and continue. - vadim 11/13/96
         */
        FlushBufferPool(!TransactionFlushEnabled());
        TransactionIdCommit(myXID);
        FlushBufferPool(!TransactionFlushEnabled());
    }

    /*
     * Clean uncleaned reaped pages from the vacuum_pages list and set
     * xmin committed for inserted tuples.
     */
    checked_moved = 0;
    for (i = 0, vpp = vacuum_pages->vpl_pagedesc; i < vacuumed_pages; i++, vpp++)
    {
        Assert((*vpp)->vpd_blkno < blkno);
        buf = ReadBuffer(onerel, (*vpp)->vpd_blkno);
        page = BufferGetPage(buf);
        if ((*vpp)->vpd_offsets_used == 0)      /* this page was not used */
        {
            if (!PageIsEmpty(page))
                vc_vacpage(page, *vpp);
        }
        else                                    /* this page was used */
        {
            num_tuples = 0;
            max_offset = PageGetMaxOffsetNumber(page);
            for (newoff = FirstOffsetNumber;
                 newoff <= max_offset;
                 newoff = OffsetNumberNext(newoff))
            {
                itemid = PageGetItemId(page, newoff);
                if (!ItemIdIsUsed(itemid))
                    continue;
                tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
                if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
                {
                    if ((TransactionId) tuple.t_data->t_cmin != myXID)
                        elog(ERROR, "Invalid XID in t_cmin (2)");
                    if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
                    {
                        tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;
                        num_tuples++;
                    }
                    else if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                        tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
                    else
                        elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
                }
            }
            Assert((*vpp)->vpd_offsets_used == num_tuples);
            checked_moved += num_tuples;
        }
        WriteBuffer(buf);
    }
    Assert(num_moved == checked_moved);

    getrusage(RUSAGE_SELF, &ru1);

    elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u. Elapsed %u/%u sec.",
         (RelationGetRelationName(onerel))->data,
         nblocks, blkno, num_moved,
         ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
         ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);

    if (Nvpl.vpl_num_pages > 0)
    {
        /* vacuum indices again if needed */
        if (Irel != (Relation *) NULL)
        {
            VPageDescr *vpleft,
                       *vpright,
                        vpsave;

            /* re-sort Nvpl.vpl_pagedesc */
            for (vpleft = Nvpl.vpl_pagedesc,
                 vpright = Nvpl.vpl_pagedesc + Nvpl.vpl_num_pages - 1;
                 vpleft < vpright; vpleft++, vpright--)
            {
                vpsave = *vpleft;
                *vpleft = *vpright;
                *vpright = vpsave;
            }
            Assert(keep_tuples >= 0);
            for (i = 0; i < nindices; i++)
                vc_vaconeind(&Nvpl, Irel[i],
                             vacrelstats->num_tuples, keep_tuples);
        }

        /* clean moved tuples from the last page in the Nvpl list */
        if (vpc->vpd_blkno == blkno - 1 && vpc->vpd_offsets_free > 0)
        {
            buf = ReadBuffer(onerel, vpc->vpd_blkno);
            page = BufferGetPage(buf);
            num_tuples = 0;
            for (offnum = FirstOffsetNumber;
                 offnum <= maxoff;
                 offnum = OffsetNumberNext(offnum))
            {
                itemid = PageGetItemId(page, offnum);
                if (!ItemIdIsUsed(itemid))
                    continue;
                tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);

                if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
                {
                    if ((TransactionId) tuple.t_data->t_cmin != myXID)
                        elog(ERROR, "Invalid XID in t_cmin (3)");
                    if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                    {
                        itemid->lp_flags &= ~LP_USED;
                        num_tuples++;
                    }
                    else
                        elog(ERROR, "HEAP_MOVED_OFF was expected (2)");
                }
            }
            Assert(vpc->vpd_offsets_free == num_tuples);
            PageRepairFragmentation(page);
            WriteBuffer(buf);
        }

        /* now - free the new list of reaped pages */
        vpp = Nvpl.vpl_pagedesc;
        for (i = 0; i < Nvpl.vpl_num_pages; i++, vpp++)
            pfree(*vpp);
        pfree(Nvpl.vpl_pagedesc);
    }

    /* truncate relation */
    if (blkno < nblocks)
    {
        i = BlowawayRelationBuffers(onerel, blkno);
        if (i < 0)
            elog(FATAL, "VACUUM (vc_rpfheap): BlowawayRelationBuffers returned %d", i);
        blkno = smgrtruncate(DEFAULT_SMGR, onerel, blkno);
        Assert(blkno >= 0);
        vacrelstats->num_pages = blkno; /* set new number of blocks */
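        /*
         * Note the ordering above: BlowawayRelationBuffers discards
         * buffer-pool pages for the to-be-truncated blocks before
         * smgrtruncate shortens the file, so a stale dirty buffer cannot
         * be written back past the new end of the relation.
         */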