vacuum.c
来自「PostgreSQL7.4.6 for Linux」· C语言 代码 · 共 2,201 行 · 第 1/5 页
C
2,201 行
break;          /* out of check-all-items loop */
}
to_item = i;
to_vacpage = fraged_pages->pagedesc[to_item];
}
/*
 * NOTE(review): fragment of repair_frag() — begins and ends mid-scope;
 * surrounding declarations and loop headers are outside this excerpt.
 *
 * Reserve space on the chosen destination page for this chain member.
 */
to_vacpage->free -= MAXALIGN(tlen);
/*
 * If there are no more recyclable line pointers on the destination page,
 * a fresh ItemIdData will also be consumed from its free space —
 * presumably offsets_free counts reusable item slots; verify against the
 * VacPage bookkeeping elsewhere in this file.
 */
if (to_vacpage->offsets_used >= to_vacpage->offsets_free)
    to_vacpage->free -= sizeof(ItemIdData);
(to_vacpage->offsets_used)++;

/* Grow the pending-move list in chunks of 1000 entries. */
if (free_vtmove == 0)
{
    free_vtmove = 1000;
    vtmove = (VTupleMove) repalloc(vtmove,
                                   (free_vtmove + num_vtmove) * sizeof(VTupleMoveData));
}
vtmove[num_vtmove].tid = tp.t_self;
vtmove[num_vtmove].vacpage = to_vacpage;
/*
 * cleanVpd is set for the first tuple slated for a given destination
 * page: the move loop below uses it to decide whether the page still
 * needs a vacuum_page() cleanup before tuples are placed on it.
 */
if (to_vacpage->offsets_used == 1)
    vtmove[num_vtmove].cleanVpd = true;
else
    vtmove[num_vtmove].cleanVpd = false;
free_vtmove--;
num_vtmove++;

/* At beginning of chain? */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
    TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
                          OldestXmin))
    break;

/* No, move to tuple with prior row version */
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
    vac_bsearch((void *) &vtld,
                (void *) (vacrelstats->vtlinks),
                vacrelstats->num_vtlinks,
                sizeof(VTupleLinkData),
                vac_cmp_vtlinks);
if (vtlp == NULL)
{
    /* see discussion above */
    elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
    chain_move_failed = true;
    break;          /* out of check-all-items loop */
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
                  ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
                        ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
    elog(ERROR, "parent itemid marked as unused");
Ptp.t_datamcxt = NULL;
Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);

/* ctid should not have changed since we saved it */
Assert(ItemPointerEquals(&(vtld.new_tid),
                         &(Ptp.t_data->t_ctid)));

/*
 * Read above about cases when !ItemIdIsUsed(Citemid)
 * (child item is removed)... Due to the fact that at
 * the moment we don't remove unuseful part of
 * update-chain, it's possible to get too old parent
 * row here. Like as in the case which caused this
 * problem, we stop shrinking here. I could try to
 * find real parent row but want not to do it because
 * of real solution will be implemented anyway, later,
 * and we are too close to 6.5 release. - vadim
 * 06/11/99
 */
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
                          HeapTupleHeaderGetXmin(tp.t_data))))
{
    ReleaseBuffer(Pbuf);
    elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
    chain_move_failed = true;
    break;          /* out of check-all-items loop */
}
/* Step back to the parent: it becomes the current chain member. */
tp.t_datamcxt = Ptp.t_datamcxt;
tp.t_data = Ptp.t_data;
tlen = tp.t_len = ItemIdGetLength(Pitemid);
if (freeCbuf)
    ReleaseBuffer(Cbuf);
Cbuf = Pbuf;
freeCbuf = true;
}                   /* end of check-all-items loop */

if (freeCbuf)
    ReleaseBuffer(Cbuf);
freeCbuf = false;

if (chain_move_failed)
{
    /*
     * Undo changes to offsets_used state. We don't
     * bother cleaning up the amount-free state, since
     * we're not going to do any further tuple motion.
     */
    for (i = 0; i < num_vtmove; i++)
    {
        Assert(vtmove[i].vacpage->offsets_used > 0);
        (vtmove[i].vacpage->offsets_used)--;
    }
    pfree(vtmove);
    break;          /* out of walk-along-page loop */
}

/*
 * Okay, move the whole tuple chain
 */
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
    VacPage destvacpage = vtmove[ti].vacpage;

    /* Get page to move from */
    tuple.t_self = vtmove[ti].tid;
    Cbuf = ReadBuffer(onerel,
                      ItemPointerGetBlockNumber(&(tuple.t_self)));

    /* Get page to move to */
    cur_buffer = ReadBuffer(onerel, destvacpage->blkno);

    /* Source and destination may be the same buffer; lock each only once. */
    LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
    if (cur_buffer != Cbuf)
        LockBuffer(Cbuf, BUFFER_LOCK_EXCLUSIVE);

    ToPage = BufferGetPage(cur_buffer);
    Cpage = BufferGetPage(Cbuf);

    Citemid = PageGetItemId(Cpage,
                            ItemPointerGetOffsetNumber(&(tuple.t_self)));
    tuple.t_datamcxt = NULL;
    tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
    tuple_len = tuple.t_len = ItemIdGetLength(Citemid);

    /*
     * make a copy of the source tuple, and then mark the
     * source tuple MOVED_OFF.
     */
    heap_copytuple_with_tuple(&tuple, &newtup);

    /*
     * register invalidation of source tuple in catcaches.
     */
    CacheInvalidateHeapTuple(onerel, &tuple);

    /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
    START_CRIT_SECTION();

    tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                                  HEAP_XMIN_INVALID |
                                  HEAP_MOVED_IN);
    tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
    HeapTupleHeaderSetXvac(tuple.t_data, myXID);

    /*
     * If this page was not used before - clean it.
     *
     * NOTE: a nasty bug used to lurk here. It is possible
     * for the source and destination pages to be the same
     * (since this tuple-chain member can be on a page
     * lower than the one we're currently processing in
     * the outer loop). If that's true, then after
     * vacuum_page() the source tuple will have been
     * moved, and tuple.t_data will be pointing at
     * garbage. Therefore we must do everything that uses
     * tuple.t_data BEFORE this step!!
     *
     * This path is different from the other callers of
     * vacuum_page, because we have already incremented
     * the vacpage's offsets_used field to account for the
     * tuple(s) we expect to move onto the page. Therefore
     * vacuum_page's check for offsets_used == 0 is wrong.
     * But since that's a good debugging check for all
     * other callers, we work around it here rather than
     * remove it.
     */
    if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
    {
        int     sv_offsets_used = destvacpage->offsets_used;

        destvacpage->offsets_used = 0;
        vacuum_page(onerel, cur_buffer, destvacpage);
        destvacpage->offsets_used = sv_offsets_used;
    }

    /*
     * Update the state of the copied tuple, and store it
     * on the destination page.
     */
    newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                                   HEAP_XMIN_INVALID |
                                   HEAP_MOVED_OFF);
    newtup.t_data->t_infomask |= HEAP_MOVED_IN;
    HeapTupleHeaderSetXvac(newtup.t_data, myXID);
    newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
                         InvalidOffsetNumber, LP_USED);
    if (newoff == InvalidOffsetNumber)
    {
        /* Space was pre-reserved above, so this should be impossible. */
        elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
             (unsigned long) tuple_len, destvacpage->blkno);
    }
    newitemid = PageGetItemId(ToPage, newoff);
    pfree(newtup.t_data);
    newtup.t_datamcxt = NULL;
    newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
    ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);

    /* XLOG stuff */
    if (!onerel->rd_istemp)
    {
        XLogRecPtr  recptr = log_heap_move(onerel, Cbuf, tuple.t_self,
                                           cur_buffer, &newtup);

        if (Cbuf != cur_buffer)
        {
            PageSetLSN(Cpage, recptr);
            PageSetSUI(Cpage, ThisStartUpID);
        }
        PageSetLSN(ToPage, recptr);
        PageSetSUI(ToPage, ThisStartUpID);
    }
    else
    {
        /*
         * No XLOG record, but still need to flag that XID
         * exists on disk
         */
        MyXactMadeTempRelUpdate = true;
    }

    END_CRIT_SECTION();

    if (destvacpage->blkno > last_move_dest_block)
        last_move_dest_block = destvacpage->blkno;

    /*
     * Set new tuple's t_ctid pointing to itself for last
     * tuple in chain, and to next tuple in chain
     * otherwise.  (The chain is walked newest-to-oldest, so
     * Ctid holds the location of the previously-moved, i.e.
     * next-in-chain, tuple.)
     */
    if (!ItemPointerIsValid(&Ctid))
        newtup.t_data->t_ctid = newtup.t_self;
    else
        newtup.t_data->t_ctid = Ctid;
    Ctid = newtup.t_self;

    num_moved++;

    /*
     * Remember that we moved tuple from the current page
     * (corresponding index tuple will be cleaned).
     */
    if (Cbuf == buf)
        vacpage->offsets[vacpage->offsets_free++] =
            ItemPointerGetOffsetNumber(&(tuple.t_self));
    else
        keep_tuples++;

    LockBuffer(cur_buffer, BUFFER_LOCK_UNLOCK);
    if (cur_buffer != Cbuf)
        LockBuffer(Cbuf, BUFFER_LOCK_UNLOCK);

    /* Create index entries for the moved tuple */
    if (resultRelInfo->ri_NumIndices > 0)
    {
        ExecStoreTuple(&newtup, slot, InvalidBuffer, false);
        ExecInsertIndexTuples(slot, &(newtup.t_self),
                              estate, true);
    }

    WriteBuffer(cur_buffer);
    WriteBuffer(Cbuf);
}                   /* end of move-the-tuple-chain loop */

cur_buffer = InvalidBuffer;
pfree(vtmove);
chain_tuple_moved = true;

/* advance to next tuple in walk-along-page loop */
continue;
}                   /* end of is-tuple-in-chain test */

/* try to find new page for this tuple */
if (cur_buffer == InvalidBuffer ||
    !enough_space(cur_page, tuple_len))
{
    /* Current destination is full (or none yet): flush and pick another. */
    if (cur_buffer != InvalidBuffer)
    {
        WriteBuffer(cur_buffer);
        cur_buffer = InvalidBuffer;
    }
    /* First-fit search over the fragmented-pages list. */
    for (i = 0; i < num_fraged_pages; i++)
    {
        if (enough_space(fraged_pages->pagedesc[i], tuple_len))
            break;
    }
    if (i == num_fraged_pages)
        break;          /* can't move item anywhere */
    cur_item = i;
    cur_page = fraged_pages->pagedesc[cur_item];
    cur_buffer = ReadBuffer(onerel, cur_page->blkno);
    LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
    ToPage = BufferGetPage(cur_buffer);
    /* if this page was not used before - clean it */
    if (!PageIsEmpty(ToPage) && cur_page->offsets_used == 0)
        vacuum_page(onerel, cur_buffer, cur_page);
}
else
    LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);

LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

/* copy tuple */
heap_copytuple_with_tuple(&tuple, &newtup);

/*
 * register invalidation of source tuple in catcaches.
 *
 * (Note: we do not need to register the copied tuple, because we
 * are not changing the tuple contents and so there cannot be
 * any need to flush negative catcache entries.)
 */
CacheInvalidateHeapTuple(onerel, &tuple);

/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION();

/*
 * Mark new tuple as MOVED_IN by me.
 */
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                               HEAP_XMIN_INVALID |
                               HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
HeapTupleHeaderSetXvac(newtup.t_data, myXID);

/* add tuple to the page */
newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
                     InvalidOffsetNumber, LP_USED);
if (newoff == InvalidOffsetNumber)
{
    /* Should be impossible: enough_space() was checked above. */
    elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
         (unsigned long) tuple_len,
         cur_page->blkno, (unsigned long) cur_page->free,
         cur_page->offsets_used, cur_page->offsets_free);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
newtup.t_datamcxt = NULL;
newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
/* Not in a chain, so the moved tuple's ctid points at itself. */
ItemPointerSet(&(newtup.t_data->t_ctid), cur_page->blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid;

/*
 * Mark old tuple as MOVED_OFF by me.
 */
tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                              HEAP_XMIN_INVALID |
                              HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(tuple.t_data, myXID);

/* XLOG stuff */
if (!onerel->rd_istemp)
{
    XLogRecPtr  recptr = log_heap_move(onerel, buf, tuple.t_self,
                                       cur_buffer, &newtup);

    PageSetLSN(page, recptr);
    PageSetSUI(page, ThisStartUpID);
    PageSetLSN(ToPage, recptr);
    PageSetSUI(ToPage, ThisStartUpID);
}
else
{
    /*
     * No XLOG record, but still need to flag that XID exists
     * on disk
     */
    MyXactMadeTempRelUpdate = true;
}

END_CRIT_SECTION();

cur_page->offsets_used++;
num_moved++;
/* Recompute remaining free space on the destination page. */
cur_page->free = ((PageHeader) ToPage)->pd_upper -
    ((PageHeader) ToPage)->pd_lower;
if (cur_page->blkno > last_move_dest_block)
    last_move_dest_block = cur_page->blkno;

vacpage->offsets[vacpage->offsets_free++] = offnum;

LockBuffer(cur_buffer, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);

/* insert index tuples if needed */
if (resultRelInfo->ri_NumIndices > 0)
{
    ExecStoreTuple(&newtup, slot, InvalidBuffer, false);
    ExecInsertIndexTuples(slot, &(newtup.t_self),
                          estate, true);
}
}                   /* walk along page */

/*
 * If we broke out of the walk-along-page loop early (ie, still
 * have offnum <= maxoff), then we failed to move some tuple off
 * this page. No point in shrinking any more, so clean up and
 * exit the per-page loop.
 */
if (offnum < maxoff && keep_tuples > 0)
{
    OffsetNumber off;

    /*
     * Fix vacpage state for any unvisited tuples remaining on
     * page
     */
    for (off = OffsetNumberNext(offnum);
         off <= maxoff;
         off = OffsetNumberNext(off))
    {
        itemid = PageGetItemId(page, off);
        if (!ItemIdIsUsed(itemid))
            continue;
        /* NOTE(review): excerpt truncated here, mid-statement. */
        tuple.t_datamcxt
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?