bufmgr.c
/*
 * InvalidateBuffer -- mark a shared buffer invalid and return it to the
 * freelist.
 *
 * The buffer header spinlock must be held at entry.  We drop it before
 * returning.  (This is sane because the caller must have locked the
 * buffer in order to be sure it should be dropped.)
 *
 * This is used only in contexts such as dropping a relation.  We assume
 * that no other backend could possibly be interested in using the page,
 * so the only reason the buffer might be pinned is if someone else is
 * trying to write it out.  We have to let them finish before we can
 * reclaim the buffer.
 *
 * The buffer could get reclaimed by someone else while we are waiting
 * to acquire the necessary locks; if so, don't mess it up.
 */
static void
InvalidateBuffer(volatile BufferDesc *buf)
{
    BufferTag   oldTag;
    BufFlags    oldFlags;

    /* Save the original buffer tag before dropping the spinlock */
    oldTag = buf->tag;

    UnlockBufHdr(buf);

retry:

    /*
     * Acquire exclusive mapping lock in preparation for changing the buffer's
     * association.
     */
    LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);

    /* Re-lock the buffer header (NoHoldoff since we have an LWLock) */
    LockBufHdr_NoHoldoff(buf);

    /* If it's changed while we were waiting for lock, do nothing */
    if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
    {
        UnlockBufHdr_NoHoldoff(buf);
        LWLockRelease(BufMappingLock);
        return;
    }

    /*
     * We assume the only reason for it to be pinned is that someone else is
     * flushing the page out.  Wait for them to finish.  (This could be an
     * infinite loop if the refcount is messed up... it would be nice to time
     * out after awhile, but there seems no way to be sure how many loops may
     * be needed.  Note that if the other guy has pinned the buffer but not
     * yet done StartBufferIO, WaitIO will fall through and we'll effectively
     * be busy-looping here.)
     */
    if (buf->refcount != 0)
    {
        UnlockBufHdr_NoHoldoff(buf);
        LWLockRelease(BufMappingLock);
        /* safety check: should definitely not be our *own* pin */
        if (PrivateRefCount[buf->buf_id] != 0)
            elog(ERROR, "buffer is pinned in InvalidateBuffer");
        WaitIO(buf);
        goto retry;
    }

    /*
     * Clear out the buffer's tag and flags.  We must do this to ensure that
     * linear scans of the buffer array don't think the buffer is valid.
     */
    oldFlags = buf->flags;
    CLEAR_BUFFERTAG(buf->tag);
    buf->flags = 0;
    buf->usage_count = 0;

    UnlockBufHdr_NoHoldoff(buf);

    /*
     * Remove the buffer from the lookup hashtable, if it was in there.
     */
    if (oldFlags & BM_TAG_VALID)
        BufTableDelete(&oldTag);

    /*
     * Avoid accepting a cancel interrupt when we release the mapping lock;
     * that would leave the buffer free but not on the freelist.  (Which would
     * not be fatal, since it'd get picked up again by the clock scanning
     * code, but we'd rather be sure it gets to the freelist.)
     */
    HOLD_INTERRUPTS();

    LWLockRelease(BufMappingLock);

    /*
     * Insert the buffer at the head of the list of free buffers.
     */
    StrategyFreeBuffer(buf, true);

    RESUME_INTERRUPTS();
}

/*
 * write_buffer -- common functionality for
 *                 WriteBuffer and WriteNoReleaseBuffer
 */
static void
write_buffer(Buffer buffer, bool unpin)
{
    volatile BufferDesc *bufHdr;

    if (!BufferIsValid(buffer))
        elog(ERROR, "bad buffer id: %d", buffer);

    if (BufferIsLocal(buffer))
    {
        WriteLocalBuffer(buffer, unpin);
        return;
    }

    bufHdr = &BufferDescriptors[buffer - 1];

    Assert(PrivateRefCount[buffer - 1] > 0);

    LockBufHdr(bufHdr);

    Assert(bufHdr->refcount > 0);

    /*
     * If the buffer was not dirty already, do vacuum cost accounting.
     */
    if (!(bufHdr->flags & BM_DIRTY) && VacuumCostActive)
        VacuumCostBalance += VacuumCostPageDirty;

    bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);

    UnlockBufHdr(bufHdr);

    if (unpin)
        UnpinBuffer(bufHdr, true, true);
}

/*
 * WriteBuffer
 *
 *      Marks buffer contents as dirty (actual write happens later).
 *
 * Assume that buffer is pinned.  Assume that reln is valid.
 *
 * Side Effects:
 *      Pin count is decremented.
 */
void
WriteBuffer(Buffer buffer)
{
    write_buffer(buffer, true);
}

/*
 * WriteNoReleaseBuffer -- like WriteBuffer, but do not unpin the buffer
 *                         when the operation is complete.
 */
void
WriteNoReleaseBuffer(Buffer buffer)
{
    write_buffer(buffer, false);
}
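/*
 * Illustrative sketch, not part of bufmgr.c: the caller-side life cycle
 * that WriteBuffer supports.  ReadBuffer pins the page, the caller
 * modifies it, and WriteBuffer marks it dirty and drops the pin; the
 * physical write happens later (bgwriter or checkpoint).  A real access
 * method would also take the buffer content lock via LockBuffer()
 * before touching the page; that is omitted here.  The guard macro and
 * example_dirty_page are hypothetical names.
 */
#ifdef BUFMGR_USAGE_EXAMPLES
static void
example_dirty_page(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* pin the page */
    Page        page = BufferGetPage(buf);

    /* ... modify the page contents here (content lock omitted) ... */
    (void) page;

    WriteBuffer(buf);           /* set BM_DIRTY and release our pin */
}
#endif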
/*
 * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
 *
 * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
 * compared to calling the two routines separately.  Now it's mainly just
 * a convenience function.  However, if the passed buffer is valid and
 * already contains the desired block, we just return it as-is; and that
 * does save considerable work compared to a full release and reacquire.
 *
 * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
 * buffer actually needs to be released.  This case is the same as ReadBuffer,
 * but can save some tests in the caller.
 */
Buffer
ReleaseAndReadBuffer(Buffer buffer,
                     Relation relation,
                     BlockNumber blockNum)
{
    volatile BufferDesc *bufHdr;

    if (BufferIsValid(buffer))
    {
        if (BufferIsLocal(buffer))
        {
            Assert(LocalRefCount[-buffer - 1] > 0);
            bufHdr = &LocalBufferDescriptors[-buffer - 1];
            if (bufHdr->tag.blockNum == blockNum &&
                RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node))
                return buffer;
            ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
            LocalRefCount[-buffer - 1]--;
            if (LocalRefCount[-buffer - 1] == 0 &&
                bufHdr->usage_count < BM_MAX_USAGE_COUNT)
                bufHdr->usage_count++;
        }
        else
        {
            Assert(PrivateRefCount[buffer - 1] > 0);
            bufHdr = &BufferDescriptors[buffer - 1];
            /* we have pin, so it's ok to examine tag without spinlock */
            if (bufHdr->tag.blockNum == blockNum &&
                RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node))
                return buffer;
            UnpinBuffer(bufHdr, true, true);
        }
    }

    return ReadBuffer(relation, blockNum);
}
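/*
 * Illustrative sketch, not part of bufmgr.c: using ReleaseAndReadBuffer
 * to step from page to page while always holding exactly one pin.
 * Passing InvalidBuffer on the first iteration makes the call behave
 * like plain ReadBuffer; a repeat request for the block we already hold
 * returns the same pinned buffer without any lock traffic.  The guard
 * macro and example_walk_pages are hypothetical names.
 */
#ifdef BUFMGR_USAGE_EXAMPLES
static void
example_walk_pages(Relation rel, BlockNumber start, int npages)
{
    Buffer      buf = InvalidBuffer;
    int         i;

    for (i = 0; i < npages; i++)
    {
        /* same-block calls short-circuit; new blocks swap the pin */
        buf = ReleaseAndReadBuffer(buf, rel, start + i);
        /* ... inspect BufferGetPage(buf) here ... */
    }
    if (BufferIsValid(buf))
        ReleaseBuffer(buf);
}
#endif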
/*
 * PinBuffer -- make buffer unavailable for replacement.
 *
 * This should be applied only to shared buffers, never local ones.
 *
 * Note that ResourceOwnerEnlargeBuffers must have been done already.
 *
 * Returns TRUE if buffer is BM_VALID, else FALSE.  This provision allows
 * some callers to avoid an extra spinlock cycle.
 */
static bool
PinBuffer(volatile BufferDesc *buf)
{
    int         b = buf->buf_id;
    bool        result;

    if (PrivateRefCount[b] == 0)
    {
        /*
         * Use NoHoldoff here because we don't want the unlock to be a
         * potential place to honor a QueryCancel request. (The caller should
         * be holding off interrupts anyway.)
         */
        LockBufHdr_NoHoldoff(buf);
        buf->refcount++;
        result = (buf->flags & BM_VALID) != 0;
        UnlockBufHdr_NoHoldoff(buf);
    }
    else
    {
        /* If we previously pinned the buffer, it must surely be valid */
        result = true;
    }
    PrivateRefCount[b]++;
    Assert(PrivateRefCount[b] > 0);
    ResourceOwnerRememberBuffer(CurrentResourceOwner,
                                BufferDescriptorGetBuffer(buf));
    return result;
}

/*
 * PinBuffer_Locked -- as above, but caller already locked the buffer header.
 * The spinlock is released before return.
 *
 * Note: use of this routine is frequently mandatory, not just an optimization
 * to save a spin lock/unlock cycle, because we need to pin a buffer before
 * its state can change under us.
 */
static void
PinBuffer_Locked(volatile BufferDesc *buf)
{
    int         b = buf->buf_id;

    if (PrivateRefCount[b] == 0)
        buf->refcount++;
    /* NoHoldoff since we mustn't accept cancel interrupt here */
    UnlockBufHdr_NoHoldoff(buf);
    PrivateRefCount[b]++;
    Assert(PrivateRefCount[b] > 0);
    ResourceOwnerRememberBuffer(CurrentResourceOwner,
                                BufferDescriptorGetBuffer(buf));
    /* Now we can accept cancel */
    RESUME_INTERRUPTS();
}

/*
 * UnpinBuffer -- make buffer available for replacement.
 *
 * This should be applied only to shared buffers, never local ones.
 *
 * Most but not all callers want CurrentResourceOwner to be adjusted.
 * Those that don't should pass fixOwner = FALSE.
 *
 * normalAccess indicates that we are finishing a "normal" page access,
 * that is, one requested by something outside the buffer subsystem.
 * Passing FALSE means it's an internal access that should not update the
 * buffer's usage count nor cause a change in the freelist.
 *
 * If we are releasing a buffer during VACUUM, and it's not been otherwise
 * used recently, and normalAccess is true, we send the buffer to the freelist.
 */
static void
UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess)
{
    int         b = buf->buf_id;

    if (fixOwner)
        ResourceOwnerForgetBuffer(CurrentResourceOwner,
                                  BufferDescriptorGetBuffer(buf));

    Assert(PrivateRefCount[b] > 0);
    PrivateRefCount[b]--;
    if (PrivateRefCount[b] == 0)
    {
        bool        immed_free_buffer = false;

        /* I'd better not still hold any locks on the buffer */
        Assert(!LWLockHeldByMe(buf->content_lock));
        Assert(!LWLockHeldByMe(buf->io_in_progress_lock));

        /* NoHoldoff ensures we don't lose control before sending signal */
        LockBufHdr_NoHoldoff(buf);

        /* Decrement the shared reference count */
        Assert(buf->refcount > 0);
        buf->refcount--;

        /* Update buffer usage info, unless this is an internal access */
        if (normalAccess)
        {
            if (!strategy_hint_vacuum)
            {
                if (buf->usage_count < BM_MAX_USAGE_COUNT)
                    buf->usage_count++;
            }
            else
            {
                /* VACUUM accesses don't bump usage count, instead... */
                if (buf->refcount == 0 && buf->usage_count == 0)
                    immed_free_buffer = true;
            }
        }

        if ((buf->flags & BM_PIN_COUNT_WAITER) &&
            buf->refcount == 1)
        {
            /* we just released the last pin other than the waiter's */
            int         wait_backend_pid = buf->wait_backend_pid;

            buf->flags &= ~BM_PIN_COUNT_WAITER;
            UnlockBufHdr_NoHoldoff(buf);
            ProcSendSignal(wait_backend_pid);
        }
        else
            UnlockBufHdr_NoHoldoff(buf);

        /*
         * If VACUUM is releasing an otherwise-unused buffer, send it to the
         * freelist for near-term reuse.  We put it at the tail so that it
         * won't be used before any invalid buffers that may exist.
         */
        if (immed_free_buffer)
            StrategyFreeBuffer(buf, false);
    }
}
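/*
 * Illustrative sketch, not part of bufmgr.c: the two-level pin counting
 * implemented by PinBuffer/UnpinBuffer above.  Only this backend's
 * 0->1 and 1->0 transitions of PrivateRefCount touch the shared buffer
 * header under its spinlock; nested pins and unpins of the same page
 * stay backend-local.  (ReleaseBuffer reaches UnpinBuffer internally
 * for shared buffers.)  The guard macro and example_nested_pins are
 * hypothetical names.
 */
#ifdef BUFMGR_USAGE_EXAMPLES
static void
example_nested_pins(Relation rel, BlockNumber blkno)
{
    Buffer      outer = ReadBuffer(rel, blkno); /* shared refcount++ */
    Buffer      inner = ReadBuffer(rel, blkno); /* PrivateRefCount only */

    Assert(outer == inner);     /* same shared buffer both times */

    ReleaseBuffer(inner);       /* PrivateRefCount only */
    ReleaseBuffer(outer);       /* shared refcount-- */
}
#endif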
/*
 * BufferSync -- Write out all dirty buffers in the pool.
 *
 * This is called at checkpoint time to write out all dirty shared buffers.
 */
void
BufferSync(void)
{
    int         buf_id;
    int         num_to_scan;

    /*
     * Find out where to start the circular scan.
     */
    buf_id = StrategySyncStart();

    /* Make sure we can handle the pin inside SyncOneBuffer */
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

    /*
     * Loop over all buffers.
     */
    num_to_scan = NBuffers;
    while (num_to_scan-- > 0)
    {
        (void) SyncOneBuffer(buf_id, false);
        if (++buf_id >= NBuffers)
            buf_id = 0;
    }
}

/*
 * BgBufferSync -- Write out some dirty buffers in the pool.
 *
 * This is called periodically by the background writer process.
 */
void
BgBufferSync(void)
{
    static int  buf_id1 = 0;
    int         buf_id2;
    int         num_to_scan;
    int         num_written;

    /* Make sure we can handle the pin inside SyncOneBuffer */
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

    /*
     * To minimize work at checkpoint time, we want to try to keep all the
     * buffers clean; this motivates a scan that proceeds sequentially through
     * all buffers.  But we are also charged with ensuring that buffers that
     * will be recycled soon are clean when needed; these buffers are the ones
     * just ahead of the StrategySyncStart point.  We make a separate scan
     * through those.
     */

    /*
     * This loop runs over all buffers, including pinned ones.  The starting
     * point advances through the buffer pool on successive calls.
     *
     * Note that we advance the static counter *before* trying to write.  This
     * ensures that, if we have a persistent write failure on a dirty buffer,
     * we'll still be able to make progress writing other buffers.  (The
     * bgwriter will catch the error and just call us again later.)
     */
    if (bgwriter_all_percent > 0.0 && bgwriter_all_maxpages > 0)
    {
        num_to_scan = (int) ((NBuffers * bgwriter_all_percent + 99) / 100);
        num_written = 0;

        while (num_to_scan-- > 0)
        {
            if (++buf_id1 >= NBuffers)
                buf_id1 = 0;
            if (SyncOneBuffer(buf_id1, false))
            {
                if (++num_written >= bgwriter_all_maxpages)
                    break;
            }
        }
    }

    /*
     * This loop considers only unpinned buffers close to the clock sweep
     * point.
     */
    if (bgwriter_lru_percent > 0.0 && bgwriter_lru_maxpages > 0)
    {
        num_to_scan = (int) ((NBuffers * bgwriter_lru_percent + 99) / 100);
        num_written = 0;

        buf_id2 = StrategySyncStart();

        while (num_to_scan-- > 0)
        {
            if (SyncOneBuffer(buf_id2, true))
            {
                if (++num_written >= bgwriter_lru_maxpages)
                    break;
            }
            if (++buf_id2 >= NBuffers)
                buf_id2 = 0;
        }
    }
}

/*
 * SyncOneBuffer -- process a single buffer during syncing.
 *
 * If skip_pinned is true, we don't write currently-pinned buffers, nor
 * buffers marked recently used, as these are not replacement candidates.
 *
 * Returns true if buffer was written, else false.  (This could be in error
 * if FlushBuffer finds the buffer clean after locking it, but we don't
 * care all that much.)
 *
 * Note: caller must have done ResourceOwnerEnlargeBuffers.
 */
static bool
SyncOneBuffer(int buf_id, bool skip_pinned)
{
    volatile BufferDesc *bufHdr = &BufferDescriptors[buf_id];

    /*
     * Check whether buffer needs writing.
     *
     * We can make this check without taking the buffer content lock so long
     * as we mark pages dirty in access methods *before* logging changes with
     * XLogInsert(): if someone marks the buffer dirty just after our check we
     * don't worry because our checkpoint.redo points before log record for
     * upcoming changes and so we are not required to write such dirty buffer.
     */
    LockBufHdr(bufHdr);
    if (!(bufHdr->flags & BM_VALID) || !(bufHdr->flags & BM_DIRTY))
    {
        UnlockBufHdr(bufHdr);
        return false;
    }
    if (skip_pinned &&
        (bufHdr->refcount != 0 || bufHdr->usage_count != 0))
    {
        UnlockBufHdr(bufHdr);
        return false;
    }

    /*
     * Pin it, share-lock it, write it.  (FlushBuffer will do nothing if the
     * buffer is clean by the time we've locked it.)
     */
    PinBuffer_Locked(bufHdr);
    LWLockAcquire(bufHdr->content_lock, LW_SHARED);

    FlushBuffer(bufHdr, NULL);

    LWLockRelease(bufHdr->content_lock);
    UnpinBuffer(bufHdr, true, false /* don't change freelist */ );

    return true;
}

/*
 * Return a palloc'd string containing buffer usage statistics.
 */
char *
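/*
 * Illustrative sketch, not part of bufmgr.c: the scan-budget arithmetic
 * used by both loops in BgBufferSync above.  (nbuffers * percent + 99)
 * / 100 rounds the target up to a whole number of buffers whenever the
 * product is at least one; e.g. 1000 buffers at bgwriter_lru_percent =
 * 0.5 gives (int) ((500 + 99) / 100) = 5 buffers scanned per round,
 * with actual writes capped separately by the maxpages settings.
 * The guard macro and bgwriter_scan_budget are hypothetical names.
 */
#ifdef BUFMGR_USAGE_EXAMPLES
static int
bgwriter_scan_budget(int nbuffers, double percent)
{
    /* same formula as BgBufferSync's num_to_scan computation */
    return (int) ((nbuffers * percent + 99) / 100);
}
#endif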