📄 freespace.c
                }
                /* now is there enough space? */
                if (fsmrel->nextPhysical != NULL)
                    limitChunkIndex = fsmrel->nextPhysical->firstChunk;
                else
                    limitChunkIndex = FreeSpaceMap->totalChunks;
                if (newChunkIndex + curChunks > limitChunkIndex)
                {
                    /* uh-oh, forcibly cut the allocation to fit */
                    newAlloc = limitChunkIndex - newChunkIndex;

                    /*
                     * If newAlloc < 0 at this point, we are moving the rel's
                     * firstChunk into territory currently assigned to a later
                     * rel.  This is okay so long as we do not copy any data.
                     * The rels will be back in nondecreasing firstChunk order
                     * at completion of the compaction pass.
                     */
                    if (newAlloc < 0)
                        newAlloc = 0;
                    if (fsmrel->isIndex)
                        newAllocPages = newAlloc * INDEXCHUNKPAGES;
                    else
                        newAllocPages = newAlloc * CHUNKPAGES;
                    fsmrel->storedPages = newAllocPages;
                    curChunks = fsm_current_chunks(fsmrel);
                }
            }
            memmove(newLocation, oldLocation, curChunks * CHUNKBYTES);
        }
        else if (newAllocPages < fsmrel->storedPages)
        {
            /*
             * Need to compress the page data.  For an index, "compression"
             * just means dropping excess pages; otherwise we try to keep the
             * ones with the most space.
             */
            if (fsmrel->isIndex)
            {
                fsmrel->storedPages = newAllocPages;
                /* may need to move data */
                if (newChunkIndex != oldChunkIndex)
                    memmove(newLocation, oldLocation, newAlloc * CHUNKBYTES);
            }
            else
            {
                pack_existing_pages((FSMPageData *) newLocation,
                                    newAllocPages,
                                    (FSMPageData *) oldLocation,
                                    fsmrel->storedPages);
                fsmrel->storedPages = newAllocPages;
            }
        }
        else if (newChunkIndex != oldChunkIndex)
        {
            /*
             * No compression needed, but must copy the data up
             */
            memmove(newLocation, oldLocation, curChunks * CHUNKBYTES);
        }
        fsmrel->firstChunk = newChunkIndex;
        nextChunkIndex += newAlloc;
    }
    Assert(nextChunkIndex <= FreeSpaceMap->totalChunks);
    FreeSpaceMap->usedChunks = nextChunkIndex;
}

/*
 * Push all FSMRels physically after afterRel to the end of the storage arena.
 *
 * We sometimes have to do this when deletion or truncation of a relation
 * causes the allocations of remaining rels to expand markedly.  We must
 * temporarily push existing data down to the end so that we can move it
 * back up in an orderly fashion.
 */
static void
push_fsm_rels_after(FSMRelation *afterRel)
{
    int         nextChunkIndex = FreeSpaceMap->totalChunks;
    FSMRelation *fsmrel;

    FreeSpaceMap->usedChunks = FreeSpaceMap->totalChunks;

    for (fsmrel = FreeSpaceMap->lastRel;
         fsmrel != NULL;
         fsmrel = fsmrel->priorPhysical)
    {
        int         chunkCount;
        int         newChunkIndex;
        int         oldChunkIndex;
        char       *newLocation;
        char       *oldLocation;

        if (fsmrel == afterRel)
            break;

        chunkCount = fsm_current_chunks(fsmrel);
        nextChunkIndex -= chunkCount;
        newChunkIndex = nextChunkIndex;
        oldChunkIndex = fsmrel->firstChunk;
        if (newChunkIndex < oldChunkIndex)
        {
            /* we're pushing down, how can it move up? */
            elog(PANIC, "inconsistent entry sizes in FSM");
        }
        else if (newChunkIndex > oldChunkIndex)
        {
            /* need to move it */
            newLocation = FreeSpaceMap->arena + newChunkIndex * CHUNKBYTES;
            oldLocation = FreeSpaceMap->arena + oldChunkIndex * CHUNKBYTES;
            memmove(newLocation, oldLocation, chunkCount * CHUNKBYTES);
            fsmrel->firstChunk = newChunkIndex;
        }
    }
    Assert(nextChunkIndex >= 0);
}
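/*
 * Standalone sketch (not part of the original file): the two-pass
 * "push everything to the arena's end, then move it back up" trick that
 * compact_fsm_storage() and push_fsm_rels_after() above apply to the
 * chunk arena, reduced to a toy byte arena.  All names here (ToyRel,
 * toy_compact, ...) are hypothetical.  Pushing back-to-front first
 * guarantees that the front-to-back pass never memmove()s an entry over
 * data that has not been moved yet; in the real code the forward pass may
 * also hand out larger allocations, which is exactly why the push is
 * needed.  The toy keeps entry sizes fixed for brevity.
 */
#include <stdio.h>
#include <string.h>

typedef struct ToyRel
{
    int         first;          /* offset of entry's data in arena */
    int         len;            /* number of data bytes */
} ToyRel;

static void
toy_compact(char *arena, int arenaSize, ToyRel *rels, int nRels)
{
    int         next = arenaSize;
    int         i;

    /* pass 1: push all entries to the arena's end, back to front */
    for (i = nRels - 1; i >= 0; i--)
    {
        next -= rels[i].len;
        memmove(arena + next, arena + rels[i].first, rels[i].len);
        rels[i].first = next;
    }
    /* pass 2: move each entry up to its final position, front to back */
    next = 0;
    for (i = 0; i < nRels; i++)
    {
        memmove(arena + next, arena + rels[i].first, rels[i].len);
        rels[i].first = next;
        next += rels[i].len;
    }
}

int
main(void)
{
    char        arena[17] = "AA..BBB...CC....";
    ToyRel      rels[3] = {{0, 2}, {4, 3}, {10, 2}};

    toy_compact(arena, 16, rels, 3);
    printf("%.7s\n", arena);    /* prints AABBBCC */
    return 0;
}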
/*
 * Pack a set of per-page freespace data into a smaller amount of space.
 *
 * The method is to compute a low-resolution histogram of the free space
 * amounts, then determine which histogram bin contains the break point.
 * We then keep all pages above that bin, none below it, and just enough
 * of the pages in that bin to fill the output area exactly.
 */
#define HISTOGRAM_BINS 64

static void
pack_incoming_pages(FSMPageData *newLocation, int newPages,
                    PageFreeSpaceInfo *pageSpaces, int nPages)
{
    int         histogram[HISTOGRAM_BINS];
    int         above,
                binct,
                i;
    Size        thresholdL,
                thresholdU;

    Assert(newPages < nPages);  /* else I shouldn't have been called */
    /* Build histogram */
    MemSet(histogram, 0, sizeof(histogram));
    for (i = 0; i < nPages; i++)
    {
        Size        avail = pageSpaces[i].avail;

        if (avail >= BLCKSZ)
            elog(ERROR, "bogus freespace amount");
        avail /= (BLCKSZ / HISTOGRAM_BINS);
        histogram[avail]++;
    }
    /* Find the breakpoint bin */
    above = 0;
    for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
    {
        int         sum = above + histogram[i];

        if (sum > newPages)
            break;
        above = sum;
    }
    Assert(i >= 0);
    thresholdL = i * BLCKSZ / HISTOGRAM_BINS;   /* low bound of bp bin */
    thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS;     /* hi bound */
    binct = newPages - above;   /* number to take from bp bin */
    /* And copy the appropriate data */
    for (i = 0; i < nPages; i++)
    {
        BlockNumber page = pageSpaces[i].blkno;
        Size        avail = pageSpaces[i].avail;

        /* Check caller provides sorted data */
        if (i > 0 && page <= pageSpaces[i - 1].blkno)
            elog(ERROR, "free-space data is not in page order");
        /* Save this page? */
        if (avail >= thresholdU ||
            (avail >= thresholdL && (--binct >= 0)))
        {
            FSMPageSetPageNum(newLocation, page);
            FSMPageSetSpace(newLocation, avail);
            newLocation++;
            newPages--;
        }
    }
    Assert(newPages == 0);
}

/*
 * Pack a set of per-page freespace data into a smaller amount of space.
 *
 * This is algorithmically identical to pack_incoming_pages(), but accepts
 * a different input representation.  Also, we assume the input data has
 * previously been checked for validity (size in bounds, pages in order).
 *
 * Note: it is possible for the source and destination arrays to overlap.
 * The caller is responsible for making sure newLocation is at lower
 * addresses so that we can copy data moving forward in the arrays without
 * problem.
 */
static void
pack_existing_pages(FSMPageData *newLocation, int newPages,
                    FSMPageData *oldLocation, int oldPages)
{
    int         histogram[HISTOGRAM_BINS];
    int         above,
                binct,
                i;
    Size        thresholdL,
                thresholdU;

    Assert(newPages < oldPages);        /* else I shouldn't have been called */
    /* Build histogram */
    MemSet(histogram, 0, sizeof(histogram));
    for (i = 0; i < oldPages; i++)
    {
        Size        avail = FSMPageGetSpace(oldLocation + i);

        /* Shouldn't happen, but test to protect against stack clobber */
        if (avail >= BLCKSZ)
            elog(ERROR, "bogus freespace amount");
        avail /= (BLCKSZ / HISTOGRAM_BINS);
        histogram[avail]++;
    }
    /* Find the breakpoint bin */
    above = 0;
    for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
    {
        int         sum = above + histogram[i];

        if (sum > newPages)
            break;
        above = sum;
    }
    Assert(i >= 0);
    thresholdL = i * BLCKSZ / HISTOGRAM_BINS;   /* low bound of bp bin */
    thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS;     /* hi bound */
    binct = newPages - above;   /* number to take from bp bin */
    /* And copy the appropriate data */
    for (i = 0; i < oldPages; i++)
    {
        BlockNumber page = FSMPageGetPageNum(oldLocation + i);
        Size        avail = FSMPageGetSpace(oldLocation + i);

        /* Save this page? */
        if (avail >= thresholdU ||
            (avail >= thresholdL && (--binct >= 0)))
        {
            FSMPageSetPageNum(newLocation, page);
            FSMPageSetSpace(newLocation, avail);
            newLocation++;
            newPages--;
        }
    }
    Assert(newPages == 0);
}
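/*
 * Standalone sketch (not part of the original file): the histogram
 * breakpoint selection shared by pack_incoming_pages() and
 * pack_existing_pages() above, reduced to plain ints.  toy_pack(),
 * TOY_BLCKSZ, and TOY_BINS are hypothetical stand-ins for the real
 * machinery.  Given nPages free-space amounts but room for only newPages
 * of them, we bin the amounts, scan from the fullest bin down until
 * taking one more whole bin would overshoot newPages, then keep every
 * page at or above that bin's upper bound plus just enough pages from
 * the breakpoint bin itself to fill the output exactly.  Note that
 * within the breakpoint bin the earliest pages win (300 is kept over
 * the slightly larger 310 below), just as in the real code: the
 * remainder is filled first-come, not by exact sorting.
 */
#include <stdio.h>

#define TOY_BLCKSZ  8192
#define TOY_BINS    64

static void
toy_pack(const int *avail, int nPages, int *keep, int newPages)
{
    int         histogram[TOY_BINS] = {0};
    int         above = 0;
    int         binct;
    int         thresholdL;
    int         thresholdU;
    int         i;

    /* build histogram (assumes 0 <= avail[i] < TOY_BLCKSZ) */
    for (i = 0; i < nPages; i++)
        histogram[avail[i] / (TOY_BLCKSZ / TOY_BINS)]++;
    /* find the breakpoint bin (requires newPages < nPages) */
    for (i = TOY_BINS - 1; i >= 0; i--)
    {
        if (above + histogram[i] > newPages)
            break;
        above += histogram[i];
    }
    thresholdL = i * TOY_BLCKSZ / TOY_BINS;             /* low bound of bp bin */
    thresholdU = (i + 1) * TOY_BLCKSZ / TOY_BINS;       /* high bound */
    binct = newPages - above;   /* number to take from bp bin */
    for (i = 0; i < nPages; i++)
    {
        if (avail[i] >= thresholdU ||
            (avail[i] >= thresholdL && --binct >= 0))
            *keep++ = avail[i];
    }
}

int
main(void)
{
    int         avail[6] = {100, 7000, 300, 6500, 310, 305};
    int         keep[3];
    int         i;

    toy_pack(avail, 6, keep, 3);
    for (i = 0; i < 3; i++)
        printf("%d ", keep[i]); /* prints "7000 300 6500" */
    printf("\n");
    return 0;
}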
/*
 * Calculate number of chunks "requested" by a rel.  The "request" is
 * anything beyond the rel's one guaranteed chunk.
 *
 * Rel's interestingPages and isIndex settings must be up-to-date when
 * called.
 *
 * See notes at top of file for details.
 */
static int
fsm_calc_request(FSMRelation *fsmrel)
{
    int         req;

    /* Convert page count to chunk count */
    if (fsmrel->isIndex)
    {
        /* test to avoid unsigned underflow at zero */
        if (fsmrel->interestingPages <= INDEXCHUNKPAGES)
            return 0;
        /* quotient will fit in int, even if interestingPages doesn't */
        req = (fsmrel->interestingPages - 1) / INDEXCHUNKPAGES;
    }
    else
    {
        if (fsmrel->interestingPages <= CHUNKPAGES)
            return 0;
        req = (fsmrel->interestingPages - 1) / CHUNKPAGES;
    }

    /*
     * We clamp the per-relation requests to at most half the arena size;
     * this is intended to prevent a single bloated relation from crowding
     * out FSM service for every other rel.
     */
    req = Min(req, FreeSpaceMap->totalChunks / 2);

    return req;
}

/*
 * Same as above, but without the clamp ... this is just intended for
 * reporting the total space needed to store all information.
 */
static int
fsm_calc_request_unclamped(FSMRelation *fsmrel)
{
    int         req;

    /* Convert page count to chunk count */
    if (fsmrel->isIndex)
    {
        /* test to avoid unsigned underflow at zero */
        if (fsmrel->interestingPages <= INDEXCHUNKPAGES)
            return 0;
        /* quotient will fit in int, even if interestingPages doesn't */
        req = (fsmrel->interestingPages - 1) / INDEXCHUNKPAGES;
    }
    else
    {
        if (fsmrel->interestingPages <= CHUNKPAGES)
            return 0;
        req = (fsmrel->interestingPages - 1) / CHUNKPAGES;
    }

    return req;
}

/*
 * Calculate target allocation (number of chunks) for a rel
 *
 * Parameter is the result from fsm_calc_request().  The global sumRequests
 * and numRels totals must be up-to-date already.
 *
 * See notes at top of file for details.
 */
static int
fsm_calc_target_allocation(int myRequest)
{
    double      spareChunks;
    int         extra;

    spareChunks = FreeSpaceMap->totalChunks - FreeSpaceMap->numRels;
    Assert(spareChunks > 0);
    if (spareChunks >= FreeSpaceMap->sumRequests)
    {
        /* We aren't oversubscribed, so allocate exactly the request */
        extra = myRequest;
    }
    else
    {
        extra = (int) rint(spareChunks * myRequest / FreeSpaceMap->sumRequests);
        if (extra < 0)          /* shouldn't happen, but make sure */
            extra = 0;
    }
    return 1 + extra;
}

/*
 * Calculate number of chunks actually used to store current data
 */
static int
fsm_current_chunks(FSMRelation *fsmrel)
{
    int         chunkCount;

    /* Make sure storedPages == 0 produces right answer */
    if (fsmrel->storedPages <= 0)
        return 0;
    /* Convert page count to chunk count */
    if (fsmrel->isIndex)
        chunkCount = (fsmrel->storedPages - 1) / INDEXCHUNKPAGES + 1;
    else
        chunkCount = (fsmrel->storedPages - 1) / CHUNKPAGES + 1;
    return chunkCount;
}

/*
 * Calculate current actual allocation (number of chunks) for a rel
 */
static int
fsm_current_allocation(FSMRelation *fsmrel)
{
    if (fsmrel->nextPhysical != NULL)
        return fsmrel->nextPhysical->firstChunk - fsmrel->firstChunk;
    else if (fsmrel == FreeSpaceMap->lastRel)
        return FreeSpaceMap->usedChunks - fsmrel->firstChunk;
    else
    {
        /* it's not in the storage-order list */
        Assert(fsmrel->firstChunk < 0 && fsmrel->storedPages == 0);
        return 0;
    }
}

/*
 * Return the FreeSpaceMap structure for examination.
 */
FSMHeader *
GetFreeSpaceMap(void)
{
    return FreeSpaceMap;
}
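/*
 * Standalone sketch (not part of the original file): the proportional
 * rationing arithmetic of fsm_calc_target_allocation() above, with
 * made-up numbers.  Every rel keeps its one guaranteed chunk; when the
 * map is oversubscribed, the spare chunks are granted pro rata to each
 * rel's request (when it isn't, the request is simply granted in full).
 * Compile with the math library, e.g. "cc sketch.c -lm".
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      totalChunks = 1000;
    double      numRels = 10;
    double      sumRequests = 2000;     /* oversubscribed: 2000 > 990 spare */
    double      myRequest = 100;
    double      spareChunks = totalChunks - numRels;    /* 990 */
    int         extra = (int) rint(spareChunks * myRequest / sumRequests);

    /* 990 * 100 / 2000 = 49.5, which rint() rounds to 50 */
    printf("allocation = %d chunks\n", 1 + extra);      /* prints 51 */
    return 0;
}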
#ifdef FREESPACE_DEBUG
/*
 * Dump contents of freespace map for debugging.
 *
 * We assume caller holds the FreeSpaceLock, or is otherwise unconcerned
 * about other processes.
 */
void
DumpFreeSpace(void)
{
    FSMRelation *fsmrel;
    FSMRelation *prevrel = NULL;
    int         relNum = 0;
    int         nPages;

    for (fsmrel = FreeSpaceMap->usageList; fsmrel; fsmrel = fsmrel->nextUsage)
    {
        relNum++;
        fprintf(stderr, "Map %d: rel %u/%u/%u isIndex %d avgRequest %u interestingPages %u nextPage %d\nMap= ",
                relNum,
                fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
                (int) fsmrel->isIndex, fsmrel->avgRequest,
                fsmrel->interestingPages, fsmrel->nextPage);
        if (fsmrel->isIndex)
        {
            IndexFSMPageData *page;

            page = (IndexFSMPageData *)
                (FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
            for (nPages = 0; nPages < fsmrel->storedPages; nPages++)
            {
                fprintf(stderr, " %u",
                        IndexFSMPageGetPageNum(page));
                page++;
            }
        }
        else
        {
            FSMPageData *page;

            page = (FSMPageData *)
                (FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
            for (nPages = 0; nPages < fsmrel->storedPages; nPages++)
            {
                fprintf(stderr, " %u:%u",
                        FSMPageGetPageNum(page),
                        FSMPageGetSpace(page));
                page++;
            }
        }
        fprintf(stderr, "\n");
        /* Cross-check list links */
        if (prevrel != fsmrel->priorUsage)
            fprintf(stderr, "DumpFreeSpace: broken list links\n");
        prevrel = fsmrel;
    }
    if (prevrel != FreeSpaceMap->usageListTail)
        fprintf(stderr, "DumpFreeSpace: broken list links\n");
    /* Cross-check global counters */
    if (relNum != FreeSpaceMap->numRels)
        fprintf(stderr, "DumpFreeSpace: %d rels in list, but numRels = %d\n",
                relNum, FreeSpaceMap->numRels);
}

#endif   /* FREESPACE_DEBUG */