📄 buf0flu.c
	   || flush_type == BUF_FLUSH_SINGLE_PAGE);

	mutex_enter(&(buf_pool->mutex));

	block = buf_page_hash_get(space, offset);

	ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);

	if (flush_type == BUF_FLUSH_LIST
	    && block && buf_flush_ready_for_flush(block, flush_type)) {

		block->io_fix = BUF_IO_WRITE;

		/* If AWE is enabled and the page is not mapped to a frame,
		then map it */

		if (block->frame == NULL) {
			ut_a(srv_use_awe);

			/* We set second parameter TRUE because the block is
			in the LRU list and we must put it to
			awe_LRU_free_mapped list once mapped to a frame */

			buf_awe_map_page_to_frame(block, TRUE);
		}

		block->flush_type = flush_type;

		if (buf_pool->n_flush[flush_type] == 0) {

			os_event_reset(buf_pool->no_flush[flush_type]);
		}

		(buf_pool->n_flush[flush_type])++;

		locked = FALSE;

		/* If the simulated aio thread is not running, we must not
		wait for any latch, as we may end up in a deadlock: if
		buf_fix_count == 0, then we know we need not wait */

		if (block->buf_fix_count == 0) {
			rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);

			locked = TRUE;
		}

		mutex_exit(&(buf_pool->mutex));

		if (!locked) {
			buf_flush_buffered_writes();

			rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
		}

#ifdef UNIV_DEBUG
		if (buf_debug_prints) {
			fprintf(stderr,
				"Flushing page space %lu, page no %lu \n",
				(ulong) block->space, (ulong) block->offset);
		}
#endif /* UNIV_DEBUG */

		buf_flush_write_block_low(block);

		return(1);

	} else if (flush_type == BUF_FLUSH_LRU
		   && block && buf_flush_ready_for_flush(block, flush_type)) {

		/* VERY IMPORTANT: Because any thread may call the LRU flush,
		even when owning locks on pages, to avoid deadlocks, we must
		make sure that the s-lock is acquired on the page without
		waiting: this is accomplished because in the if-condition
		above we require the page not to be bufferfixed (in function
		..._ready_for_flush). */

		block->io_fix = BUF_IO_WRITE;

		/* If AWE is enabled and the page is not mapped to a frame,
		then map it */

		if (block->frame == NULL) {
			ut_a(srv_use_awe);

			/* We set second parameter TRUE because the block is
			in the LRU list and we must put it to
			awe_LRU_free_mapped list once mapped to a frame */

			buf_awe_map_page_to_frame(block, TRUE);
		}

		block->flush_type = flush_type;

		if (buf_pool->n_flush[flush_type] == 0) {

			os_event_reset(buf_pool->no_flush[flush_type]);
		}

		(buf_pool->n_flush[flush_type])++;

		rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);

		/* Note that the s-latch is acquired before releasing the
		buf_pool mutex: this ensures that the latch is acquired
		immediately. */

		mutex_exit(&(buf_pool->mutex));

		buf_flush_write_block_low(block);

		return(1);

	} else if (flush_type == BUF_FLUSH_SINGLE_PAGE
		   && block && buf_flush_ready_for_flush(block, flush_type)) {

		block->io_fix = BUF_IO_WRITE;

		/* If AWE is enabled and the page is not mapped to a frame,
		then map it */

		if (block->frame == NULL) {
			ut_a(srv_use_awe);

			/* We set second parameter TRUE because the block is
			in the LRU list and we must put it to
			awe_LRU_free_mapped list once mapped to a frame */

			buf_awe_map_page_to_frame(block, TRUE);
		}

		block->flush_type = flush_type;

		if (buf_pool->n_flush[block->flush_type] == 0) {

			os_event_reset(buf_pool->no_flush[block->flush_type]);
		}

		(buf_pool->n_flush[flush_type])++;

		mutex_exit(&(buf_pool->mutex));

		rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);

#ifdef UNIV_DEBUG
		if (buf_debug_prints) {
			fprintf(stderr,
				"Flushing single page space %lu, page no %lu \n",
				(ulong) block->space, (ulong) block->offset);
		}
#endif /* UNIV_DEBUG */

		buf_flush_write_block_low(block);

		return(1);
	} else {
		mutex_exit(&(buf_pool->mutex));

		return(0);
	}
}
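/* Illustrative sketch (not part of buf0flu.c): the n_flush[]/no_flush[]
pair used above implements a counting event: the event is reset when the
first write of a flush type is queued and set again only once the count
drains back to zero (the decrement happens on write completion, outside
this excerpt), so buf_flush_wait_batch_end() blocks exactly while writes
of that type are in flight. A minimal standalone version of the same
protocol, using pthreads instead of InnoDB's os_event, might look like
this; all names below are hypothetical. */

#include <pthread.h>

typedef struct {
	pthread_mutex_t	mutex;		/* plays the role of buf_pool->mutex */
	pthread_cond_t	no_flush;	/* signalled when n_flush reaches 0 */
	unsigned	n_flush;	/* writes currently in flight */
} flush_state_t;

/* Queue one more write; call with s->mutex held. A condition variable
needs no explicit os_event_reset(): waiters simply re-check n_flush. */
static void
flush_begin(flush_state_t* s)
{
	s->n_flush++;
}

/* Mark one write complete; wakes waiters when the count drains to zero,
mirroring the os_event_set() in buf_flush_batch() below. */
static void
flush_end(flush_state_t* s)
{
	pthread_mutex_lock(&s->mutex);
	if (--s->n_flush == 0) {
		pthread_cond_broadcast(&s->no_flush);
	}
	pthread_mutex_unlock(&s->mutex);
}

/* Equivalent of buf_flush_wait_batch_end(): block until no writes of
this type remain in flight. */
static void
flush_wait(flush_state_t* s)
{
	pthread_mutex_lock(&s->mutex);
	while (s->n_flush > 0) {
		pthread_cond_wait(&s->no_flush, &s->mutex);
	}
	pthread_mutex_unlock(&s->mutex);
}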
/***************************************************************
Flushes to disk all flushable pages within the flush area. */
static
ulint
buf_flush_try_neighbors(
/*====================*/
				/* out: number of pages flushed */
	ulint	space,		/* in: space id */
	ulint	offset,		/* in: page offset */
	ulint	flush_type)	/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
	buf_block_t*	block;
	ulint		low, high;
	ulint		count	= 0;
	ulint		i;

	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

	low = (offset / BUF_FLUSH_AREA) * BUF_FLUSH_AREA;
	high = (offset / BUF_FLUSH_AREA + 1) * BUF_FLUSH_AREA;

	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
		/* If there is little space, it is better not to flush any
		block except from the end of the LRU list */

		low = offset;
		high = offset + 1;
	}

	/* fprintf(stderr, "Flush area: low %lu high %lu\n", low, high); */

	if (high > fil_space_get_size(space)) {
		high = fil_space_get_size(space);
	}

	mutex_enter(&(buf_pool->mutex));

	for (i = low; i < high; i++) {

		block = buf_page_hash_get(space, i);
		ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);

		if (block && flush_type == BUF_FLUSH_LRU && i != offset
		    && !block->old) {

			/* We avoid flushing 'non-old' blocks in an LRU
			flush, because the flushed blocks are soon freed */

			continue;
		}

		if (block && buf_flush_ready_for_flush(block, flush_type)
		    && (i == offset || block->buf_fix_count == 0)) {

			/* We only try to flush those neighbors != offset
			where the buf fix count is zero, as we then know
			that we probably can latch the page without a
			semaphore wait. Semaphore waits are expensive
			because we must flush the doublewrite buffer before
			we start waiting. */

			mutex_exit(&(buf_pool->mutex));

			/* Note: as we release the buf_pool mutex above, in
			buf_flush_try_page we cannot be sure the page is
			still in a flushable state: therefore we check it
			again inside that function. */

			count += buf_flush_try_page(space, i, flush_type);

			mutex_enter(&(buf_pool->mutex));
		}
	}

	mutex_exit(&(buf_pool->mutex));

	return(count);
}
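/* Worked example of the flush-area arithmetic in buf_flush_try_neighbors()
above, assuming for illustration that BUF_FLUSH_AREA == 64 (the real value
is derived from the buffer pool configuration). The integer division rounds
offset down to the start of its area, so every offset in [64, 127] maps to
the same aligned window [64, 128):

	offset = 100:	low  = (100 / 64) * 64     =  64
			high = (100 / 64 + 1) * 64 = 128

A self-contained check of the same computation: */

#include <assert.h>

static void
flush_area_example(void)
{
	unsigned long	area = 64;	/* hypothetical BUF_FLUSH_AREA */
	unsigned long	offset = 100;
	unsigned long	low = (offset / area) * area;
	unsigned long	high = (offset / area + 1) * area;

	assert(low == 64 && high == 128);
}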
/***********************************************************************
This utility flushes dirty blocks from the end of the LRU list or flush_list.
NOTE 1: in the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages! */
ulint
buf_flush_batch(
/*============*/
				/* out: number of blocks for which the write
				request was queued; ULINT_UNDEFINED if there
				was a flush of the same type already running */
	ulint	flush_type,	/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST; if
				BUF_FLUSH_LIST, then the caller must not own
				any latches on pages */
	ulint	min_n,		/* in: wished minimum number of blocks flushed
				(it is not guaranteed that the actual number
				is that big, though) */
	dulint	lsn_limit)	/* in the case of BUF_FLUSH_LIST all blocks
				whose oldest_modification is smaller than
				this should be flushed (if their number does
				not exceed min_n), otherwise ignored */
{
	buf_block_t*	block;
	ulint		page_count	= 0;
	ulint		old_page_count;
	ulint		space;
	ulint		offset;
	ibool		found;

	ut_ad((flush_type == BUF_FLUSH_LRU)
	      || (flush_type == BUF_FLUSH_LIST));
	ut_ad((flush_type != BUF_FLUSH_LIST)
	      || sync_thread_levels_empty_gen(TRUE));

	mutex_enter(&(buf_pool->mutex));

	if ((buf_pool->n_flush[flush_type] > 0)
	    || (buf_pool->init_flush[flush_type] == TRUE)) {

		/* There is already a flush batch of the same type running */

		mutex_exit(&(buf_pool->mutex));

		return(ULINT_UNDEFINED);
	}

	(buf_pool->init_flush)[flush_type] = TRUE;

	for (;;) {
		/* If we have flushed enough, leave the loop */
		if (page_count >= min_n) {

			break;
		}

		/* Start from the end of the list looking for a suitable
		block to be flushed. */

		if (flush_type == BUF_FLUSH_LRU) {
			block = UT_LIST_GET_LAST(buf_pool->LRU);
		} else {
			ut_ad(flush_type == BUF_FLUSH_LIST);

			block = UT_LIST_GET_LAST(buf_pool->flush_list);

			if (!block
			    || (ut_dulint_cmp(block->oldest_modification,
					      lsn_limit) >= 0)) {
				/* We have flushed enough */

				break;
			}
		}

		found = FALSE;

		/* Note that after finding a single flushable page, we try
		to flush also all its neighbors, and after that start from
		the END of the LRU list or flush list again: the list may
		change during the flushing and we cannot safely preserve
		within this function a pointer to a block in the list! */

		while ((block != NULL) && !found) {
			ut_a(block->state == BUF_BLOCK_FILE_PAGE);

			if (buf_flush_ready_for_flush(block, flush_type)) {

				found = TRUE;
				space = block->space;
				offset = block->offset;

				mutex_exit(&(buf_pool->mutex));

				old_page_count = page_count;

				/* Try to flush also all the neighbors */
				page_count += buf_flush_try_neighbors(
					space, offset, flush_type);
				/* fprintf(stderr,
				"Flush type %lu, page no %lu, neighb %lu\n",
				flush_type, offset,
				page_count - old_page_count); */

				mutex_enter(&(buf_pool->mutex));

			} else if (flush_type == BUF_FLUSH_LRU) {

				block = UT_LIST_GET_PREV(LRU, block);
			} else {
				ut_ad(flush_type == BUF_FLUSH_LIST);

				block = UT_LIST_GET_PREV(flush_list, block);
			}
		}

		/* If we could not find anything to flush, leave the loop */

		if (!found) {
			break;
		}
	}

	(buf_pool->init_flush)[flush_type] = FALSE;

	if ((buf_pool->n_flush[flush_type] == 0)
	    && (buf_pool->init_flush[flush_type] == FALSE)) {

		/* The running flush batch has ended */

		os_event_set(buf_pool->no_flush[flush_type]);
	}

	mutex_exit(&(buf_pool->mutex));

	buf_flush_buffered_writes();

#ifdef UNIV_DEBUG
	if (buf_debug_prints && page_count > 0) {
		ut_a(flush_type == BUF_FLUSH_LRU
		     || flush_type == BUF_FLUSH_LIST);
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) page_count);
	}
#endif /* UNIV_DEBUG */

	if (page_count != ULINT_UNDEFINED)
		srv_buf_pool_flushed += page_count;

	return(page_count);
}
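/* Illustrative sketch (not part of buf0flu.c): the return-value contract
of buf_flush_batch() documented above means a caller must treat
ULINT_UNDEFINED not as an error but as "a batch of this type is already
running", and wait for that batch instead; buf_flush_free_margin() below
follows exactly this pattern for LRU flushes. A hypothetical flush-list
caller might look like this: */

static void
flush_list_up_to(dulint lsn_limit)	/* hypothetical helper */
{
	ulint	n_flushed;

	n_flushed = buf_flush_batch(BUF_FLUSH_LIST, ULINT_MAX, lsn_limit);

	if (n_flushed == ULINT_UNDEFINED) {
		/* Another flush-list batch was already in progress:
		wait for it to finish rather than starting our own. */

		buf_flush_wait_batch_end(BUF_FLUSH_LIST);
	}
}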
"Flushed %lu pages in LRU flush\n" : "Flushed %lu pages in flush list flush\n", (ulong) page_count); }#endif /* UNIV_DEBUG */ if (page_count != ULINT_UNDEFINED) srv_buf_pool_flushed+= page_count; return(page_count);}/**********************************************************************Waits until a flush batch of the given type ends */voidbuf_flush_wait_batch_end(/*=====================*/ ulint type) /* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */{ ut_ad((type == BUF_FLUSH_LRU) || (type == BUF_FLUSH_LIST)); os_event_wait(buf_pool->no_flush[type]);} /**********************************************************************Gives a recommendation of how many blocks should be flushed to establisha big enough margin of replaceable blocks near the end of the LRU listand in the free list. */staticulintbuf_flush_LRU_recommendation(void)/*==============================*/ /* out: number of blocks which should be flushed from the end of the LRU list */{ buf_block_t* block; ulint n_replaceable; ulint distance = 0; mutex_enter(&(buf_pool->mutex)); n_replaceable = UT_LIST_GET_LEN(buf_pool->free); block = UT_LIST_GET_LAST(buf_pool->LRU); while ((block != NULL) && (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN + BUF_FLUSH_EXTRA_MARGIN) && (distance < BUF_LRU_FREE_SEARCH_LEN)) { if (buf_flush_ready_for_replace(block)) { n_replaceable++; } distance++; block = UT_LIST_GET_PREV(LRU, block); } mutex_exit(&(buf_pool->mutex)); if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) { return(0); } return(BUF_FLUSH_FREE_BLOCK_MARGIN + BUF_FLUSH_EXTRA_MARGIN - n_replaceable);}/*************************************************************************Flushes pages from the end of the LRU list if there is too small a marginof replaceable pages there or in the free list. VERY IMPORTANT: this functionis called also by threads which have locks on pages. To avoid deadlocks, weflush only pages such that the s-lock required for flushing can be acquiredimmediately, without waiting. */ voidbuf_flush_free_margin(void)/*=======================*/{ ulint n_to_flush; ulint n_flushed; n_to_flush = buf_flush_LRU_recommendation(); if (n_to_flush > 0) { n_flushed = buf_flush_batch(BUF_FLUSH_LRU, n_to_flush, ut_dulint_zero); if (n_flushed == ULINT_UNDEFINED) { /* There was an LRU type flush batch already running; let us wait for it to end */ buf_flush_wait_batch_end(BUF_FLUSH_LRU); } }}/**********************************************************************Validates the flush list. */staticiboolbuf_flush_validate_low(void)/*========================*/ /* out: TRUE if ok */{ buf_block_t* block; dulint om; UT_LIST_VALIDATE(flush_list, buf_block_t, buf_pool->flush_list); block = UT_LIST_GET_FIRST(buf_pool->flush_list); while (block != NULL) { om = block->oldest_modification; ut_a(block->state == BUF_BLOCK_FILE_PAGE); ut_a(ut_dulint_cmp(om, ut_dulint_zero) > 0); block = UT_LIST_GET_NEXT(flush_list, block); if (block) { ut_a(ut_dulint_cmp(om, block->oldest_modification) >= 0); } } return(TRUE);}/**********************************************************************Validates the flush list. */iboolbuf_flush_validate(void)/*====================*/ /* out: TRUE if ok */{ ibool ret; mutex_enter(&(buf_pool->mutex)); ret = buf_flush_validate_low(); mutex_exit(&(buf_pool->mutex)); return(ret);}