/* btr0sea.c */
	bytes = 0;

	offsets = rec_get_offsets(rec, cursor->index, offsets,
				  n_unique, &heap);
	cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
					     offsets, &match, &bytes);

	if (mode == PAGE_CUR_GE) {
		if (cmp == 1) {
			goto exit_func;
		}

		cursor->up_match = match;

		if (match >= n_unique) {
			success = TRUE;
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_LE) {
		if (cmp == -1) {
			goto exit_func;
		}

		cursor->low_match = match;

	} else if (mode == PAGE_CUR_G) {
		if (cmp != -1) {
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_L) {
		if (cmp != 1) {
			goto exit_func;
		}
	}

	if (can_only_compare_to_cursor_rec) {
		/* Since we could not determine if our guess is right just by
		looking at the record under the cursor, return FALSE */
		goto exit_func;
	}

	match = 0;
	bytes = 0;

	if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
		rec_t*	prev_rec;

		ut_ad(!page_rec_is_infimum(rec));

		prev_rec = page_rec_get_prev(rec);

		if (page_rec_is_infimum(prev_rec)) {
			success = btr_page_get_prev(
				buf_frame_align(prev_rec), mtr) == FIL_NULL;

			goto exit_func;
		}

		offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_GE) {
			success = cmp == 1;
		} else {
			success = cmp != -1;
		}

		goto exit_func;
	} else {
		rec_t*	next_rec;

		ut_ad(!page_rec_is_supremum(rec));

		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {
			if (btr_page_get_next(
				buf_frame_align(next_rec), mtr) == FIL_NULL) {

				cursor->up_match = 0;
				success = TRUE;
			}

			goto exit_func;
		}

		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_LE) {
			success = cmp == -1;
			cursor->up_match = match;
		} else {
			success = cmp != 1;
		}
	}
exit_func:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(success);
}
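#ifdef notdefined
/* Illustrative sketch, not part of the original file: the per-mode
acceptance test that btr_search_check_guess() applies above to the
record under the cursor, restated as a stand-alone predicate. Here cmp
follows the convention of page_cmp_dtuple_rec_with_match(): 1 if the
search tuple is greater than the record, 0 if equal, -1 if less. The
cursor record alone can only rule a guess out; when the neighbouring
records may be inspected, the second half of the function also checks
the record before (for PAGE_CUR_G/GE) or after (for PAGE_CUR_L/LE) the
cursor. The function name is hypothetical. */
static
ibool
btr_search_cursor_rec_side_ok(
/*==========================*/
			/* out: TRUE if the cursor record does not
			contradict the guess */
	ulint	mode,	/* in: PAGE_CUR_G, PAGE_CUR_GE, PAGE_CUR_L,
			or PAGE_CUR_LE */
	int	cmp)	/* in: comparison result of (tuple, cursor rec) */
{
	if (mode == PAGE_CUR_GE) {
		return(cmp != 1);	/* need tuple <= rec */
	} else if (mode == PAGE_CUR_G) {
		return(cmp == -1);	/* need tuple < rec */
	} else if (mode == PAGE_CUR_LE) {
		return(cmp != -1);	/* need tuple >= rec */
	} else {
		ut_ad(mode == PAGE_CUR_L);
		return(cmp == 1);	/* need tuple > rec */
	}
}
#endif /* notdefined */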
/**********************************************************************
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values. */

ibool
btr_search_guess_on_hash(
/*=====================*/
					/* out: TRUE if succeeded */
	dict_index_t*	index,		/* in: index */
	btr_search_t*	info,		/* in: index search info */
	dtuple_t*	tuple,		/* in: logical record */
	ulint		mode,		/* in: PAGE_CUR_L, ... */
	ulint		latch_mode,	/* in: BTR_SEARCH_LEAF, ...;
					NOTE that only if has_search_latch
					is 0, we will have a latch set on
					the cursor page, otherwise we assume
					the caller uses his search latch
					to protect the record! */
	btr_cur_t*	cursor,		/* out: tree cursor */
	ulint		has_search_latch,/* in: latch mode the caller
					currently has on btr_search_latch:
					RW_S_LATCH, RW_X_LATCH, or 0 */
	mtr_t*		mtr)		/* in: mtr */
{
	buf_block_t*	block;
	rec_t*		rec;
	page_t*		page;
	ulint		fold;
	ulint		tuple_n_fields;
	dulint		tree_id;
	ibool		can_only_compare_to_cursor_rec = TRUE;
#ifdef notdefined
	btr_cur_t	cursor2;
	btr_pcur_t	pcur;
#endif
	ut_ad(index && info && tuple && cursor && mtr);
	ut_ad((latch_mode == BTR_SEARCH_LEAF)
	      || (latch_mode == BTR_MODIFY_LEAF));

	/* Note that, for efficiency, the struct info may not be protected by
	any latch here! */

	if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

		return(FALSE);
	}

	cursor->n_fields = info->n_fields;
	cursor->n_bytes = info->n_bytes;

	tuple_n_fields = dtuple_get_n_fields(tuple);

	if (UNIV_UNLIKELY(tuple_n_fields < cursor->n_fields)) {

		return(FALSE);
	}

	if (UNIV_UNLIKELY(tuple_n_fields == cursor->n_fields)
	    && (cursor->n_bytes > 0)) {

		return(FALSE);
	}

	tree_id = (index->tree)->id;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ++;
#endif
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, tree_id);

	cursor->fold = fold;
	cursor->flag = BTR_CUR_HASH;

	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_lock(&btr_search_latch);
	}

	ut_ad(btr_search_latch.writer != RW_LOCK_EX);
	ut_ad(btr_search_latch.reader_count > 0);

	rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);

	if (UNIV_UNLIKELY(!rec)) {
		goto failure_unlock;
	}

	page = buf_frame_align(rec);

	if (UNIV_LIKELY(!has_search_latch)) {

		if (UNIV_UNLIKELY(!buf_page_get_known_nowait(latch_mode, page,
							     BUF_MAKE_YOUNG,
							     __FILE__,
							     __LINE__,
							     mtr))) {
			goto failure_unlock;
		}

		rw_lock_s_unlock(&btr_search_latch);
		can_only_compare_to_cursor_rec = FALSE;

#ifdef UNIV_SYNC_DEBUG
		buf_page_dbg_add_level(page, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
	}

	block = buf_block_align(page);

	if (UNIV_UNLIKELY(block->state == BUF_BLOCK_REMOVE_HASH)) {
		if (UNIV_LIKELY(!has_search_latch)) {

			btr_leaf_page_release(page, latch_mode, mtr);
		}

		goto failure;
	}

	ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
	ut_ad(page_rec_is_user_rec(rec));

	btr_cur_position(index, rec, cursor);

	/* Check the validity of the guess within the page */

	/* If we only have the latch on btr_search_latch, not on the
	page, it only protects the columns of the record the cursor
	is positioned on. We cannot look at the next of the previous
	record to determine if our guess for the cursor position is
	right. */
	if (UNIV_EXPECT(ut_dulint_cmp(tree_id, btr_page_get_index_id(page)), 0)
	    || !btr_search_check_guess(cursor, can_only_compare_to_cursor_rec,
				       tuple, mode, mtr)) {
		if (UNIV_LIKELY(!has_search_latch)) {
			btr_leaf_page_release(page, latch_mode, mtr);
		}

		goto failure;
	}

	if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

		info->n_hash_potential++;
	}

#ifdef notdefined
	/* These lines of code can be used in a debug version to check
	the correctness of the searched cursor position: */

	info->last_hash_succ = FALSE;

	/* Currently, does not work if the following fails: */

	ut_ad(!has_search_latch);

	btr_leaf_page_release(page, latch_mode, mtr);

	btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
				    &cursor2, 0, mtr);
	if (mode == PAGE_CUR_GE
	    && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

		/* If mode is PAGE_CUR_GE, then the binary search in the
		index tree may actually take us to the supremum of the
		previous page */

		info->last_hash_succ = FALSE;

		btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
					  &pcur, mtr);
		ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
	} else {
		ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
	}

	/* NOTE that it is theoretically possible that the above assertions
	fail if the page of the cursor gets removed from the buffer pool
	meanwhile! Thus it might not be a bug. */
#endif
	info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
	btr_search_n_succ++;
#endif
	if (UNIV_LIKELY(!has_search_latch)
	    && buf_block_peek_if_too_old(block)) {

		buf_page_make_young(page);
	}

	/* Increment the page get statistics though we did not really
	fix the page: for user info only */

	buf_pool->n_page_gets++;

	return(TRUE);

	/*-------------------------------------------*/
failure_unlock:
	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_unlock(&btr_search_latch);
	}
failure:
	info->n_hash_fail++;
	cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
	if (info->n_hash_succ > 0) {
		info->n_hash_succ--;
	}
#endif
	info->last_hash_succ = FALSE;

	return(FALSE);
}
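#ifdef notdefined
/* Illustrative sketch, not part of the original file: one way a caller
can use btr_search_guess_on_hash() above -- try the adaptive hash guess
first and fall back to an ordinary top-down search when it fails. In
InnoDB proper this logic lives inside btr_cur_search_to_nth_level();
the helper name here is hypothetical. */
static
void
btr_search_leaf_with_guess(
/*=======================*/
	dict_index_t*	index,	/* in: index */
	btr_search_t*	info,	/* in: search info of the index */
	dtuple_t*	tuple,	/* in: search tuple */
	btr_cur_t*	cursor,	/* out: positioned tree cursor */
	mtr_t*		mtr)	/* in: mtr */
{
	/* has_search_latch == 0: on success the function itself
	latches the leaf page within mtr. */
	if (btr_search_guess_on_hash(index, info, tuple, PAGE_CUR_LE,
				     BTR_SEARCH_LEAF, cursor, 0, mtr)) {
		/* cursor->flag == BTR_CUR_HASH: positioned without
		descending the tree; up_match and low_match are valid
		because mode is PAGE_CUR_LE. */
		return;
	}

	/* cursor->flag == BTR_CUR_HASH_FAIL: do the normal search. */
	btr_cur_search_to_nth_level(index, 0, tuple, PAGE_CUR_LE,
				    BTR_SEARCH_LEAF, cursor, 0, mtr);
}
#endif /* notdefined */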
/************************************************************************
Drops a page hash index. */

void
btr_search_drop_page_hash_index(
/*============================*/
	page_t*	page)	/* in: index page, s- or x-latched, or an index page
			for which we know that block->buf_fix_count == 0 */
{
	hash_table_t*	table;
	buf_block_t*	block;
	ulint		n_fields;
	ulint		n_bytes;
	rec_t*		rec;
	ulint		fold;
	ulint		prev_fold;
	dulint		tree_id;
	ulint		n_cached;
	ulint		n_recs;
	ulint*		folds;
	ulint		i;
	mem_heap_t*	heap;
	dict_index_t*	index;
	ulint*		offsets;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
retry:
	rw_lock_s_lock(&btr_search_latch);

	block = buf_block_align(page);

	if (UNIV_LIKELY(!block->is_hashed)) {

		rw_lock_s_unlock(&btr_search_latch);

		return;
	}

	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX)
	      || (block->buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	index = block->index;

	/* NOTE: The fields of block must not be accessed after
	releasing btr_search_latch, as the index page might only
	be s-latched! */

	rw_lock_s_unlock(&btr_search_latch);

	ut_a(n_fields + n_bytes > 0);

	n_recs = page_get_n_recs(page);

	/* Calculate and cache fold values into an array for fast deletion
	from the hash index */

	folds = mem_alloc(n_recs * sizeof(ulint));

	n_cached = 0;

	rec = page_get_infimum_rec(page);
	rec = page_rec_get_next(rec);

	tree_id = btr_page_get_index_id(page);

	ut_a(0 == ut_dulint_cmp(tree_id, index->id));

	prev_fold = 0;

	heap = NULL;
	offsets = NULL;

	while (!page_rec_is_supremum(rec)) {
		/* FIXME: in a mixed tree, not all records may have enough
		ordering fields: */
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
		fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id);

		if (fold == prev_fold && prev_fold != 0) {

			goto next_rec;
		}

		/* Remove all hash nodes pointing to this page from the
		hash chain */

		folds[n_cached] = fold;
		n_cached++;
next_rec:
		rec = page_rec_get_next(rec);
		prev_fold = fold;
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!block->is_hashed)) {
		/* Someone else has meanwhile dropped the hash index */

		goto cleanup;
	}

	ut_a(block->index == index);

	if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
	    || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

		/* Someone else has meanwhile built a new hash index on the
		page, with different parameters */

		rw_lock_x_unlock(&btr_search_latch);

		mem_free(folds);
		goto retry;
	}

	for (i = 0; i < n_cached; i++) {

		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	block->is_hashed = FALSE;
	block->index = NULL;
cleanup:
	if (UNIV_UNLIKELY(block->n_pointers)) {
		/* Corruption */
		ut_print_timestamp(stderr);
		fprintf(stderr,
" InnoDB: Corruption of adaptive hash index. After dropping\n"
"InnoDB: the hash index to a page of %s, still %lu hash nodes remain.\n",
			index->name, (ulong) block->n_pointers);
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_validate();
	} else {
		rw_lock_x_unlock(&btr_search_latch);
	}

	mem_free(folds);
}

/************************************************************************
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */

void
btr_search_drop_page_hash_when_freed(
/*=================================*/
	ulint	space,		/* in: space id */
	ulint	page_no)	/* in: page number */
{
	ibool	is_hashed;
	page_t*	page;
	mtr_t	mtr;

	is_hashed = buf_page_peek_if_search_hashed(space, page_no);

	if (!is_hashed) {

		return;
	}

	mtr_start(&mtr);

	/* We assume that if the caller has a latch on the page, then the
	caller has already dropped the hash index for the page, and we never
	get here. Therefore we can acquire the s-latch to the page without
	having to fear a deadlock. */

	page = buf_page_get_gen(space, page_no, RW_S_LATCH, NULL,
				BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
				&mtr);

#ifdef UNIV_SYNC_DEBUG
	buf_page_dbg_add_level(page, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */

	btr_search_drop_page_hash_index(page);

	mtr_commit(&mtr);
}
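#ifdef notdefined
/* Illustrative sketch, not part of the original file: the intended
calling convention for the two drop functions above. Code that frees an
index page back to the file system and holds no latch on it goes through
the _when_freed variant, which is a no-op when the page is not in the
buffer pool or carries no hash index; code that already has the page
latched (or knows block->buf_fix_count == 0) may call
btr_search_drop_page_hash_index() directly. Note also the two-phase
design of btr_search_drop_page_hash_index(): fold values are collected
under the s-latch, the hash nodes are removed under the x-latch, and
the whole pass is retried if another thread rebuilt the hash index with
different parameters in between. The function name below is
hypothetical. */
static
void
btr_search_example_free_page(
/*=========================*/
	ulint	space,	/* in: space id of the page being freed */
	ulint	page_no)/* in: page number of the page being freed */
{
	/* Safe without holding the page latch: the function s-latches
	the page inside its own mini-transaction. */
	btr_search_drop_page_hash_when_freed(space, page_no);

	/* ... the fseg/fsp code would actually free the page here ... */
}
#endif /* notdefined */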
/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/* in: index for which to build */
	page_t*		page,	/* in: index page, s- or x-latched */
	ulint		n_fields,/* in: hash this many full fields */
	ulint		n_bytes,/* in: hash this many bytes from the next
				field */
	ulint		side)	/* in: hash for searches from this side */
{
	hash_table_t*	table;
	buf_block_t*	block;
	rec_t*		rec;
	rec_t*		next_rec;
	ulint		fold;
	ulint		next_fold;
	dulint		tree_id;
	ulint		n_cached;
	ulint		n_recs;
	ulint*		folds;
	rec_t**		recs;
	ulint		i;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

	ut_ad(index);

	block = buf_block_align(page);
	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_side != side))) {