📄 row0sel.c
	}

	ut_ad(plan->mode == PAGE_CUR_GE);

	/* As the cursor is now placed on a user record after a search with
	the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
	fields in the user record matched to the search tuple */

	if (btr_pcur_get_up_match(&(plan->pcur)) < plan->n_exact_match) {

		return(SEL_EXHAUSTED);
	}

	/* This is a non-locking consistent read: if necessary, fetch a
	previous version of the record */

	offsets = rec_get_offsets(rec, index, offsets,
					ULINT_UNDEFINED, &heap);

	if (index->type & DICT_CLUSTERED) {
		if (!lock_clust_rec_cons_read_sees(rec, index, offsets,
							node->read_view)) {
			ret = SEL_RETRY;
			goto func_exit;
		}
	} else if (!lock_sec_rec_cons_read_sees(rec, index,
							node->read_view)) {
		ret = SEL_RETRY;
		goto func_exit;
	}

	/* Test deleted flag. Fetch the columns needed in test conditions. */

	row_sel_fetch_columns(index, rec, offsets,
					UT_LIST_GET_FIRST(plan->columns));

	if (rec_get_deleted_flag(rec, plan->table->comp)) {

		ret = SEL_EXHAUSTED;
		goto func_exit;
	}

	/* Test the rest of search conditions */

	if (!row_sel_test_other_conds(plan)) {

		ret = SEL_EXHAUSTED;
		goto func_exit;
	}

	ut_ad(plan->pcur.latch_mode == node->latch_mode);

	plan->n_rows_fetched++;

func_exit:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(SEL_FOUND);
}

/*************************************************************************
Performs a select step. */
static
ulint
row_sel(
/*====*/
				/* out: DB_SUCCESS or error code */
	sel_node_t*	node,	/* in: select node */
	que_thr_t*	thr)	/* in: query thread */
{
	dict_index_t*	index;
	plan_t*		plan;
	mtr_t		mtr;
	ibool		moved;
	rec_t*		rec;
	rec_t*		old_vers;
	rec_t*		clust_rec;
	ibool		search_latch_locked;
	ibool		consistent_read;

	/* The following flag becomes TRUE when we are doing a consistent
	read from a non-clustered index and we must look at the clustered
	index to find out the previous delete mark state of the
	non-clustered record: */

	ibool		cons_read_requires_clust_rec	= FALSE;
	ulint		cost_counter			= 0;
	ibool		cursor_just_opened;
	ibool		must_go_to_next;
	ibool		leaf_contains_updates		= FALSE;
				/* TRUE if select_will_do_update is TRUE and
				the current clustered index leaf page has
				been updated during the current mtr: mtr
				must be committed at the same time as the
				leaf x-latch is released */
	ibool		mtr_has_extra_clust_latch	= FALSE;
				/* TRUE if the search was made using a
				non-clustered index, and we had to access
				the clustered record: now &mtr contains a
				clustered index latch, and &mtr must be
				committed before we move to the next
				non-clustered record */
	ulint		found_flag;
	ulint		err;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

	ut_ad(thr->run_node == node);

	search_latch_locked = FALSE;

	if (node->read_view) {
		/* In consistent reads, we try to do with the hash index and
		not to use the buffer page get. This is to reduce memory bus
		load resulting from semaphore operations. The search latch
		will be s-locked when we access an index with a unique search
		condition, but not locked when we access an index with a
		less selective search condition. */

		consistent_read = TRUE;
	} else {
		consistent_read = FALSE;
	}

table_loop:
	/* TABLE LOOP
	----------
	This is the outer major loop in calculating a join. We come here when
	node->fetch_table changes, and after adding a row to aggregate totals
	and, of course, when this function is called. */
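
	/* node->fetch_table is the position of the current table in the
	join order: each table has its own plan_t with a persistent cursor.
	Finding a qualifying row advances the join to the next table, and
	exhausting a table's cursor moves the join back to the previous
	table (or ends the select if this was the first table). */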
	ut_ad(leaf_contains_updates == FALSE);
	ut_ad(mtr_has_extra_clust_latch == FALSE);

	plan = sel_node_get_nth_plan(node, node->fetch_table);
	index = plan->index;

	if (plan->n_rows_prefetched > 0) {
		sel_pop_prefetched_row(plan);

		goto next_table_no_mtr;
	}

	if (plan->cursor_at_end) {
		/* The cursor has already reached the result set end: no more
		rows to process for this table cursor, as also the prefetch
		stack was empty */

		ut_ad(plan->pcur_is_open);

		goto table_exhausted_no_mtr;
	}

	/* Open a cursor to index, or restore an open cursor position */

	mtr_start(&mtr);

	if (consistent_read && plan->unique_search && !plan->pcur_is_open
						&& !plan->must_get_clust) {
		if (!search_latch_locked) {
			rw_lock_s_lock(&btr_search_latch);

			search_latch_locked = TRUE;
		} else if (btr_search_latch.writer_is_wait_ex) {

			/* There is an x-latch request waiting: release the
			s-latch for a moment; as an s-latch here is often
			kept for some 10 searches before being released,
			a waiting x-latch request would block other threads
			from acquiring an s-latch for a long time, lowering
			performance significantly in multiprocessors. */

			rw_lock_s_unlock(&btr_search_latch);
			rw_lock_s_lock(&btr_search_latch);
		}

		found_flag = row_sel_try_search_shortcut(node, plan, &mtr);

		if (found_flag == SEL_FOUND) {

			goto next_table;

		} else if (found_flag == SEL_EXHAUSTED) {

			goto table_exhausted;
		}

		ut_ad(found_flag == SEL_RETRY);

		plan_reset_cursor(plan);

		mtr_commit(&mtr);
		mtr_start(&mtr);
	}

	if (search_latch_locked) {
		rw_lock_s_unlock(&btr_search_latch);

		search_latch_locked = FALSE;
	}

	if (!plan->pcur_is_open) {
		/* Evaluate the expressions to build the search tuple and
		open the cursor */

		row_sel_open_pcur(node, plan, search_latch_locked, &mtr);

		cursor_just_opened = TRUE;

		/* A new search was made: increment the cost counter */

		cost_counter++;
	} else {
		/* Restore pcur position to the index */

		must_go_to_next = row_sel_restore_pcur_pos(node, plan, &mtr);

		cursor_just_opened = FALSE;

		if (must_go_to_next) {
			/* We have already processed the cursor record: move
			to the next */

			goto next_rec;
		}
	}

rec_loop:
	/* RECORD LOOP
	-----------
	In this loop we use pcur and try to fetch a qualifying row, and
	also fill the prefetch buffer for this table if n_rows_fetched has
	exceeded a threshold. While we are inside this loop, the following
	holds:
	(1) &mtr is started,
	(2) pcur is positioned and open.

	NOTE that if cursor_just_opened is TRUE here, it means that we came
	to this point right after row_sel_open_pcur. */

	ut_ad(mtr_has_extra_clust_latch == FALSE);

	rec = btr_pcur_get_rec(&(plan->pcur));

	/* PHASE 1: Set a lock if specified */

	if (!node->asc && cursor_just_opened
				&& !page_rec_is_supremum(rec)) {

		/* When we open a cursor for a descending search, we must set
		a next-key lock on the successor record: otherwise it would
		be possible to insert new records next to the cursor
		position, and it might be that these new records should
		appear in the search result set, resulting in the phantom
		problem. */

		if (!consistent_read) {

			/* If innodb_locks_unsafe_for_binlog option is used,
			we lock only the record, i.e., next-key locking is
			not used. */
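
			/* LOCK_ORDINARY is InnoDB's next-key lock: it covers
			the index record and the gap before it. LOCK_REC_NOT_GAP
			locks only the record itself, so inserts into the gap
			are not blocked. */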
			rec_t*	next_rec = page_rec_get_next(rec);
			ulint	lock_type;

			offsets = rec_get_offsets(next_rec, index, offsets,
						ULINT_UNDEFINED, &heap);

			if (srv_locks_unsafe_for_binlog) {
				lock_type = LOCK_REC_NOT_GAP;
			} else {
				lock_type = LOCK_ORDINARY;
			}

			err = sel_set_rec_lock(next_rec, index, offsets,
					node->row_lock_mode, lock_type, thr);

			if (err != DB_SUCCESS) {
				/* Note that in this case we will store in
				pcur the PREDECESSOR of the record we are
				waiting the lock for */

				goto lock_wait_or_error;
			}
		}
	}

	if (page_rec_is_infimum(rec)) {

		/* The infimum record on a page cannot be in the result set,
		and neither can a record lock be placed on it: we skip such
		a record. We also increment the cost counter as we may have
		processed yet another page of index. */

		cost_counter++;

		goto next_rec;
	}

	if (!consistent_read) {
		/* Try to place a lock on the index record */

		/* If innodb_locks_unsafe_for_binlog option is used, we lock
		only the record, i.e., next-key locking is not used. */

		ulint	lock_type;

		offsets = rec_get_offsets(rec, index, offsets,
					ULINT_UNDEFINED, &heap);

		if (srv_locks_unsafe_for_binlog) {
			lock_type = LOCK_REC_NOT_GAP;
		} else {
			lock_type = LOCK_ORDINARY;
		}

		err = sel_set_rec_lock(rec, index, offsets,
				node->row_lock_mode, lock_type, thr);

		if (err != DB_SUCCESS) {

			goto lock_wait_or_error;
		}
	}

	if (page_rec_is_supremum(rec)) {

		/* A page supremum record cannot be in the result set: skip
		it now when we have placed a possible lock on it */

		goto next_rec;
	}

	ut_ad(page_rec_is_user_rec(rec));

	if (cost_counter > SEL_COST_LIMIT) {

		/* Now that we have placed the necessary locks, we can stop
		for a while and store the cursor position; NOTE that if we
		would store the cursor position BEFORE placing a record lock,
		it might happen that the cursor would jump over some records
		that another transaction could meanwhile insert adjacent to
		the cursor: this would result in the phantom problem. */
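
		/* cost_counter is incremented whenever a new B-tree search
		is made and whenever the cursor appears to have moved to a
		new index page; once it exceeds SEL_COST_LIMIT, the select
		stores its cursor position here instead of continuing
		indefinitely within this call. */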
		goto stop_for_a_while;
	}

	/* PHASE 2: Check a mixed index mix id if needed */

	if (plan->unique_search && cursor_just_opened) {

		ut_ad(plan->mode == PAGE_CUR_GE);

		/* As the cursor is now placed on a user record after a
		search with the mode PAGE_CUR_GE, the up_match field in the
		cursor tells how many fields in the user record matched to
		the search tuple */

		if (btr_pcur_get_up_match(&(plan->pcur))
						< plan->n_exact_match) {
			goto table_exhausted;
		}

		/* Ok, no need to test end_conds or mix id */

	} else if (plan->mixed_index) {
		/* We have to check if the record in a mixed cluster belongs
		to this table */

		if (!dict_is_mixed_table_rec(plan->table, rec)) {

			goto next_rec;
		}
	}

	/* We are ready to look at a possible new index entry in the result
	set: the cursor is now placed on a user record */

	/* PHASE 3: Get previous version in a consistent read */

	cons_read_requires_clust_rec = FALSE;

	offsets = rec_get_offsets(rec, index, offsets,
					ULINT_UNDEFINED, &heap);

	if (consistent_read) {
		/* This is a non-locking consistent read: if necessary, fetch
		a previous version of the record */

		if (index->type & DICT_CLUSTERED) {

			if (!lock_clust_rec_cons_read_sees(rec, index,
						offsets, node->read_view)) {

				err = row_sel_build_prev_vers(node->read_view,
							plan, rec,
							&offsets, &heap,
							&old_vers, &mtr);
				if (err != DB_SUCCESS) {

					goto lock_wait_or_error;
				}

				if (old_vers == NULL) {
					offsets = rec_get_offsets(
						rec, index, offsets,
						ULINT_UNDEFINED, &heap);
					row_sel_fetch_columns(index, rec,
						offsets,
						UT_LIST_GET_FIRST(
							plan->columns));

					if (!row_sel_test_end_conds(plan)) {

						goto table_exhausted;
					}

					goto next_rec;
				}

				rec = old_vers;
			}
		} else if (!lock_sec_rec_cons_read_sees(rec, index,
							node->read_view)) {
			cons_read_requires_clust_rec = TRUE;
		}
	}

	/* PHASE 4: Test search end conditions and deleted flag */

	/* Fetch the columns needed in test conditions */

	row_sel_fetch_columns(index, rec, offsets,
					UT_LIST_GET_FIRST(plan->columns));

	/* Test the selection end conditions: these can only contain columns
	which already are found in the index, even though the index might be
	non-clustered */

	if (plan->unique_search && cursor_just_opened) {

		/* No test necessary: the test was already made above */

	} else if (!row_sel_test_end_conds(plan)) {

		goto table_exhausted;
	}

	if (rec_get_deleted_flag(rec, plan->table->comp)
			&& !cons_read_requires_clust_rec) {

		/* The record is delete marked: we can skip it if this is
		not a consistent read which might see an earlier version
		of a non-clustered index record */

		if (plan->unique_search) {

			goto table_exhausted;
		}

		goto next_rec;
	}

	/* PHASE 5: Get the clustered index record, if needed and if we did
	not do the search using the clustered index */

	if (plan->must_get_clust || cons_read_requires_clust_rec) {

		/* It was a non-clustered index and we must fetch also the
		clustered index record */

		err = row_sel_get_clust_rec(node, plan, rec, thr, &clust_rec,
									&mtr);
		mtr_has_extra_clust_latch = TRUE;

		if (err != DB_SUCCESS) {

			goto lock_wait_or_error;
		}

		/* Retrieving the clustered record required a search:
		increment the cost counter */

		cost_counter++;

		if (clust_rec == NULL) {
			/* The record did not exist in the read view */

			ut_ad(consistent_read);

			goto next_rec;
		}

		if (rec_get_deleted_flag(clust_rec, plan->table->comp)) {

			/* The record is delete marked: we can skip it */

			goto next_rec;
		}

		if (node->can_get_updated) {

			btr_pcur_store_position(&(plan->clust_pcur), &mtr);
		}
	}

	/* PHASE 6: Test the rest of search conditions */

	if (!row_sel_test_other_conds(plan)) {

		if (plan->unique_search) {