📄 lock0lock.c
字号:
lock_t*lock_rec_get_first(/*===============*/ /* out: first lock, NULL if none exists */ rec_t* rec) /* in: record on a page */{ lock_t* lock;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first_on_page(rec); if (UNIV_LIKELY_NULL(lock)) { ulint heap_no = rec_get_heap_no(rec, page_rec_is_comp(rec)); while (lock && !lock_rec_get_nth_bit(lock, heap_no)) { lock = lock_rec_get_next_on_page(lock); } } return(lock);}/*************************************************************************Resets the record lock bitmap to zero. NOTE: does not touch the wait_lockpointer in the transaction! This function is used in lock object creationand resetting. */staticvoidlock_rec_bitmap_reset(/*==================*/ lock_t* lock) /* in: record lock */{ byte* ptr; ulint n_bytes; ulint i; ut_ad(lock_get_type(lock) == LOCK_REC); /* Reset to zero the bitmap which resides immediately after the lock struct */ ptr = (byte*)lock + sizeof(lock_t); n_bytes = lock_rec_get_n_bits(lock) / 8; ut_ad((lock_rec_get_n_bits(lock) % 8) == 0); for (i = 0; i < n_bytes; i++) { *ptr = 0; ptr++; }}/*************************************************************************Copies a record lock to heap. */staticlock_t*lock_rec_copy(/*==========*/ /* out: copy of lock */ lock_t* lock, /* in: record lock */ mem_heap_t* heap) /* in: memory heap */{ lock_t* dupl_lock; ulint size; ut_ad(lock_get_type(lock) == LOCK_REC); size = sizeof(lock_t) + lock_rec_get_n_bits(lock) / 8; dupl_lock = mem_heap_alloc(heap, size); ut_memcpy(dupl_lock, lock, size); return(dupl_lock);}/*************************************************************************Gets the previous record lock set on a record. 
*/staticlock_t*lock_rec_get_prev(/*==============*/ /* out: previous lock on the same record, NULL if none exists */ lock_t* in_lock,/* in: record lock */ ulint heap_no)/* in: heap number of the record */{ lock_t* lock; ulint space; ulint page_no; lock_t* found_lock = NULL;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); space = in_lock->un_member.rec_lock.space; page_no = in_lock->un_member.rec_lock.page_no; lock = lock_rec_get_first_on_page_addr(space, page_no); for (;;) { ut_ad(lock); if (lock == in_lock) { return(found_lock); } if (lock_rec_get_nth_bit(lock, heap_no)) { found_lock = lock; } lock = lock_rec_get_next_on_page(lock); } }/*============= FUNCTIONS FOR ANALYZING TABLE LOCK QUEUE ================*//*************************************************************************Checks if a transaction has the specified table lock, or stronger. */UNIV_INLINElock_t*lock_table_has(/*===========*/ /* out: lock or NULL */ trx_t* trx, /* in: transaction */ dict_table_t* table, /* in: table */ ulint mode) /* in: lock mode */{ lock_t* lock;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ /* Look for stronger locks the same trx already has on the table */ lock = UT_LIST_GET_LAST(table->locks); while (lock != NULL) { if (lock->trx == trx && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) { /* The same trx already has locked the table in a mode stronger or equal to the mode given */ ut_ad(!lock_get_wait(lock)); return(lock); } lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock); } return(NULL);} /*============= FUNCTIONS FOR ANALYZING RECORD LOCK QUEUE ================*//*************************************************************************Checks if a transaction has a GRANTED explicit lock on rec stronger or equalto precise_mode. 
*/
UNIV_INLINE
lock_t*
lock_rec_has_expl(
/*==============*/
			/* out: lock or NULL */
	ulint	precise_mode,/* in: LOCK_S or LOCK_X possibly ORed to
			LOCK_GAP or LOCK_REC_NOT_GAP, for a
			supremum record we regard this always a gap
			type request */
	rec_t*	rec,	/* in: record */
	trx_t*	trx)	/* in: transaction */
{
	lock_t*	lock;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
	      || (precise_mode & LOCK_MODE_MASK) == LOCK_X);
	ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));

	for (lock = lock_rec_get_first(rec);
	     lock != NULL;
	     lock = lock_rec_get_next(rec, lock)) {

		/* Only granted, non-insert-intention locks held by
		this trx qualify */

		if (lock->trx != trx
		    || lock_get_wait(lock)
		    || lock_rec_get_insert_intention(lock)) {

			continue;
		}

		if (!lock_mode_stronger_or_eq(lock_get_mode(lock),
					      precise_mode & LOCK_MODE_MASK)) {

			continue;
		}

		/* A rec-not-gap lock satisfies the request only if the
		request itself is rec-not-gap, or rec is the supremum
		(on which every lock is implicitly of the gap type) */

		if (lock_rec_get_rec_not_gap(lock)
		    && !(precise_mode & LOCK_REC_NOT_GAP)
		    && !page_rec_is_supremum(rec)) {

			continue;
		}

		/* Symmetrically for a pure gap lock */

		if (lock_rec_get_gap(lock)
		    && !(precise_mode & LOCK_GAP)
		    && !page_rec_is_supremum(rec)) {

			continue;
		}

		return(lock);
	}

	return(NULL);
}

/*************************************************************************
Checks if some other transaction has a lock request in the queue.
*/staticlock_t*lock_rec_other_has_expl_req(/*========================*/ /* out: lock or NULL */ ulint mode, /* in: LOCK_S or LOCK_X */ ulint gap, /* in: LOCK_GAP if also gap locks are taken into account, or 0 if not */ ulint wait, /* in: LOCK_WAIT if also waiting locks are taken into account, or 0 if not */ rec_t* rec, /* in: record to look at */ trx_t* trx) /* in: transaction, or NULL if requests by all transactions are taken into account */{ lock_t* lock; #ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ ut_ad(mode == LOCK_X || mode == LOCK_S); ut_ad(gap == 0 || gap == LOCK_GAP); ut_ad(wait == 0 || wait == LOCK_WAIT); lock = lock_rec_get_first(rec); while (lock) { if (lock->trx != trx && (gap || !(lock_rec_get_gap(lock) || page_rec_is_supremum(rec))) && (wait || !lock_get_wait(lock)) && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) { return(lock); } lock = lock_rec_get_next(rec, lock); } return(NULL);}/*************************************************************************Checks if some other transaction has a conflicting explicit lock requestin the queue, so that we have to wait. */staticlock_t*lock_rec_other_has_conflicting(/*===========================*/ /* out: lock or NULL */ ulint mode, /* in: LOCK_S or LOCK_X, possibly ORed to LOCK_GAP or LOC_REC_NOT_GAP, LOCK_INSERT_INTENTION */ rec_t* rec, /* in: record to look at */ trx_t* trx) /* in: our transaction */{ lock_t* lock;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); while (lock) { if (lock_rec_has_to_wait(trx, mode, lock, page_rec_is_supremum(rec))) { return(lock); } lock = lock_rec_get_next(rec, lock); } return(NULL);}/*************************************************************************Looks for a suitable type record lock struct by the same trx on the same page.This can be used to save space when a new record lock should be set on a page:no new struct is needed, if a suitable old is found. 
*/
UNIV_INLINE
lock_t*
lock_rec_find_similar_on_page(
/*==========================*/
				/* out: lock or NULL */
	ulint	type_mode,	/* in: lock type_mode field */
	rec_t*	rec,		/* in: record */
	trx_t*	trx)		/* in: transaction */
{
	lock_t*	lock;
	ulint	heap_no;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */

	heap_no = rec_get_heap_no(rec, page_rec_is_comp(rec));
	lock = lock_rec_get_first_on_page(rec);

	/* A lock struct is reusable only if it belongs to the same trx,
	has exactly the same type_mode, and its bitmap is large enough
	to cover this record's heap number */

	while (lock != NULL) {
		if (lock->trx == trx
		    && lock->type_mode == type_mode
		    && lock_rec_get_n_bits(lock) > heap_no) {

			return(lock);
		}

		lock = lock_rec_get_next_on_page(lock);
	}

	return(NULL);
}

/*************************************************************************
Checks if some transaction has an implicit x-lock on a record in a secondary
index. */

trx_t*
lock_sec_rec_some_has_impl_off_kernel(
/*==================================*/
				/* out: transaction which has the x-lock, or
				NULL */
	rec_t*		rec,	/* in: user record */
	dict_index_t*	index,	/* in: secondary index */
	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
{
	page_t*	page;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(!(index->type & DICT_CLUSTERED));
	ut_ad(page_rec_is_user_rec(rec));
	ut_ad(rec_offs_validate(rec, index, offsets));

	page = buf_frame_align(rec);

	/* Some transaction may have an implicit x-lock on the record only
	if the max trx id for the page >= min trx id for the trx list, or
	database recovery is running. We do not write the changes of a page
	max trx id to the log, and therefore during recovery, this value
	for a page may be incorrect. */

	if (!(ut_dulint_cmp(page_get_max_trx_id(page),
			    trx_list_get_min_trx_id()) >= 0)
	    && !recv_recovery_is_on()) {

		return(NULL);
	}

	/* Ok, in this case it is possible that some transaction has an
	implicit x-lock. We have to look in the clustered index. */

	/* Guard against a corrupt page whose max trx id is implausible;
	better to miss an implicit lock than to crash */

	if (!lock_check_trx_id_sanity(page_get_max_trx_id(page),
				      rec, index, offsets, TRUE)) {
		buf_page_print(page);

		/* The page is corrupt: try to avoid a crash by returning
		NULL */
		return(NULL);
	}

	return(row_vers_impl_x_locked_off_kernel(rec, index, offsets));
}

/*============== RECORD LOCK CREATION AND QUEUE MANAGEMENT =============*/

/*************************************************************************
Creates a new record lock and inserts it to the lock queue. Does NOT check
for deadlocks or lock compatibility! */
static
lock_t*
lock_rec_create(
/*============*/
				/* out: created lock, NULL if out of memory */
	ulint		type_mode,/* in: lock mode and wait flag, type is
				ignored and replaced by LOCK_REC */
	rec_t*		rec,	/* in: record on page */
	dict_index_t*	index,	/* in: index of record */
	trx_t*		trx)	/* in: transaction */
{
	page_t*	page;
	lock_t*	lock;
	ulint	page_no;
	ulint	heap_no;
	ulint	space;
	ulint	n_bits;
	ulint	n_bytes;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */

	page = buf_frame_align(rec);
	space = buf_frame_get_space_id(page);
	page_no = buf_frame_get_page_no(page);
	heap_no = rec_get_heap_no(rec, page_is_comp(page));

	ut_ad(!!page_is_comp(page) == index->table->comp);

	/* If rec is the supremum record, then we reset the gap and
	LOCK_REC_NOT_GAP bits, as all locks on the supremum are
	automatically of the gap type */

	if (rec == page_get_supremum_rec(page)) {
		ut_ad(!(type_mode & LOCK_REC_NOT_GAP));

		type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
	}

	/* Make lock bitmap bigger by a safety margin */
	n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
	n_bytes = 1 + n_bits / 8;

	/* The lock struct and its bitmap are allocated together from the
	transaction's lock heap */

	lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes);

	if (UNIV_UNLIKELY(lock == NULL)) {

		return(NULL);
	}

	UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -