/* lock0lock.c -- the InnoDB transaction lock system.
   (Code-viewer residue: the extracted page header also carried the UI
   label "字号:" = "font size:", which is not part of the source.) */
	/* NOTE(review): tail of the lock-mode strength comparison function;
	its start lies above the visible chunk, so the tokens below are kept
	unchanged and only reformatted. */
	return(TRUE);
	} else if (mode1 == LOCK_IS && mode2 == LOCK_IS) {

		return(TRUE);
	} else if (mode1 == LOCK_IX
			&& (mode2 == LOCK_IX || mode2 == LOCK_IS)) {

		return(TRUE);
	}

	return(FALSE);
}

/*************************************************************************
Calculates if lock mode 1 is compatible with lock mode 2. */
UNIV_INLINE
ibool
lock_mode_compatible(
/*=================*/
			/* out: TRUE if mode1 compatible with mode2 */
	ulint	mode1,	/* in: lock mode */
	ulint	mode2)	/* in: lock mode */
{
	/* Both arguments must be one of the five table lock modes. */
	ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
				|| mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
	ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
				|| mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);

	/* The multi-granularity compatibility matrix encoded below:
	S  is compatible with IS and S;
	X  is compatible with nothing;
	AUTO_INC is compatible with IS and IX;
	IS is compatible with IS, IX, AUTO_INC and S;
	IX is compatible with IS, AUTO_INC and IX. */

	if (mode1 == LOCK_S && (mode2 == LOCK_IS || mode2 == LOCK_S)) {

		return(TRUE);

	} else if (mode1 == LOCK_X) {

		return(FALSE);

	} else if (mode1 == LOCK_AUTO_INC && (mode2 == LOCK_IS
						|| mode2 == LOCK_IX)) {
		return(TRUE);

	} else if (mode1 == LOCK_IS && (mode2 == LOCK_IS
					|| mode2 == LOCK_IX
					|| mode2 == LOCK_AUTO_INC
					|| mode2 == LOCK_S)) {
		return(TRUE);

	} else if (mode1 == LOCK_IX && (mode2 == LOCK_IS
					|| mode2 == LOCK_AUTO_INC
					|| mode2 == LOCK_IX)) {
		return(TRUE);
	}

	return(FALSE);
}

/*************************************************************************
Checks if a lock request for a new lock has to wait for request lock2.
*/
UNIV_INLINE
ibool
lock_rec_has_to_wait(
/*=================*/
			/* out: TRUE if new lock has to wait for lock2 to be
			removed */
	trx_t*	trx,	/* in: trx of new lock */
	ulint	type_mode,/* in: precise mode of the new lock to set:
			LOCK_S or LOCK_X, possibly ORed to
			LOCK_GAP or LOCK_REC_NOT_GAP, LOCK_INSERT_INTENTION */
	lock_t*	lock2,	/* in: another record lock; NOTE that it is assumed
			that this has a lock bit set on the same record as
			in the new lock we are setting */
	ibool lock_is_on_supremum)  /* in: TRUE if we are setting the lock
			on the 'supremum' record of an index
			page: we know then that the lock request
			is really for a 'gap' type lock */
{
	ut_ad(trx && lock2);
	ut_ad(lock_get_type(lock2) == LOCK_REC);

	/* A wait can only arise between different transactions holding
	incompatible basic modes; otherwise fall through to FALSE. */
	if (trx != lock2->trx
	    && !lock_mode_compatible(LOCK_MODE_MASK & type_mode,
						lock_get_mode(lock2))) {

		/* We have somewhat complex rules when gap type record locks
		cause waits */

		if ((lock_is_on_supremum || (type_mode & LOCK_GAP))
			&& !(type_mode & LOCK_INSERT_INTENTION)) {

			/* Gap type locks without LOCK_INSERT_INTENTION flag
			do not need to wait for anything. This is because
			different users can have conflicting lock types
			on gaps. */

			return(FALSE);
		}

		if (!(type_mode & LOCK_INSERT_INTENTION)
				&& lock_rec_get_gap(lock2)) {

			/* Record lock (LOCK_ORDINARY or LOCK_REC_NOT_GAP
			does not need to wait for a gap type lock */

			return(FALSE);
		}

		if ((type_mode & LOCK_GAP)
				&& lock_rec_get_rec_not_gap(lock2)) {

			/* Lock on gap does not need to wait for
			a LOCK_REC_NOT_GAP type lock */

			return(FALSE);
		}

		if (lock_rec_get_insert_intention(lock2)) {

			/* No lock request needs to wait for an insert
			intention lock to be removed. This is ok since our
			rules allow conflicting locks on gaps. This eliminates
			a spurious deadlock caused by a next-key lock waiting
			for an insert intention lock; when the insert
			intention lock was granted, the insert deadlocked on
			the waiting next-key lock.

			Also, insert intention locks do not disturb each
			other. */

			return(FALSE);
		}

		return(TRUE);
	}

	return(FALSE);
}

/*************************************************************************
Checks if a lock request lock1 has to wait for request lock2. */
static
ibool
lock_has_to_wait(
/*=============*/
			/* out: TRUE if lock1 has to wait for lock2 to be
			removed */
	lock_t*	lock1,	/* in: waiting lock */
	lock_t*	lock2)	/* in: another lock; NOTE that it is assumed that this
			has a lock bit set on the same record as in lock1 if
			the locks are record locks */
{
	ut_ad(lock1 && lock2);

	if (lock1->trx != lock2->trx
			&& !lock_mode_compatible(lock_get_mode(lock1),
						lock_get_mode(lock2))) {
		if (lock_get_type(lock1) == LOCK_REC) {
			ut_ad(lock_get_type(lock2) == LOCK_REC);

			/* If this lock request is for a supremum record
			then the second bit on the lock bitmap is set */

			return(lock_rec_has_to_wait(lock1->trx,
					lock1->type_mode, lock2,
					lock_rec_get_nth_bit(lock1,1)));
		}

		/* Table locks with incompatible modes always conflict. */
		return(TRUE);
	}

	return(FALSE);
}

/*============== RECORD LOCK BASIC FUNCTIONS ============================*/

/*************************************************************************
Gets the number of bits in a record lock bitmap. */
UNIV_INLINE
ulint
lock_rec_get_n_bits(
/*================*/
			/* out: number of bits */
	lock_t*	lock)	/* in: record lock */
{
	return(lock->un_member.rec_lock.n_bits);
}

/**************************************************************************
Sets the nth bit of a record lock to TRUE. */
UNIV_INLINE
void
lock_rec_set_nth_bit(
/*==================*/
	lock_t*	lock,	/* in: record lock */
	ulint	i)	/* in: index of the bit */
{
	ulint	byte_index;
	ulint	bit_index;
	byte*	ptr;
	ulint	b;

	ut_ad(lock);
	ut_ad(lock_get_type(lock) == LOCK_REC);
	ut_ad(i < lock->un_member.rec_lock.n_bits);

	byte_index = i / 8;
	bit_index = i % 8;

	/* The record-lock bitmap is laid out in memory immediately after
	the lock_t struct itself. */
	ptr = (byte*)lock + sizeof(lock_t) + byte_index;

	b = (ulint)*ptr;

	b = ut_bit_set_nth(b, bit_index, TRUE);

	*ptr = (byte)b;
}

/**************************************************************************
Looks for a set bit in a record lock bitmap.
Returns ULINT_UNDEFINED,if none found. */staticulintlock_rec_find_set_bit(/*==================*/ /* out: bit index == heap number of the record, or ULINT_UNDEFINED if none found */ lock_t* lock) /* in: record lock with at least one bit set */{ ulint i; for (i = 0; i < lock_rec_get_n_bits(lock); i++) { if (lock_rec_get_nth_bit(lock, i)) { return(i); } } return(ULINT_UNDEFINED);}/**************************************************************************Resets the nth bit of a record lock. */UNIV_INLINEvoidlock_rec_reset_nth_bit(/*===================*/ lock_t* lock, /* in: record lock */ ulint i) /* in: index of the bit which must be set to TRUE when this function is called */{ ulint byte_index; ulint bit_index; byte* ptr; ulint b; ut_ad(lock); ut_ad(lock_get_type(lock) == LOCK_REC); ut_ad(i < lock->un_member.rec_lock.n_bits); byte_index = i / 8; bit_index = i % 8; ptr = (byte*)lock + sizeof(lock_t) + byte_index; b = (ulint)*ptr; b = ut_bit_set_nth(b, bit_index, FALSE); *ptr = (byte)b;} /*************************************************************************Gets the first or next record lock on a page. */UNIV_INLINElock_t*lock_rec_get_next_on_page(/*======================*/ /* out: next lock, NULL if none exists */ lock_t* lock) /* in: a record lock */{ ulint space; ulint page_no;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); space = lock->un_member.rec_lock.space; page_no = lock->un_member.rec_lock.page_no; for (;;) { lock = HASH_GET_NEXT(hash, lock); if (!lock) { break; } if ((lock->un_member.rec_lock.space == space) && (lock->un_member.rec_lock.page_no == page_no)) { break; } } return(lock);}/*************************************************************************Gets the first record lock on a page, where the page is identified by itsfile address. 
*/UNIV_INLINElock_t*lock_rec_get_first_on_page_addr(/*============================*/ /* out: first lock, NULL if none exists */ ulint space, /* in: space */ ulint page_no)/* in: page number */{ lock_t* lock;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ lock = HASH_GET_FIRST(lock_sys->rec_hash, lock_rec_hash(space, page_no)); while (lock) { if ((lock->un_member.rec_lock.space == space) && (lock->un_member.rec_lock.page_no == page_no)) { break; } lock = HASH_GET_NEXT(hash, lock); } return(lock);} /*************************************************************************Returns TRUE if there are explicit record locks on a page. */iboollock_rec_expl_exist_on_page(/*========================*/ /* out: TRUE if there are explicit record locks on the page */ ulint space, /* in: space id */ ulint page_no)/* in: page number */{ ibool ret; mutex_enter(&kernel_mutex); if (lock_rec_get_first_on_page_addr(space, page_no)) { ret = TRUE; } else { ret = FALSE; } mutex_exit(&kernel_mutex); return(ret);}/*************************************************************************Gets the first record lock on a page, where the page is identified by apointer to it. */UNIV_INLINElock_t*lock_rec_get_first_on_page(/*=======================*/ /* out: first lock, NULL if none exists */ byte* ptr) /* in: pointer to somewhere on the page */{ ulint hash; lock_t* lock; ulint space; ulint page_no;#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex));#endif /* UNIV_SYNC_DEBUG */ hash = buf_frame_get_lock_hash_val(ptr); lock = HASH_GET_FIRST(lock_sys->rec_hash, hash); while (lock) { space = buf_frame_get_space_id(ptr); page_no = buf_frame_get_page_no(ptr); if ((lock->un_member.rec_lock.space == space) && (lock->un_member.rec_lock.page_no == page_no)) { break; } lock = HASH_GET_NEXT(hash, lock); } return(lock);}/*************************************************************************Gets the next explicit lock request on a record. 
*/
UNIV_INLINE
lock_t*
lock_rec_get_next(
/*==============*/
			/* out: next lock, NULL if none exists */
	rec_t*	rec,	/* in: record on a page */
	lock_t*	lock)	/* in: lock */
{
	ulint	heap_no;
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(lock_get_type(lock) == LOCK_REC);

	/* The heap number of rec does not change while we walk the lock
	chain: compute it once up front. (The old code recomputed it on
	every loop iteration, in two duplicated branches for the compact
	and the old-style record format.) */
	if (page_rec_is_comp(rec)) {
		heap_no = rec_get_heap_no(rec, TRUE);
	} else {
		heap_no = rec_get_heap_no(rec, FALSE);
	}

	/* Advance along the page's lock list until a lock with the bit for
	this record set is found, or the list is exhausted. */
	do {
		lock = lock_rec_get_next_on_page(lock);
	} while (lock && !lock_rec_get_nth_bit(lock, heap_no));

	return(lock);
}

/*************************************************************************
Gets the first explicit lock request on a record. */
UNIV_INLINE
/* Code-viewer UI residue (not part of the original source), translated
   from Chinese:
   Keyboard shortcuts -- copy code: Ctrl+C; search code: Ctrl+F;
   full-screen mode: F11; toggle theme: Ctrl+Shift+D; show shortcuts: ?;
   increase font size: Ctrl+=; decrease font size: Ctrl+- */