
📄 btr0cur.c

📁 This is the MySQL software package that runs on Linux; it can be used when setting up a PHP + MySQL + Apache web stack on Linux.
💻 C
📖 Page 1 of 5
	if (UNIV_UNLIKELY(err != DB_SUCCESS)) {

		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}

		return(err);
	}

	block = buf_block_align(rec);
	ut_ad(!!page_is_comp(buf_block_get_frame(block))
				== index->table->comp);

	if (block->is_hashed) {
		/* The function row_upd_changes_ord_field_binary works only
		if the update vector was built for a clustered index, we must
		NOT call it if index is secondary */

		if (!(index->type & DICT_CLUSTERED)
		    || row_upd_changes_ord_field_binary(NULL, index, update)) {

			/* Remove possible hash index pointer to this record */
			btr_search_update_hash_on_delete(cursor);
		}

		rw_lock_x_lock(&btr_search_latch);
	}

	if (!(flags & BTR_KEEP_SYS_FLAG)) {
		row_upd_rec_sys_fields(rec, index, offsets, trx, roll_ptr);
	}

	/* FIXME: in a mixed tree, all records may not have enough ordering
	fields for btr search: */

	was_delete_marked = rec_get_deleted_flag(rec,
				page_is_comp(buf_block_get_frame(block)));

	row_upd_rec_in_place(rec, offsets, update);

	if (block->is_hashed) {
		rw_lock_x_unlock(&btr_search_latch);
	}

	btr_cur_update_in_place_log(flags, rec, index, update, trx, roll_ptr,
									mtr);
	if (was_delete_marked && !rec_get_deleted_flag(rec,
				page_is_comp(buf_block_get_frame(block)))) {
		/* The new updated record owns its possible externally
		stored fields */

		btr_cur_unmark_extern_fields(rec, mtr, offsets);
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(DB_SUCCESS);
}

/*****************************************************************
Tries to update a record on a page in an index tree. It is assumed that mtr
holds an x-latch on the page. The operation does not succeed if there is too
little space on the page or if the update would result in too empty a page,
so that tree compression is recommended. We assume here that the ordering
fields of the record do not change. */
ulint
btr_cur_optimistic_update(
/*======================*/
				/* out: DB_SUCCESS, or DB_OVERFLOW if the
				updated record does not fit, DB_UNDERFLOW
				if the page would become too empty */
	ulint		flags,	/* in: undo logging and locking flags */
	btr_cur_t*	cursor,	/* in: cursor on the record to update;
				cursor stays valid and positioned on the
				same record */
	upd_t*		update,	/* in: update vector; this must also
				contain trx id and roll ptr fields */
	ulint		cmpl_info,/* in: compiler info on secondary index
				updates */
	que_thr_t*	thr,	/* in: query thread */
	mtr_t*		mtr)	/* in: mtr */
{
	dict_index_t*	index;
	page_cur_t*	page_cursor;
	ulint		err;
	page_t*		page;
	rec_t*		rec;
	ulint		max_size;
	ulint		new_rec_size;
	ulint		old_rec_size;
	dtuple_t*	new_entry;
	dulint		roll_ptr;
	trx_t*		trx;
	mem_heap_t*	heap;
	ibool		reorganized	= FALSE;
	ulint		i;
	ulint*		offsets;

	page = btr_cur_get_page(cursor);
	rec = btr_cur_get_rec(cursor);
	index = cursor->index;
	ut_ad(!!page_rec_is_comp(rec) == index->table->comp);

	heap = mem_heap_create(1024);
	offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);

#ifdef UNIV_DEBUG
	if (btr_cur_print_record_ops && thr) {
		btr_cur_trx_report(thr_get_trx(thr), index, "update ");
		rec_print_new(stderr, rec, offsets);
	}
#endif /* UNIV_DEBUG */

	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
							MTR_MEMO_PAGE_X_FIX));
	if (!row_upd_changes_field_size_or_external(index, offsets, update)) {

		/* The simplest and the most common case: the update does not
		change the size of any field and none of the updated fields is
		externally stored in rec or update */

		mem_heap_free(heap);
		return(btr_cur_update_in_place(flags, cursor, update,
							cmpl_info, thr, mtr));
	}

	for (i = 0; i < upd_get_n_fields(update); i++) {
		if (upd_get_nth_field(update, i)->extern_storage) {

			/* Externally stored fields are treated in pessimistic
			update */

			mem_heap_free(heap);
			return(DB_OVERFLOW);
		}
	}

	if (rec_offs_any_extern(offsets)) {
		/* Externally stored fields are treated in pessimistic
		update */

		mem_heap_free(heap);
		return(DB_OVERFLOW);
	}

	page_cursor = btr_cur_get_page_cur(cursor);

	new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

	row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
									NULL);
	old_rec_size = rec_offs_size(offsets);
	new_rec_size = rec_get_converted_size(index, new_entry);

	if (UNIV_UNLIKELY(new_rec_size >= page_get_free_space_of_empty(
					page_is_comp(page)) / 2)) {

		mem_heap_free(heap);

		return(DB_OVERFLOW);
	}

	max_size = old_rec_size
			+ page_get_max_insert_size_after_reorganize(page, 1);

	if (UNIV_UNLIKELY(page_get_data_size(page)
					- old_rec_size + new_rec_size
					< BTR_CUR_PAGE_COMPRESS_LIMIT)) {

		/* The page would become too empty */

		mem_heap_free(heap);

		return(DB_UNDERFLOW);
	}

	if (!(((max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT)
					&& (max_size >= new_rec_size))
	      || (page_get_n_recs(page) <= 1))) {

		/* There was not enough space, or it did not pay to
		reorganize: for simplicity, we decide what to do assuming a
		reorganization is needed, though it might not be necessary */

		mem_heap_free(heap);

		return(DB_OVERFLOW);
	}

	/* Do lock checking and undo logging */
	err = btr_cur_upd_lock_and_undo(flags, cursor, update, cmpl_info, thr,
								&roll_ptr);
	if (err != DB_SUCCESS) {

		mem_heap_free(heap);

		return(err);
	}

	/* Ok, we may do the replacement. Store on the page infimum the
	explicit locks on rec, before deleting rec (see the comment in
	.._pessimistic_update). */
	lock_rec_store_on_page_infimum(page, rec);

	btr_search_update_hash_on_delete(cursor);

	page_cur_delete_rec(page_cursor, index, offsets, mtr);

	page_cur_move_to_prev(page_cursor);

	trx = thr_get_trx(thr);

	if (!(flags & BTR_KEEP_SYS_FLAG)) {
		row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
								roll_ptr);
		row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
								trx->id);
	}

	rec = btr_cur_insert_if_possible(cursor, new_entry, &reorganized, mtr);

	ut_a(rec); /* <- We calculated above the insert would fit */

	if (!rec_get_deleted_flag(rec, page_is_comp(page))) {
		/* The new inserted record owns its possible externally
		stored fields */

		offsets = rec_get_offsets(rec, index, offsets,
						ULINT_UNDEFINED, &heap);
		btr_cur_unmark_extern_fields(rec, mtr, offsets);
	}

	/* Restore the old explicit lock state on the record */

	lock_rec_restore_from_page_infimum(rec, page);

	page_cur_move_to_next(page_cursor);

	mem_heap_free(heap);

	return(DB_SUCCESS);
}

/*****************************************************************
If, in a split, a new supremum record was created as the predecessor of the
updated record, the supremum record must inherit exactly the locks on the
updated record. In the split it may have inherited locks from the successor
of the updated record, which is not correct. This function restores the
right locks for the new supremum. */
static
void
btr_cur_pess_upd_restore_supremum(
/*==============================*/
	rec_t*	rec,	/* in: updated record */
	mtr_t*	mtr)	/* in: mtr */
{
	page_t*	page;
	page_t*	prev_page;
	ulint	space;
	ulint	prev_page_no;

	page = buf_frame_align(rec);

	if (page_rec_get_next(page_get_infimum_rec(page)) != rec) {
		/* Updated record is not the first user record on its page */

		return;
	}

	space = buf_frame_get_space_id(page);
	prev_page_no = btr_page_get_prev(page, mtr);

	ut_ad(prev_page_no != FIL_NULL);
	prev_page = buf_page_get_with_no_latch(space, prev_page_no, mtr);

	/* We must already have an x-latch to prev_page! */
	ut_ad(mtr_memo_contains(mtr, buf_block_align(prev_page),
						MTR_MEMO_PAGE_X_FIX));

	lock_rec_reset_and_inherit_gap_locks(page_get_supremum_rec(prev_page),
									rec);
}

/*****************************************************************
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
update is made on the leaf level, to avoid deadlocks, mtr must also
own x-latches to brothers of page, if those brothers exist. We assume
here that the ordering fields of the record do not change. */
ulint
btr_cur_pessimistic_update(
/*=======================*/
				/* out: DB_SUCCESS or error code */
	ulint		flags,	/* in: undo logging, locking, and rollback
				flags */
	btr_cur_t*	cursor,	/* in: cursor on the record to update */
	big_rec_t**	big_rec,/* out: big rec vector whose fields have to
				be stored externally by the caller, or NULL */
	upd_t*		update,	/* in: update vector; this is allowed also
				contain trx id and roll ptr fields, but
				the values in update vector have no effect */
	ulint		cmpl_info,/* in: compiler info on secondary index
				updates */
	que_thr_t*	thr,	/* in: query thread */
	mtr_t*		mtr)	/* in: mtr */
{
	big_rec_t*	big_rec_vec	= NULL;
	big_rec_t*	dummy_big_rec;
	dict_index_t*	index;
	page_t*		page;
	dict_tree_t*	tree;
	rec_t*		rec;
	page_cur_t*	page_cursor;
	dtuple_t*	new_entry;
	mem_heap_t*	heap;
	ulint		err;
	ulint		optim_err;
	ibool		dummy_reorganized;
	dulint		roll_ptr;
	trx_t*		trx;
	ibool		was_first;
	ibool		success;
	ulint		n_extents	= 0;
	ulint		n_reserved;
	ulint*		ext_vect;
	ulint		n_ext_vect;
	ulint		reserve_flag;
	ulint*		offsets		= NULL;

	*big_rec = NULL;

	page = btr_cur_get_page(cursor);
	rec = btr_cur_get_rec(cursor);
	index = cursor->index;
	tree = index->tree;

	ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
							MTR_MEMO_X_LOCK));
	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
							MTR_MEMO_PAGE_X_FIX));

	optim_err = btr_cur_optimistic_update(flags, cursor, update,
							cmpl_info, thr, mtr);
	if (optim_err != DB_UNDERFLOW && optim_err != DB_OVERFLOW) {

		return(optim_err);
	}

	/* Do lock checking and undo logging */
	err = btr_cur_upd_lock_and_undo(flags, cursor, update, cmpl_info,
							thr, &roll_ptr);
	if (err != DB_SUCCESS) {

		return(err);
	}

	if (optim_err == DB_OVERFLOW) {
		/* First reserve enough free space for the file segments
		of the index tree, so that the update will not fail because
		of lack of space */

		n_extents = cursor->tree_height / 16 + 3;

		if (flags & BTR_NO_UNDO_LOG_FLAG) {
			reserve_flag = FSP_CLEANING;
		} else {
			reserve_flag = FSP_NORMAL;
		}

		success = fsp_reserve_free_extents(&n_reserved,
						index->space,
						n_extents, reserve_flag, mtr);
		if (!success) {
			err = DB_OUT_OF_FILE_SPACE;

			return(err);
		}
	}

	heap = mem_heap_create(1024);
	offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);

	trx = thr_get_trx(thr);

	new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

	row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
									heap);
	if (!(flags & BTR_KEEP_SYS_FLAG)) {
		row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
								roll_ptr);
		row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
								trx->id);
	}

	if (flags & BTR_NO_UNDO_LOG_FLAG) {
		/* We are in a transaction rollback undoing a row
		update: we must free possible externally stored fields
		which got new values in the update, if they are not
		inherited values. They can be inherited if we have
		updated the primary key to another value, and then
		update it back again. */
		ut_a(big_rec_vec == NULL);

		btr_rec_free_updated_extern_fields(index, rec, offsets,
							update, TRUE, mtr);
	}

	/* We have to set appropriate extern storage bits in the new
	record to be inserted: we have to remember which fields were such */

	ext_vect = mem_heap_alloc(heap, sizeof(ulint)
					* dict_index_get_n_fields(index));
	ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
	offsets = rec_get_offsets(rec, index, offsets,
					ULINT_UNDEFINED, &heap);
	n_ext_vect = btr_push_update_extern_fields(ext_vect, offsets, update);

	if (UNIV_UNLIKELY(rec_get_converted_size(index, new_entry) >=
		ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
			REC_MAX_DATA_SIZE))) {

		big_rec_vec = dtuple_convert_big_rec(index, new_entry,
							ext_vect, n_ext_vect);
		if (big_rec_vec == NULL) {

			err = DB_TOO_BIG_RECORD;
			goto return_after_reservations;
		}
	}

	page_cursor = btr_cur_get_page_cur(cursor);

	/* Store state of explicit locks on rec on the page infimum record,
	before deleting rec. The page infimum acts as a dummy carrier of the
	locks, taking care also of lock releases, before we can move the locks
	back on the actual record. There is a special case: if we are
	inserting on the root page and the insert causes a call of
	btr_root_raise_and_insert. Therefore we cannot in the lock system
	delete the lock structs set on the root page even if the root
	page carries just node pointers. */

	lock_rec_store_on_page_infimum(buf_frame_align(rec), rec);

	btr_search_update_hash_on_delete(cursor);

	page_cur_delete_rec(page_cursor, index, offsets, mtr);

	page_cur_move_to_prev(page_cursor);

	rec = btr_cur_insert_if_possible(cursor, new_entry,
						&dummy_reorganized, mtr);
	ut_a(rec || optim_err != DB_UNDERFLOW);

	if (rec) {
		lock_rec_restore_from_page_infimum(rec, page);
		rec_set_field_extern_bits(rec, index,
						ext_vect, n_ext_vect, mtr);

		offsets = rec_get_offsets(rec, index, offsets,
					ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
			/* The new inserted record owns its possible externally
			stored fields */
			btr_cur_unmark_extern_fields(rec, mtr, offsets);
		}

		btr_cur_compress_if_useful(cursor, mtr);

		err = DB_SUCCESS;
		goto return_after_reservations;
	}

	if (page_cur_is_before_first(page_cursor)) {
		/* The record to be updated was positioned as the first user
		record on its page */

		was_first = TRUE;
	} else {
		was_first = FALSE;
	}
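Note: as the truncated listing above shows, btr_cur_pessimistic_update() first calls btr_cur_optimistic_update() and only performs the expensive delete-and-reinsert work when that call returns DB_OVERFLOW or DB_UNDERFLOW. The standalone sketch below only illustrates that try-the-cheap-path-first control flow; it is not InnoDB code, and its function names and simplified integer error codes are hypothetical.

/* Minimal sketch (assumed names, not part of btr0cur.c) of the
   optimistic-then-pessimistic update pattern used above. */
#include <stdio.h>

enum { DB_SUCCESS, DB_OVERFLOW, DB_UNDERFLOW };

/* stand-in for the optimistic path: succeeds only if the new record
   version still fits in the free space of its page */
static int optimistic_update_sketch(int new_rec_size, int free_space)
{
	if (new_rec_size > free_space) {
		return DB_OVERFLOW;
	}
	return DB_SUCCESS;
}

/* stand-in for the pessimistic path: try the cheap route first and
   fall back to the heavyweight route only on overflow/underflow */
static int pessimistic_update_sketch(int new_rec_size, int free_space)
{
	int err = optimistic_update_sketch(new_rec_size, free_space);

	if (err != DB_OVERFLOW && err != DB_UNDERFLOW) {
		return err;	/* optimistic path already did the work */
	}

	/* ...the real function reserves file extents, moves oversized
	columns to external storage, deletes the old record and re-inserts
	the new version, splitting the page if necessary... */
	return DB_SUCCESS;
}

int main(void)
{
	printf("fits on page:           err = %d\n",
	       pessimistic_update_sketch(100, 400));
	printf("needs pessimistic path: err = %d\n",
	       pessimistic_update_sketch(500, 400));
	return 0;
}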
