
btr0sea.c

Part of the MySQL software package for Linux; it can be used when setting up a PHP + MySQL + Apache web stack on Linux.
Language: C
Page 1 of 3
/************************************************************************
The index tree adaptive search

(c) 1996 Innobase Oy

Created 2/17/1996 Heikki Tuuri
*************************************************************************/

#include "btr0sea.h"
#ifdef UNIV_NONINL
#include "btr0sea.ic"
#endif

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"

ulint	btr_search_this_is_zero = 0;	/* A dummy variable to fool the
					compiler */
#ifdef UNIV_SEARCH_PERF_STAT
ulint	btr_search_n_succ	= 0;
#endif /* UNIV_SEARCH_PERF_STAT */
ulint	btr_search_n_hash_fail	= 0;

byte	btr_sea_pad1[64];	/* padding to prevent other memory update
				hotspots from residing on the same memory
				cache line as btr_search_latch */

/* The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
indexes. */

rw_lock_t*	btr_search_latch_temp; /* We will allocate the latch from
					dynamic memory to get it to the
					same DRAM page as other hotspot
					semaphores */

byte	btr_sea_pad2[64];	/* padding to prevent other memory update
				hotspots from residing on the same memory
				cache line */

btr_search_sys_t*	btr_search_sys;

/* If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT	16

/* The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT		100

/* How many cells to check before temporarily releasing btr_search_latch */
#define BTR_CHUNK_SIZE			10000

/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/* in: index for which to build, or NULL if
				not known */
	page_t*		page,	/* in: index page, s- or x-latched */
	ulint		n_fields,/* in: hash this many full fields */
	ulint		n_bytes,/* in: hash this many bytes from the next
				field */
	ulint		side);	/* in: hash for searches from this side */
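/* A minimal illustrative sketch (this struct is hypothetical, not from the
InnoDB sources) of the false-sharing fix that btr_sea_pad1 and btr_sea_pad2
above apply to btr_search_latch: surround a contended object with pad bytes
so that writes to unrelated hot data cannot land on the same cache line. It
assumes the same 64-byte cache-line estimate the file itself uses. */
struct example_padded_latch {
	char		pad_before[64];	/* keeps preceding hot data off
					this cache line */
	rw_lock_t*	latch;		/* the contended hot object */
	char		pad_after[64];	/* keeps following hot data off
					this cache line */
};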
/*********************************************************************
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check makes
it probable that, when we have reserved the btr search system latch and we
need to allocate a new node to the hash table, it will succeed. However, the
check will not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	buf_frame_t*	frame;
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		frame = buf_frame_alloc();

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = frame;
		} else {
			buf_frame_free(frame);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}

/*********************************************************************
Creates and initializes the adaptive search system at a database start. */

void
btr_search_sys_create(
/*==================*/
	ulint	hash_size)	/* in: hash index hash table size */
{
	/* We allocate the search latch from dynamic memory:
	see above at the global variable definition */

	btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));

	rw_lock_create(&btr_search_latch);

	btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));

	btr_search_sys->hash_index = ha_create(TRUE, hash_size, 0, 0);

	rw_lock_set_level(&btr_search_latch, SYNC_SEARCH_SYS);
}

/*********************************************************************
Creates and initializes a search info struct. */

btr_search_t*
btr_search_info_create(
/*===================*/
				/* out, own: search info struct */
	mem_heap_t*	heap)	/* in: heap where created */
{
	btr_search_t*	info;

	info = mem_heap_alloc(heap, sizeof(btr_search_t));

	info->magic_n = BTR_SEARCH_MAGIC_N;

	info->last_search = NULL;
	info->n_direction = 0;
	info->root_guess = NULL;

	info->hash_analysis = 0;
	info->n_hash_potential = 0;

	info->last_hash_succ = FALSE;

	info->n_hash_succ = 0;
	info->n_hash_fail = 0;
	info->n_patt_succ = 0;
	info->n_searches = 0;

	/* Set some sensible values */
	info->n_fields = 1;
	info->n_bytes = 0;

	info->side = BTR_SEARCH_LEFT_SIDE;

	return(info);
}
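/* A minimal illustrative sketch (the helper name and the spare-pointer
parameter are hypothetical, not from the InnoDB sources) of the pattern used
by btr_search_check_free_space_in_heap() above: do the possibly-blocking
allocation while holding no latch, then install the result under the latch
only if it is still needed, freeing it if another thread got there first. */
static
void
example_reserve_spare_frame(
/*========================*/
	buf_frame_t**	spare)	/* in/out: cached spare frame, or NULL */
{
	buf_frame_t*	frame;

	if (*spare != NULL) {

		return;	/* fast path: unlatched peek, as in the original */
	}

	frame = buf_frame_alloc();	/* may block: done outside the latch */

	rw_lock_x_lock(&btr_search_latch);

	if (*spare == NULL) {
		*spare = frame;		/* we won the race: install ours */
	} else {
		buf_frame_free(frame);	/* another thread was faster */
	}

	rw_lock_x_unlock(&btr_search_latch);
}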
/*************************************************************************
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t*	info,	/* in/out: search info */
	btr_cur_t*	cursor)	/* in: cursor which was just positioned */
{
	dict_index_t*	index;
	ulint		n_unique;
	int		cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (index->type & DICT_IBUF) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {

		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
				cursor->low_match, cursor->low_bytes);

	if ((info->side == BTR_SEARCH_LEFT_SIDE && cmp <= 0)
	    || (info->side == BTR_SEARCH_RIGHT_SIDE && cmp > 0)) {

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
				cursor->up_match, cursor->up_bytes);

	if ((info->side == BTR_SEARCH_LEFT_SIDE && cmp > 0)
	    || (info->side == BTR_SEARCH_RIGHT_SIDE && cmp <= 0)) {

		goto set_new_recomm;
	}

	info->n_hash_potential++;

	return;

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
				cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->side = BTR_SEARCH_LEFT_SIDE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->side = BTR_SEARCH_LEFT_SIDE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;
		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->side = BTR_SEARCH_RIGHT_SIDE;
	}
}
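/* A minimal illustrative sketch (the helper is hypothetical, not from the
InnoDB sources) of the ordering that btr_search_info_update_hash() above
relies on when it calls ut_pair_cmp(): an (n_fields, n_bytes) prefix is
compared lexicographically, so prefix A is "longer" than prefix B when it
hashes more full fields, or the same fields but more bytes of the next one. */
static
int
example_prefix_cmp(
/*===============*/
				/* out: 1, 0, or -1 as A is longer than,
				equal to, or shorter than B */
	ulint	a_fields,	/* in: full fields hashed by prefix A */
	ulint	a_bytes,	/* in: extra bytes hashed by prefix A */
	ulint	b_fields,	/* in: full fields hashed by prefix B */
	ulint	b_bytes)	/* in: extra bytes hashed by prefix B */
{
	if (a_fields != b_fields) {

		return(a_fields > b_fields ? 1 : -1);
	}

	if (a_bytes != b_bytes) {

		return(a_bytes > b_bytes ? 1 : -1);
	}

	return(0);	/* both prefixes hash exactly the same amount */
}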
/*************************************************************************
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent. */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
				/* out: TRUE if building a (new) hash index on
				the block is recommended */
	btr_search_t*	info,	/* in: search info */
	buf_block_t*	block,	/* in: buffer block */
	btr_cur_t*	cursor)	/* in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&((buf_block_t*) block)->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&((buf_block_t*) block)->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(cursor);

	info->last_hash_succ = FALSE;

	ut_a(block->magic_n == BUF_BLOCK_MAGIC_N);
	ut_a(info->magic_n == BTR_SEARCH_MAGIC_N);

	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
	    && (block->side == info->side)) {

		if ((block->is_hashed)
		    && (block->curr_n_fields == info->n_fields)
		    && (block->curr_n_bytes == info->n_bytes)
		    && (block->curr_side == info->side)) {

			/* The search would presumably have succeeded using
			the hash index */

			info->last_hash_succ = TRUE;
		}

		block->n_hash_helps++;
	} else {
		block->n_hash_helps = 1;
		block->n_fields = info->n_fields;
		block->n_bytes = info->n_bytes;
		block->side = info->side;
	}

	if (cursor->index->table->does_not_fit_in_memory) {
		block->n_hash_helps = 0;
	}

	if ((block->n_hash_helps > page_get_n_recs(block->frame)
				/ BTR_SEARCH_PAGE_BUILD_LIMIT)
	    && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

		if ((!block->is_hashed)
		    || (block->n_hash_helps
					> 2 * page_get_n_recs(block->frame))
		    || (block->n_fields != block->curr_n_fields)
		    || (block->n_bytes != block->curr_n_bytes)
		    || (block->side != block->curr_side)) {

			/* Build a new hash index on the page */

			return(TRUE);
		}
	}

	return(FALSE);
}
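/* A worked example (the helper and the numbers are hypothetical, not from
the InnoDB sources) of the build trigger in
btr_search_update_block_hash_info() above: with
BTR_SEARCH_PAGE_BUILD_LIMIT = 16, a page holding 320 user records recommends
a hash index only once it has helped more than 320 / 16 = 20 searches, and
only after the index-wide streak of potential hash successes has reached
BTR_SEARCH_BUILD_LIMIT = 100. */
static
ibool
example_should_build_hash_index(
/*============================*/
				/* out: TRUE if a build is recommended */
	ulint	n_hash_helps,		/* in: searches this page helped */
	ulint	n_recs,			/* in: user records on the page */
	ulint	n_hash_potential)	/* in: index-wide success streak */
{
	return((n_hash_helps > n_recs / BTR_SEARCH_PAGE_BUILD_LIMIT)
	       && (n_hash_potential >= BTR_SEARCH_BUILD_LIMIT));
}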
/*************************************************************************
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
	btr_search_t*	info,	/* in: search info */
	buf_block_t*	block,	/* in: buffer block where cursor positioned */
	btr_cur_t*	cursor)	/* in: cursor */
{
	ulint	fold;
	rec_t*	rec;
	dulint	tree_id;

	ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
				|| rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(buf_block_align(btr_cur_get_rec(cursor)) == block);
	ut_a(!block->is_hashed || block->index == cursor->index);

	if (block->is_hashed
	    && (info->n_hash_potential > 0)
	    && (block->curr_n_fields == info->n_fields)
	    && (block->curr_n_bytes == info->n_bytes)
	    && (block->curr_side == info->side)) {
		mem_heap_t*	heap		= NULL;
		ulint		offsets_[REC_OFFS_NORMAL_SIZE];
		*offsets_ = (sizeof offsets_) / sizeof *offsets_;

		rec = btr_cur_get_rec(cursor);

		if (!page_rec_is_user_rec(rec)) {

			return;
		}

		tree_id = ((cursor->index)->tree)->id;

		fold = rec_fold(rec, rec_get_offsets(rec, cursor->index,
				offsets_, ULINT_UNDEFINED, &heap),
				block->curr_n_fields,
				block->curr_n_bytes, tree_id);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
#ifdef UNIV_SYNC_DEBUG
		ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

		ha_insert_for_fold(btr_search_sys->hash_index, fold, rec);
	}
}
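/* A minimal illustrative sketch (the helper is hypothetical, not from the
InnoDB sources) of the snapshot trick used by btr_search_info_update_slow()
below: the unprotected block fields are copied once into a separate array,
and the copy is then read through a pointer offset by the dummy global
btr_search_this_is_zero, so the compiler cannot prove the copy equals the
live fields and silently re-read block->n_fields etc. mid-call. */
static
void
example_snapshot_params(
/*====================*/
	buf_block_t*	block,	/* in: block whose unprotected fields
				to copy */
	ulint*		out)	/* out: stable copies of n_fields,
				n_bytes, side */
{
	/* Copy once; the caller then passes only these copies onward, so a
	concurrent update of the block fields cannot change the argument
	values between the sanity checks and the call. */
	out[0] = block->n_fields;
	out[1] = block->n_bytes;
	out[2] = block->side;
}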
/*************************************************************************
Updates the search info. */

void
btr_search_info_update_slow(
/*========================*/
	btr_search_t*	info,	/* in/out: search info */
	btr_cur_t*	cursor)	/* in: cursor which was just positioned */
{
	buf_block_t*	block;
	ibool		build_index;
	ulint*		params;
	ulint*		params2;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	block = buf_block_align(btr_cur_get_rec(cursor));

	/* NOTE that the following two function calls do NOT protect
	info or block->n_fields etc. with any semaphore, to save CPU time!
	We cannot assume the fields are consistent when we return from
	those functions! */

	btr_search_info_update_hash(info, cursor);

	build_index = btr_search_update_block_hash_info(info, block, cursor);

	if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

		btr_search_check_free_space_in_heap();
	}

	if (cursor->flag == BTR_CUR_HASH_FAIL) {
		/* Update the hash node reference, if appropriate */

		btr_search_n_hash_fail++;

		rw_lock_x_lock(&btr_search_latch);

		btr_search_update_hash_ref(info, block, cursor);

		rw_lock_x_unlock(&btr_search_latch);
	}

	if (build_index) {
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						block->frame,
						params2[0],
						params2[1],
						params2[2]);
		mem_free(params);
	}
}

/**********************************************************************
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible
values. */
static
ibool
btr_search_check_guess(
/*===================*/
				/* out: TRUE if success */
	btr_cur_t*	cursor,	/* in: guessed cursor position */
	ibool		can_only_compare_to_cursor_rec,
				/* in: if we do not have a latch on the page
				of cursor, but only a latch on
				btr_search_latch, then ONLY the columns
				of the record UNDER the cursor are
				protected, not the next or previous record
				in the chain: we cannot look at the next or
				previous record to check our guess! */
	dtuple_t*	tuple,	/* in: data tuple */
	ulint		mode,	/* in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
				or PAGE_CUR_GE */
	mtr_t*		mtr)	/* in: mtr */
{
	rec_t*		rec;
	ulint		n_unique;
	ulint		match;
	ulint		bytes;
	int		cmp;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	ibool		success		= FALSE;
	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

	n_unique = dict_index_get_n_unique_in_tree(cursor->index);

	rec = btr_cur_get_rec(cursor);

	ut_ad(page_rec_is_user_rec(rec));

	match = 0;
