⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 hash.c

📁 这是 Linux 下运行的 MySQL 软件包，可用于在 Linux 下安装 PHP + MySQL + Apache 的网络配置
💻 C
📖 第 1 页 / 共 4 页
字号:
			/*
			 * NOTE(review): this chunk begins mid-function.  The
			 * opening of the enclosing routine (a hash cursor put,
			 * judging by the DB_BEFORE/DB_AFTER/DB_CURRENT switch
			 * and the done/err1/err2 cleanup labels below) is not
			 * visible here -- confirm against the full file.
			 */
			} else
				myval = (DBT *)data;
			if (ret == 0)
				ret = __ham_add_el(dbc, key, myval, H_KEYDATA);
			goto done;
		}
		break;
	case DB_BEFORE:
	case DB_AFTER:
	case DB_CURRENT:
		/* Position the cursor under a write lock for relative puts. */
		ret = __ham_item(dbc, DB_LOCK_WRITE, pgnop);
		break;
	}

	if (*pgnop == PGNO_INVALID && ret == 0) {
		/*
		 * On-page item: overwrite in place when the put targets the
		 * current item, or when key-based puts land on a database
		 * that does not permit duplicates; otherwise add the data
		 * as a duplicate entry.
		 */
		if (flags == DB_CURRENT ||
		    ((flags == DB_KEYFIRST ||
		    flags == DB_KEYLAST || flags == DB_NODUPDATA) &&
		    !(F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))))
			ret = __ham_overwrite(dbc, data, flags);
		else
			ret = __ham_add_dup(dbc, data, flags, pgnop);
	}

	/* H_EXPAND is set when the put overfilled a bucket: grow the table. */
done:	if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
		ret = __ham_expand_table(dbc);
		F_CLR(hcp, H_EXPAND);
	}

	/* Mark the cursor's page dirty; preserve the first error seen. */
	if (hcp->page != NULL &&
	    (t_ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
		ret = t_ret;

err2:	if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
		ret = t_ret;
err1:	return (ret);
}

/********************************* UTILITIES ************************/

/*
 * __ham_expand_table --
 *	Grow the hash table by one bucket (linear hashing): allocate or
 *	fetch the page for the new bucket, log the meta-data split, bump
 *	max_bucket (doubling the masks when a new doubling begins), and
 *	relocate records from the old bucket into the new one.
 */
static int
__ham_expand_table(dbc)
	DBC *dbc;
{
	DB *dbp;
	DB_LOCK metalock;
	DB_LSN lsn;
	DB_MPOOLFILE *mpf;
	DBMETA *mmeta;
	HASH_CURSOR *hcp;
	PAGE *h;
	db_pgno_t pgno, mpgno;
	u_int32_t newalloc, new_bucket, old_bucket;
	int dirty_meta, got_meta, logn, new_double, ret;

	dbp = dbc->dbp;
	mpf = dbp->mpf;
	hcp = (HASH_CURSOR *)dbc->internal;
	if ((ret = __ham_dirty_meta(dbc)) != 0)
		return (ret);

	LOCK_INIT(metalock);
	mmeta = (DBMETA *) hcp->hdr;
	mpgno = mmeta->pgno;
	h = NULL;
	dirty_meta = 0;
	got_meta = 0;
	newalloc = 0;

	/*
	 * If the split point is about to increase, make sure that we
	 * have enough extra pages.  The calculation here is weird.
	 * We'd like to do this after we've upped max_bucket, but it's
	 * too late then because we've logged the meta-data split.  What
	 * we'll do between then and now is increment max bucket and then
	 * see what the log of one greater than that is; here we have to
	 * look at the log of max + 2.  VERY NASTY STUFF.
	 *
	 * We figure out what we need to do, then we log it, then request
	 * the pages from mpool.  We don't want to fail after extending
	 * the file.
	 *
	 * If the page we are about to split into has already been allocated,
	 * then we simply need to get it to get its LSN.  If it hasn't yet
	 * been allocated, then we know it's LSN (0,0).
	 */
	new_bucket = hcp->hdr->max_bucket + 1;
	old_bucket = new_bucket & hcp->hdr->low_mask;
	new_double = hcp->hdr->max_bucket == hcp->hdr->high_mask;
	logn = __db_log2(new_bucket);
	if (!new_double || hcp->hdr->spares[logn + 1] != PGNO_INVALID) {
		/* Page exists; get it so we can get its LSN */
		pgno = BUCKET_TO_PAGE(hcp, new_bucket);
		if ((ret =
		    mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
			goto err;
		lsn = h->lsn;
	} else {
		/* Get the master meta-data page to do allocation. */
		if (F_ISSET(dbp, DB_AM_SUBDB)) {
			mpgno = PGNO_BASE_MD;
			if ((ret = __db_lget(dbc,
			   0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
				goto err;
			if ((ret =
			    mpf->get(mpf, &mpgno, 0, (PAGE **)&mmeta)) != 0)
				goto err;
			got_meta = 1;
		}
		pgno = mmeta->last_pgno + 1;
		ZERO_LSN(lsn);
		newalloc = 1;
	}

	/* Log the meta-data split first. */
	if (DBC_LOGGING(dbc)) {
		/*
		 * We always log the page number of the first page of
		 * the allocation group.  However, the LSN that we log
		 * is either the LSN on the first page (if we did not
		 * do the actual allocation here) or the LSN on the last
		 * page of the unit (if we did do the allocation here).
		 */
		if ((ret = __ham_metagroup_log(dbp, dbc->txn,
		    &lsn, 0, hcp->hdr->max_bucket, mpgno, &mmeta->lsn,
		    hcp->hdr->dbmeta.pgno, &hcp->hdr->dbmeta.lsn,
		    pgno, &lsn, newalloc)) != 0)
			goto err;
	} else
		LSN_NOT_LOGGED(lsn);

	hcp->hdr->dbmeta.lsn = lsn;

	if (new_double && hcp->hdr->spares[logn + 1] == PGNO_INVALID) {
		/*
		 * We need to begin a new doubling and we have not allocated
		 * any pages yet.  Read the last page in and initialize it to
		 * make the allocation contiguous.  The pgno we calculated
		 * above is the first page allocated.  The entry in spares is
		 * that page number minus any buckets already allocated (it
		 * simplifies bucket to page transaction).  After we've set
		 * that, we calculate the last pgno.
		 */
		hcp->hdr->spares[logn + 1] = pgno - new_bucket;
		pgno += hcp->hdr->max_bucket;
		mmeta->last_pgno = pgno;
		mmeta->lsn = lsn;
		dirty_meta = DB_MPOOL_DIRTY;

		if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
			goto err;

		P_INIT(h, dbp->pgsize,
		    pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
	}

	/* Write out whatever page we ended up modifying. */
	h->lsn = lsn;
	if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
		goto err;
	h = NULL;

	/*
	 * Update the meta-data page of this hash database.
	 */
	hcp->hdr->max_bucket = new_bucket;
	if (new_double) {
		hcp->hdr->low_mask = hcp->hdr->high_mask;
		hcp->hdr->high_mask = new_bucket | hcp->hdr->low_mask;
	}

	/* Relocate records to the new bucket */
	ret = __ham_split_page(dbc, old_bucket, new_bucket);

	/* Cleanup: release the master meta page, meta lock, and bucket page. */
err:	if (got_meta)
		(void)mpf->put(mpf, mmeta, dirty_meta);
	if (LOCK_ISSET(metalock))
		(void)__TLPUT(dbc, metalock);
	if (h != NULL)
		(void)mpf->put(mpf, h, 0);

	return (ret);
}

/*
 * __ham_call_hash --
 *	Map key bytes k (of length len) to a bucket number: hash, mask
 *	with high_mask, and fall back to low_mask when the result lies
 *	beyond max_bucket (standard linear-hashing bucket selection).
 *
 * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
 */
u_int32_t
__ham_call_hash(dbc, k, len)
	DBC *dbc;
	u_int8_t *k;
	int32_t len;
{
	DB *dbp;
	u_int32_t n, bucket;
	HASH_CURSOR *hcp;
	HASH *hashp;

	dbp = dbc->dbp;
	hcp = (HASH_CURSOR *)dbc->internal;
	hashp = dbp->h_internal;

	n = (u_int32_t)(hashp->h_hash(dbp, k, len));

	bucket = n & hcp->hdr->high_mask;
	if (bucket > hcp->hdr->max_bucket)
		bucket = bucket & hcp->hdr->low_mask;
	return (bucket);
}

/*
 * __ham_dup_return --
 *	Check for duplicates, and call __db_ret appropriately.  Release
 *	everything held by the cursor.
 */
static int
__ham_dup_return(dbc, val, flags)
	DBC *dbc;
	DBT *val;
	u_int32_t flags;
{
	DB *dbp;
	HASH_CURSOR *hcp;
	PAGE *pp;
	DBT *myval, tmp_val;
	db_indx_t ndx;
	db_pgno_t pgno;
	u_int32_t off, tlen;
	u_int8_t *hk, type;
	int cmp, ret;
	db_indx_t len;

	/* Check for duplicate and return the first one. */
	dbp = dbc->dbp;
	hcp = (HASH_CURSOR *)dbc->internal;
	ndx = H_DATAINDEX(hcp->indx);
	type = HPAGE_TYPE(dbp, hcp->page, ndx);
	pp = hcp->page;
	myval = val;

	/*
	 * There are 4 cases:
	 * 1. We are not in duplicate, simply return; the upper layer
	 *    will do the right thing.
	 * 2. We are looking at keys and stumbled onto a duplicate.
	 * 3. We are in the middle of a duplicate set. (ISDUP set)
	 * 4. We need to check for particular data match.
	 */

	/* We should never get here with off-page dups. */
	DB_ASSERT(type != H_OFFDUP);

	/* Case 1 */
	if (type != H_DUPLICATE && flags != DB_GET_BOTH &&
	    flags != DB_GET_BOTHC && flags != DB_GET_BOTH_RANGE)
		return (0);

	/*
	 * Here we check for the case where we just stumbled onto a
	 * duplicate.  In this case, we do initialization and then
	 * let the normal duplicate code handle it. (Case 2)
	 */
	if (!F_ISSET(hcp, H_ISDUP) && type == H_DUPLICATE) {
		F_SET(hcp, H_ISDUP);
		hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
		    hcp->hdr->dbmeta.pagesize, hcp->indx);
		hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
		if (flags == DB_LAST ||
		    flags == DB_PREV || flags == DB_PREV_NODUP) {
			/*
			 * Backward traversal: walk the length-prefixed
			 * duplicate entries to position on the last one.
			 */
			hcp->dup_off = 0;
			do {
				memcpy(&len,
				    HKEYDATA_DATA(hk) + hcp->dup_off,
				    sizeof(db_indx_t));
				hcp->dup_off += DUP_SIZE(len);
			} while (hcp->dup_off < hcp->dup_tlen);
			hcp->dup_off -= DUP_SIZE(len);
		} else {
			/* Forward traversal: position on the first entry. */
			memcpy(&len,
			    HKEYDATA_DATA(hk), sizeof(db_indx_t));
			hcp->dup_off = 0;
		}
		hcp->dup_len = len;
	}

	/*
	 * If we are retrieving a specific key/data pair, then we
	 * may need to adjust the cursor before returning data.
	 * Case 4
	 */
	if (flags == DB_GET_BOTH ||
	    flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
		if (F_ISSET(hcp, H_ISDUP)) {
			/*
			 * If we're doing a join, search forward from the
			 * current position, not the beginning of the dup set.
			 */
			if (flags == DB_GET_BOTHC)
				F_SET(hcp, H_CONTINUE);
			__ham_dsearch(dbc, val, &off, &cmp, flags);

			/*
			 * This flag is set nowhere else and is safe to
			 * clear unconditionally.
			 */
			F_CLR(hcp, H_CONTINUE);
			hcp->dup_off = off;
		} else {
			hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
			if (((HKEYDATA *)hk)->type == H_OFFPAGE) {
				/* Off-page item: compare via __db_moff. */
				memcpy(&tlen,
				    HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
				memcpy(&pgno,
				    HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
				if ((ret = __db_moff(dbp, val,
				    pgno, tlen, dbp->dup_compare, &cmp)) != 0)
					return (ret);
			} else {
				/*
				 * We do not zero tmp_val since the comparison
				 * routines may only look at data and size.
				 */
				tmp_val.data = HKEYDATA_DATA(hk);
				tmp_val.size = LEN_HDATA(dbp, hcp->page,
				    dbp->pgsize, hcp->indx);
				cmp = dbp->dup_compare == NULL ?
				    __bam_defcmp(dbp, &tmp_val, val) :
				    dbp->dup_compare(dbp, &tmp_val, val);
			}
		}

		if (cmp != 0)
			return (DB_NOTFOUND);
	}

	/*
	 * If we're doing a bulk get, we don't want to actually return
	 * the data:  __ham_bulk will take care of cracking out the
	 * duplicates appropriately.
	 *
	 * The rest of this function calculates partial offsets and
	 * handles the actual __db_ret, so just return if
	 * DB_MULTIPLE(_KEY) is set.
	 */
	if (F_ISSET(dbc, DBC_MULTIPLE | DBC_MULTIPLE_KEY))
		return (0);

	/*
	 * Now, everything is initialized, grab a duplicate if
	 * necessary.
	 */
	if (F_ISSET(hcp, H_ISDUP)) {	/* Case 3 */
		/*
		 * Copy the DBT in case we are retrieving into user
		 * memory and we need the parameters for it.  If the
		 * user requested a partial, then we need to adjust
		 * the user's parameters to get the partial of the
		 * duplicate which is itself a partial.
		 */
		memcpy(&tmp_val, val, sizeof(*val));
		if (F_ISSET(&tmp_val, DB_DBT_PARTIAL)) {
			/*
			 * Take the user's length unless it would go
			 * beyond the end of the duplicate.
			 */
			if (tmp_val.doff + hcp->dup_off > hcp->dup_len)
				tmp_val.dlen = 0;
			else if (tmp_val.dlen + tmp_val.doff >
			    hcp->dup_len)
				tmp_val.dlen =
				    hcp->dup_len - tmp_val.doff;

			/*
			 * Calculate the new offset.
			 */
			tmp_val.doff += hcp->dup_off;
		} else {
			/*
			 * Whole-duplicate retrieval expressed as a partial
			 * over the dup set; skip the leading length prefix.
			 */
			F_SET(&tmp_val, DB_DBT_PARTIAL);
			tmp_val.dlen = hcp->dup_len;
			tmp_val.doff = hcp->dup_off + sizeof(db_indx_t);
		}
		myval = &tmp_val;
	}

	/*
	 * Finally, if we had a duplicate, pp, ndx, and myval should be
	 * set appropriately.
	 */
	if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata->data,
	    &dbc->rdata->ulen)) != 0)
		return (ret);

	/*
	 * In case we sent a temporary off to db_ret, set the real
	 * return values.
	 */
	val->data = myval->data;
	val->size = myval->size;

	F_SET(val, DB_DBT_ISSET);

	return (0);
}

/*
 * __ham_overwrite --
 *	Replace the current item's data with nval.  For partial puts on
 *	on-page duplicates this rebuilds the record by hand (prefix, pad,
 *	new bytes, suffix), converting to off-page duplicates when the
 *	result would exceed the on-page limit.
 *
 * NOTE(review): this function is truncated at the end of this chunk --
 * the remainder of its body is outside the visible range.
 */
static int
__ham_overwrite(dbc, nval, flags)
	DBC *dbc;
	DBT *nval;
	u_int32_t flags;
{
	DB *dbp;
	DB_ENV *dbenv;
	HASH_CURSOR *hcp;
	DBT *myval, tmp_val, tmp_val2;
	void *newrec;
	u_int8_t *hk, *p;
	u_int32_t len, nondup_size;
	db_indx_t newsize;
	int ret;

	dbp = dbc->dbp;
	dbenv = dbp->dbenv;
	hcp = (HASH_CURSOR *)dbc->internal;
	if (F_ISSET(hcp, H_ISDUP)) {
		/*
		 * This is an overwrite of a duplicate. We should never
		 * be off-page at this point.
		 */
		DB_ASSERT(hcp->opd == NULL);
		/* On page dups */
		if (F_ISSET(nval, DB_DBT_PARTIAL)) {
			/*
			 * We're going to have to get the current item, then
			 * construct the record, do any padding and do a
			 * replace.
			 */
			memset(&tmp_val, 0, sizeof(tmp_val));
			if ((ret =
			    __ham_dup_return(dbc, &tmp_val, DB_CURRENT)) != 0)
				return (ret);

			/* Figure out new size. */
			nondup_size = tmp_val.size;
			newsize = nondup_size;

			/*
			 * Three cases:
			 * 1. strictly append (may need to allocate space
			 *	for pad bytes; really gross).
			 * 2. overwrite some and append.
			 * 3. strictly overwrite.
			 */
			if (nval->doff > nondup_size)
				newsize +=
				    (nval->doff - nondup_size + nval->size);
			else if (nval->doff + nval->dlen > nondup_size)
				newsize += nval->size -
				    (nondup_size - nval->doff);
			else
				newsize += nval->size - nval->dlen;

			/*
			 * Make sure that the new size doesn't put us over
			 * the onpage duplicate size in which case we need
			 * to convert to off-page duplicates.
			 */
			if (ISBIG(hcp, hcp->dup_tlen - nondup_size + newsize)) {
				if ((ret = __ham_dup_convert(dbc)) != 0)
					return (ret);
				return (hcp->opd->c_am_put(hcp->opd,
				    NULL, nval, flags, NULL));
			}

			if ((ret = __os_malloc(dbp->dbenv,
			    DUP_SIZE(newsize), &newrec)) != 0)
				return (ret);
			memset(&tmp_val2, 0, sizeof(tmp_val2));
			F_SET(&tmp_val2, DB_DBT_PARTIAL);

			/* Construct the record. */
			p = newrec;
			/* Initial size. */
			memcpy(p, &newsize, sizeof(db_indx_t));
			p += sizeof(db_indx_t);

			/* First part of original record. */
			len = nval->doff > tmp_val.size
			    ? tmp_val.size : nval->doff;
			memcpy(p, tmp_val.data, len);
			p += len;

			if (nval->doff > tmp_val.size) {
				/* Padding */
				memset(p, 0, nval->doff - tmp_val.size);
				p += nval->doff - tmp_val.size;
			}

			/* New bytes */
			memcpy(p, nval->data, nval->size);
			p += nval->size;

			/* End of original record (if there is any) */
			if (nval->doff + nval->dlen < tmp_val.size) {
				len = tmp_val.size - nval->doff - nval->dlen;
				memcpy(p, (u_int8_t *)tmp_val.data +

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -