
nodehashjoin.c

Package: PostgreSQL 6.5.2 (relational database)
Language: C
	 * ----------------
	 */
	ExecAssignResultTypeFromTL((Plan *) node, &hjstate->jstate);
	ExecAssignProjectionInfo((Plan *) node, &hjstate->jstate);

	/* ----------------
	 *	initialize hash-specific info
	 * ----------------
	 */

	node->hashdone = false;

	hjstate->hj_HashTable = (HashJoinTable) NULL;
	hjstate->hj_CurBucketNo = 0;
	hjstate->hj_CurTuple = (HashJoinTuple) NULL;
	hjstate->hj_InnerHashKey = (Var *) NULL;

	hjstate->jstate.cs_OuterTupleSlot = (TupleTableSlot *) NULL;
	hjstate->jstate.cs_TupFromTlist = (bool) false;

	return TRUE;
}

int
ExecCountSlotsHashJoin(HashJoin *node)
{
	return ExecCountSlotsNode(outerPlan(node)) +
	ExecCountSlotsNode(innerPlan(node)) +
	HASHJOIN_NSLOTS;
}

/* ----------------------------------------------------------------
 *		ExecEndHashJoin
 *
 *		clean up routine for HashJoin node
 * ----------------------------------------------------------------
 */
void
ExecEndHashJoin(HashJoin *node)
{
	HashJoinState *hjstate;

	/* ----------------
	 *	get info from the HashJoin state
	 * ----------------
	 */
	hjstate = node->hashjoinstate;

	/* ----------------
	 * free hash table in case we end plan before all tuples are retrieved
	 * ---------------
	 */
	if (hjstate->hj_HashTable)
	{
		ExecHashTableDestroy(hjstate->hj_HashTable);
		hjstate->hj_HashTable = NULL;
	}

	/* ----------------
	 *	Free the projection info and the scan attribute info
	 *
	 *	Note: we don't ExecFreeResultType(hjstate)
	 *		  because the rule manager depends on the tupType
	 *		  returned by ExecMain().  So for now, this
	 *		  is freed at end-transaction time.  -cim 6/2/91
	 * ----------------
	 */
	ExecFreeProjectionInfo(&hjstate->jstate);

	/* ----------------
	 * clean up subtrees
	 * ----------------
	 */
	ExecEndNode(outerPlan((Plan *) node), (Plan *) node);
	ExecEndNode(innerPlan((Plan *) node), (Plan *) node);

	/* ----------------
	 *	clean out the tuple table
	 * ----------------
	 */
	ExecClearTuple(hjstate->jstate.cs_ResultTupleSlot);
	ExecClearTuple(hjstate->hj_OuterTupleSlot);
	ExecClearTuple(hjstate->hj_HashTupleSlot);
}

/* ----------------------------------------------------------------
 *		ExecHashJoinOuterGetTuple
 *
 *		get the next outer tuple for hashjoin: either by
 *		executing a plan node as in the first pass, or from
 *		the tmp files for the hashjoin batches.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
{
	HashJoinTable hashtable = hjstate->hj_HashTable;
	int			curbatch = hashtable->curbatch;
	TupleTableSlot *slot;

	if (curbatch == 0)
	{							/* if it is the first pass */
		slot = ExecProcNode(node, parent);
		if (!TupIsNull(slot))
			return slot;

		/*
		 * We have just reached the end of the first pass. Try to switch
		 * to a saved batch.
		 */
		curbatch = ExecHashJoinNewBatch(hjstate);
	}

	/*
	 * Try to read from a temp file. Loop allows us to advance to new
	 * batch as needed.
	 */
	while (curbatch <= hashtable->nbatch)
	{
		slot = ExecHashJoinGetSavedTuple(hjstate,
								 hashtable->outerBatchFile[curbatch - 1],
										 hjstate->hj_OuterTupleSlot);
		if (!TupIsNull(slot))
			return slot;
		curbatch = ExecHashJoinNewBatch(hjstate);
	}

	/* Out of batches... */
	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecHashJoinGetSavedTuple
 *
 *		read the next tuple from a tmp file
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
						  BufFile *file,
						  TupleTableSlot *tupleSlot)
{
	HeapTupleData htup;
	size_t		nread;
	HeapTuple	heapTuple;

	nread = BufFileRead(file, (void *) &htup, sizeof(HeapTupleData));
	if (nread == 0)
		return NULL;			/* end of file */
	if (nread != sizeof(HeapTupleData))
		elog(ERROR, "Read from hashjoin temp file failed");
	heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
	memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
	heapTuple->t_data = (HeapTupleHeader)
		((char *) heapTuple + HEAPTUPLESIZE);
	nread = BufFileRead(file, (void *) heapTuple->t_data, htup.t_len);
	if (nread != (size_t) htup.t_len)
		elog(ERROR, "Read from hashjoin temp file failed");
	return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true);
}

/* ----------------------------------------------------------------
 *		ExecHashJoinNewBatch
 *
 *		switch to a new hashjoin batch
 * ----------------------------------------------------------------
 */
static int
ExecHashJoinNewBatch(HashJoinState *hjstate)
{
	HashJoinTable hashtable = hjstate->hj_HashTable;
	int			nbatch = hashtable->nbatch;
	int			newbatch = hashtable->curbatch + 1;
	long	   *innerBatchSize = hashtable->innerBatchSize;
	long	   *outerBatchSize = hashtable->outerBatchSize;
	BufFile    *innerFile;
	TupleTableSlot *slot;
	ExprContext *econtext;
	Var		   *innerhashkey;

	if (newbatch > 1)
	{
		/*
		 * We no longer need the previous outer batch file; close it right
		 * away to free disk space.
		 */
		BufFileClose(hashtable->outerBatchFile[newbatch - 2]);
		hashtable->outerBatchFile[newbatch - 2] = NULL;
	}

	/* --------------
	 *	We can skip over any batches that are empty on either side.
	 *	Release associated temp files right away.
	 * --------------
	 */
	while (newbatch <= nbatch &&
		   (innerBatchSize[newbatch - 1] == 0L ||
			outerBatchSize[newbatch - 1] == 0L))
	{
		BufFileClose(hashtable->innerBatchFile[newbatch - 1]);
		hashtable->innerBatchFile[newbatch - 1] = NULL;
		BufFileClose(hashtable->outerBatchFile[newbatch - 1]);
		hashtable->outerBatchFile[newbatch - 1] = NULL;
		newbatch++;
	}

	if (newbatch > nbatch)
		return newbatch;		/* no more batches */

	/*
	 * Rewind inner and outer batch files for this batch, so that we can
	 * start reading them.
	 */
	if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0L,
					SEEK_SET) != 0L)
		elog(ERROR, "Failed to rewind hash temp file");

	innerFile = hashtable->innerBatchFile[newbatch - 1];

	if (BufFileSeek(innerFile, 0L, SEEK_SET) != 0L)
		elog(ERROR, "Failed to rewind hash temp file");

	/*
	 * Reload the hash table with the new inner batch
	 */
	ExecHashTableReset(hashtable, innerBatchSize[newbatch - 1]);

	econtext = hjstate->jstate.cs_ExprContext;
	innerhashkey = hjstate->hj_InnerHashKey;

	while ((slot = ExecHashJoinGetSavedTuple(hjstate,
											 innerFile,
											 hjstate->hj_HashTupleSlot))
		   && !TupIsNull(slot))
	{
		econtext->ecxt_innertuple = slot;
		ExecHashTableInsert(hashtable, econtext, innerhashkey);
	}

	/*
	 * after we build the hash table, the inner batch file is no longer
	 * needed
	 */
	BufFileClose(innerFile);
	hashtable->innerBatchFile[newbatch - 1] = NULL;

	hashtable->curbatch = newbatch;
	return newbatch;
}

/* ----------------------------------------------------------------
 *		ExecHashJoinGetBatch
 *
 *		determine the batch number for a bucketno
 *		+----------------+-------+-------+ ... +-------+
 *		0			  nbuckets						 totalbuckets
 * batch		 0			 1		 2	   ...
 * ----------------------------------------------------------------
 */
static int
ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable)
{
	int			b;

	if (bucketno < hashtable->nbuckets || hashtable->nbatch == 0)
		return 0;

	b = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
		(hashtable->totalbuckets - hashtable->nbuckets);
	return b + 1;
}

/* ----------------------------------------------------------------
 *		ExecHashJoinSaveTuple
 *
 *		save a tuple to a tmp file.
 *
 * The data recorded in the file for each tuple is an image of its
 * HeapTupleData (with meaningless t_data pointer) followed by the
 * HeapTupleHeader and tuple data.
 * ----------------------------------------------------------------
 */
void
ExecHashJoinSaveTuple(HeapTuple heapTuple,
					  BufFile *file)
{
	size_t		written;

	written = BufFileWrite(file, (void *) heapTuple, sizeof(HeapTupleData));
	if (written != sizeof(HeapTupleData))
		elog(ERROR, "Write to hashjoin temp file failed");

	written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len);
	if (written != (size_t) heapTuple->t_len)
		elog(ERROR, "Write to hashjoin temp file failed");
}

void
ExecReScanHashJoin(HashJoin *node, ExprContext *exprCtxt, Plan *parent)
{
	HashJoinState *hjstate = node->hashjoinstate;

	if (!node->hashdone)
		return;

	node->hashdone = false;

	/*
	 * Unfortunately, currently we have to destroy hashtable in all
	 * cases...
	 */
	if (hjstate->hj_HashTable)
	{
		ExecHashTableDestroy(hjstate->hj_HashTable);
		hjstate->hj_HashTable = NULL;
	}

	hjstate->hj_CurBucketNo = 0;
	hjstate->hj_CurTuple = (HashJoinTuple) NULL;
	hjstate->hj_InnerHashKey = (Var *) NULL;

	hjstate->jstate.cs_OuterTupleSlot = (TupleTableSlot *) NULL;
	hjstate->jstate.cs_TupFromTlist = (bool) false;

	/*
	 * if chgParam of subnodes is not null then plans will be re-scanned
	 * by first ExecProcNode.
	 */
	if (((Plan *) node)->lefttree->chgParam == NULL)
		ExecReScan(((Plan *) node)->lefttree, exprCtxt, (Plan *) node);
	if (((Plan *) node)->righttree->chgParam == NULL)
		ExecReScan(((Plan *) node)->righttree, exprCtxt, (Plan *) node);
}
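A short worked example may help with the bucket-to-batch arithmetic in ExecHashJoinGetBatch above: bucket numbers below nbuckets stay in memory (batch 0), and the remaining "virtual" buckets are spread evenly over batches 1..nbatch. The sketch below is not PostgreSQL code; it repeats the same formula on plain int parameters, and get_batch as well as the sample sizes (10 in-memory buckets, 50 total buckets, 4 overflow batches) are invented purely for illustration.

#include <stdio.h>

/* Same mapping as ExecHashJoinGetBatch, but on plain ints:
 * buckets [0, nbuckets) live in memory and belong to batch 0;
 * the remaining (totalbuckets - nbuckets) virtual buckets are
 * spread evenly over batches 1..nbatch. */
static int
get_batch(int bucketno, int nbuckets, int totalbuckets, int nbatch)
{
	int			b;

	if (bucketno < nbuckets || nbatch == 0)
		return 0;

	b = (nbatch * (bucketno - nbuckets)) / (totalbuckets - nbuckets);
	return b + 1;
}

int
main(void)
{
	/* hypothetical sizes: 10 real buckets, 50 total, 4 overflow batches */
	int			nbuckets = 10;
	int			totalbuckets = 50;
	int			nbatch = 4;
	int			bucketno;

	/* prints: buckets 0-9 -> batch 0, 10-19 -> 1, 20-29 -> 2,
	 * 30-39 -> 3, 40-49 -> 4 */
	for (bucketno = 0; bucketno < totalbuckets; bucketno++)
		printf("bucket %2d -> batch %d\n",
			   bucketno, get_batch(bucketno, nbuckets, totalbuckets, nbatch));
	return 0;
}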
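ExecHashJoinSaveTuple and ExecHashJoinGetSavedTuple above also fix the temp-file record layout: a raw image of HeapTupleData (whose t_data pointer is meaningless on disk) followed by t_len bytes of tuple header and data, and on reload the pointer is patched to point just past the freshly palloc'd header copy. Below is a minimal standalone sketch of that round trip using plain stdio instead of BufFile; FakeTuple, save_record and load_record are hypothetical names for illustration, not PostgreSQL APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for HeapTupleData: a fixed header carrying the
 * payload length plus a pointer that is meaningless once written out. */
typedef struct FakeTuple
{
	size_t		t_len;			/* length of the payload that follows */
	void	   *t_data;			/* patched after reading, like t_data */
} FakeTuple;

/* Write the header image, then the payload (ExecHashJoinSaveTuple's layout). */
static int
save_record(FILE *file, const FakeTuple *tup)
{
	if (fwrite(tup, sizeof(FakeTuple), 1, file) != 1)
		return -1;
	if (fwrite(tup->t_data, 1, tup->t_len, file) != tup->t_len)
		return -1;
	return 0;
}

/* Read the header, allocate header plus payload in one chunk, fix the
 * pointer (ExecHashJoinGetSavedTuple's reload logic); NULL at end of file. */
static FakeTuple *
load_record(FILE *file)
{
	FakeTuple	hdr;
	FakeTuple  *tup;

	if (fread(&hdr, sizeof(FakeTuple), 1, file) != 1)
		return NULL;			/* end of file */
	tup = malloc(sizeof(FakeTuple) + hdr.t_len);
	memcpy(tup, &hdr, sizeof(FakeTuple));
	tup->t_data = (char *) tup + sizeof(FakeTuple);
	if (fread(tup->t_data, 1, hdr.t_len, file) != hdr.t_len)
	{
		free(tup);
		return NULL;
	}
	return tup;
}

int
main(void)
{
	FILE	   *f = tmpfile();
	char		payload[] = "one saved tuple";
	FakeTuple	out = {sizeof(payload), payload};
	FakeTuple  *in;

	save_record(f, &out);
	rewind(f);					/* plays the role of BufFileSeek(..., 0L, SEEK_SET) */
	in = load_record(f);
	printf("reloaded %zu bytes: %s\n", in->t_len, (char *) in->t_data);
	free(in);
	fclose(f);
	return 0;
}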
