
📄 rewriteheap.c

📁 PostgreSQL 8.3.4 source code, open-source database
💻 C
	new_tuple->t_data->t_infomask |=
		old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

	/*
	 * While we have our hands on the tuple, we may as well freeze any
	 * very-old xmin or xmax, so that future VACUUM effort can be saved.
	 *
	 * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
	 * be given a pointer to a tuple in a disk buffer.  It happens though that
	 * we can get the right things to happen by passing InvalidBuffer for the
	 * buffer.
	 */
	heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);

	/*
	 * Invalid ctid means that ctid should point to the tuple itself. We'll
	 * override it later if the tuple is part of an update chain.
	 */
	ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

	/*
	 * If the tuple has been updated, check the old-to-new mapping hash table.
	 */
	if (!(old_tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
										   HEAP_IS_LOCKED)) &&
		!(ItemPointerEquals(&(old_tuple->t_self),
							&(old_tuple->t_data->t_ctid))))
	{
		OldToNewMapping mapping;

		memset(&hashkey, 0, sizeof(hashkey));
		hashkey.xmin = HeapTupleHeaderGetXmax(old_tuple->t_data);
		hashkey.tid = old_tuple->t_data->t_ctid;

		mapping = (OldToNewMapping)
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_FIND, NULL);

		if (mapping != NULL)
		{
			/*
			 * We've already copied the tuple that t_ctid points to, so we can
			 * set the ctid of this tuple to point to the new location, and
			 * insert it right away.
			 */
			new_tuple->t_data->t_ctid = mapping->new_tid;

			/* We don't need the mapping entry anymore */
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_REMOVE, &found);
			Assert(found);
		}
		else
		{
			/*
			 * We haven't seen the tuple t_ctid points to yet. Stash this
			 * tuple into unresolved_tups to be written later.
			 */
			UnresolvedTup unresolved;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_ENTER, &found);
			Assert(!found);

			unresolved->old_tid = old_tuple->t_self;
			unresolved->tuple = heap_copytuple(new_tuple);

			/*
			 * We can't do anything more now, since we don't know where the
			 * tuple will be written.
			 */
			MemoryContextSwitchTo(old_cxt);
			return;
		}
	}

	/*
	 * Now we will write the tuple, and then check to see if it is the B tuple
	 * in any new or known pair.  When we resolve a known pair, we will be
	 * able to write that pair's A tuple, and then we have to check if it
	 * resolves some other pair.  Hence, we need a loop here.
	 */
	old_tid = old_tuple->t_self;
	free_new = false;

	for (;;)
	{
		ItemPointerData new_tid;

		/* Insert the tuple and find out where it's put in new_heap */
		raw_heap_insert(state, new_tuple);
		new_tid = new_tuple->t_self;

		/*
		 * If the tuple is the updated version of a row, and the prior version
		 * wouldn't be DEAD yet, then we need to either resolve the prior
		 * version (if it's waiting in rs_unresolved_tups), or make an entry
		 * in rs_old_new_tid_map (so we can resolve it when we do see it). The
		 * previous tuple's xmax would equal this one's xmin, so it's
		 * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
		 */
		if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
								   state->rs_oldest_xmin))
		{
			/*
			 * Okay, this is B in an update pair.  See if we've seen A.
			 */
			UnresolvedTup unresolved;

			memset(&hashkey, 0, sizeof(hashkey));
			hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
			hashkey.tid = old_tid;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_FIND, NULL);

			if (unresolved != NULL)
			{
				/*
				 * We have seen and memorized the previous tuple already. Now
				 * that we know where we inserted the tuple its t_ctid points
				 * to, fix its t_ctid and insert it to the new heap.
				 */
				if (free_new)
					heap_freetuple(new_tuple);
				new_tuple = unresolved->tuple;
				free_new = true;
				old_tid = unresolved->old_tid;
				new_tuple->t_data->t_ctid = new_tid;

				/*
				 * We don't need the hash entry anymore, but don't free its
				 * tuple just yet.
				 */
				hash_search(state->rs_unresolved_tups, &hashkey,
							HASH_REMOVE, &found);
				Assert(found);

				/* loop back to insert the previous tuple in the chain */
				continue;
			}
			else
			{
				/*
				 * Remember the new tid of this tuple. We'll use it to set the
				 * ctid when we find the previous tuple in the chain.
				 */
				OldToNewMapping mapping;

				mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
									  HASH_ENTER, &found);
				Assert(!found);

				mapping->new_tid = new_tid;
			}
		}

		/* Done with this (chain of) tuples, for now */
		if (free_new)
			heap_freetuple(new_tuple);
		break;
	}

	MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite. Dead tuples are not
 * copied to the new table, but we still make note of them so that we
 * can release some resources earlier.
 */
void
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
	/*
	 * If we have already seen an earlier tuple in the update chain that
	 * points to this tuple, let's forget about that earlier tuple. It's in
	 * fact dead as well, our simple xmax < OldestXmin test in
	 * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
	 * when xmin of a tuple is greater than xmax, which sounds
	 * counter-intuitive but is perfectly valid.
	 *
	 * We don't bother to try to detect the situation the other way round,
	 * when we encounter the dead tuple first and then the recently dead one
	 * that points to it. If that happens, we'll have some unmatched entries
	 * in the UnresolvedTups hash table at the end. That can happen anyway,
	 * because a vacuum might have removed the dead tuple in the chain before
	 * us.
	 */
	UnresolvedTup unresolved;
	TidHashKey	hashkey;
	bool		found;

	memset(&hashkey, 0, sizeof(hashkey));
	hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
	hashkey.tid = old_tuple->t_self;

	unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
							 HASH_FIND, NULL);

	if (unresolved != NULL)
	{
		/* Need to free the contained tuple as well as the hashtable entry */
		heap_freetuple(unresolved->tuple);
		hash_search(state->rs_unresolved_tups, &hashkey,
					HASH_REMOVE, &found);
		Assert(found);
	}
}

/*
 * Insert a tuple to the new relation.  This has to track heap_insert
 * and its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
	Page		page = state->rs_buffer;
	Size		pageFreeSpace,
				saveFreeSpace;
	Size		len;
	OffsetNumber newoff;
	HeapTuple	heaptup;

	/*
	 * If the new tuple is too big for storage or contains already toasted
	 * out-of-line attributes from some other relation, invoke the toaster.
	 *
	 * Note: below this point, heaptup is the data we actually intend to store
	 * into the relation; tup is the caller's original untoasted data.
	 */
	if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(tup));
		heaptup = tup;
	}
	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
		heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
										 state->rs_use_wal, false);
	else
		heaptup = tup;

	len = MAXALIGN(heaptup->t_len);		/* be conservative */

	/*
	 * If we're gonna fail for oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %lu, maximum size %lu",
						(unsigned long) len,
						(unsigned long) MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
												   HEAP_DEFAULT_FILLFACTOR);

	/* Now we can check to see if there's enough free space already. */
	if (state->rs_buffer_valid)
	{
		pageFreeSpace = PageGetHeapFreeSpace(page);

		if (len + saveFreeSpace > pageFreeSpace)
		{
			/* Doesn't fit, so write out the existing page */

			/* XLOG stuff */
			if (state->rs_use_wal)
				log_newpage(&state->rs_new_rel->rd_node,
							state->rs_blockno,
							page);

			/*
			 * Now write the page. We say isTemp = true even if it's not a
			 * temp table, because there's no need for smgr to schedule an
			 * fsync for this write; we'll do it ourselves in
			 * end_heap_rewrite.
			 */
			RelationOpenSmgr(state->rs_new_rel);
			smgrextend(state->rs_new_rel->rd_smgr, state->rs_blockno,
					   (char *) page, true);

			state->rs_blockno++;
			state->rs_buffer_valid = false;
		}
	}

	if (!state->rs_buffer_valid)
	{
		/* Initialize a new empty page */
		PageInit(page, BLCKSZ, 0);
		state->rs_buffer_valid = true;
	}

	/* And now we can insert the tuple into the page */
	newoff = PageAddItem(page, (Item) heaptup->t_data, len,
						 InvalidOffsetNumber, false, true);
	if (newoff == InvalidOffsetNumber)
		elog(ERROR, "failed to add tuple");

	/* Update caller's t_self to the actual position where it was stored */
	ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

	/*
	 * Insert the correct position into CTID of the stored tuple, too, if the
	 * caller didn't supply a valid CTID.
	 */
	if (!ItemPointerIsValid(&tup->t_data->t_ctid))
	{
		ItemId		newitemid;
		HeapTupleHeader onpage_tup;

		newitemid = PageGetItemId(page, newoff);
		onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

		onpage_tup->t_ctid = tup->t_self;
	}

	/* If heaptup is a private copy, release it. */
	if (heaptup != tup)
		heap_freetuple(heaptup);
}
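For context, the routines above are the internal half of the heap-rewrite API that CLUSTER uses in PostgreSQL 8.3: a caller scans the old heap, hands every surviving tuple version to rewrite_heap_tuple() and every dead one to rewrite_heap_dead_tuple(), and the two hash tables (rs_unresolved_tups and rs_old_new_tid_map) re-link t_ctid update chains in the new heap no matter which chain member is encountered first. The sketch below shows roughly how such a caller could drive this API. It is only an illustration: copy_heap_sketch() and tuple_is_dead() are hypothetical names, and the begin_heap_rewrite()/end_heap_rewrite() signatures are assumed from the 8.3 rewriteheap.h rather than taken from this excerpt.

/*
 * Illustrative sketch only; not the actual CLUSTER code.  It assumes the
 * PostgreSQL 8.3 declarations in access/rewriteheap.h and access/heapam.h.
 */
#include "postgres.h"
#include "access/heapam.h"
#include "access/rewriteheap.h"
#include "utils/tqual.h"

/*
 * Hypothetical stand-in for the visibility test a real caller performs with
 * HeapTupleSatisfiesVacuum(); treating everything as live keeps the sketch
 * self-contained.
 */
static bool
tuple_is_dead(HeapTuple tuple)
{
	(void) tuple;
	return false;
}

static void
copy_heap_sketch(Relation old_heap, Relation new_heap,
				 TransactionId oldest_xmin, TransactionId freeze_xid)
{
	RewriteState rwstate;
	HeapScanDesc scan;
	HeapTuple	tuple;

	/* Assumed 8.3 signature: new heap, OldestXmin, FreezeXid, use_wal. */
	rwstate = begin_heap_rewrite(new_heap, oldest_xmin, freeze_xid, true);

	/* Scan every tuple version in the old heap, live or dead. */
	scan = heap_beginscan(old_heap, SnapshotAny, 0, NULL);
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		if (tuple_is_dead(tuple))
		{
			/*
			 * Dead versions are not copied, but reporting them lets the
			 * rewrite machinery discard stale update-chain state early.
			 */
			rewrite_heap_dead_tuple(rwstate, tuple);
		}
		else
		{
			/*
			 * Live or recently-dead versions go into the new heap; the old
			 * tuple is passed along so ctid chains can be re-linked.
			 */
			HeapTuple	copied = heap_copytuple(tuple);

			rewrite_heap_tuple(rwstate, tuple, copied);
			heap_freetuple(copied);
		}
	}
	heap_endscan(scan);

	/* Flush the last buffered page and fsync the new relation's files. */
	end_heap_rewrite(rwstate);
}

Passing both the old and the new tuple to rewrite_heap_tuple() is what lets the code above key its chain-tracking hash tables on (xmin, old TID), which is why the sketch hands over the original tuple alongside the copy it actually stores.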
