/*
 * catcache.c
 *
 * From "PostgreSQL 8.1.4 source code -- open-source database system for Linux"
 * C source file, 1,927 lines total; this is page 1 of 4.
 */
				elog(FATAL, "only sys attr supported in caches is OID");			keytype = OIDOID;		}		GetCCHashEqFuncs(keytype,						 &cache->cc_hashfunc[i],						 &eqfunc);		cache->cc_isname[i] = (keytype == NAMEOID);		/*		 * Do equality-function lookup (we assume this won't need a catalog		 * lookup for any supported type)		 */		fmgr_info_cxt(eqfunc,					  &cache->cc_skey[i].sk_func,					  CacheMemoryContext);		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */		cache->cc_skey[i].sk_attno = cache->cc_key[i];		/* Fill in sk_strategy as well --- always standard equality */		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;		cache->cc_skey[i].sk_subtype = InvalidOid;		CACHE4_elog(DEBUG2, "CatalogCacheInit %s %d %p",					cache->cc_relname,					i,					cache);	}	/*	 * mark this cache fully initialized	 */	cache->cc_tupdesc = tupdesc;}/* * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache * * The only reason to call this routine is to ensure that the relcache * has created entries for all the catalogs and indexes referenced by * catcaches.  Therefore, open the index too.  An exception is the indexes * on pg_am, which we don't use (cf. IndexScanOK). */voidInitCatCachePhase2(CatCache *cache){	if (cache->cc_tupdesc == NULL)		CatalogCacheInitializeCache(cache);	if (cache->id != AMOID &&		cache->id != AMNAME)	{		Relation	idesc;		idesc = index_open(cache->cc_indexoid);		index_close(idesc);	}}/* *		IndexScanOK * *		This function checks for tuples that will be fetched by *		IndexSupportInitialize() during relcache initialization for *		certain system indexes that support critical syscaches. *		We can't use an indexscan to fetch these, else we'll get into *		infinite recursion.  A plain heap scan will work, however. * *		Once we have completed relcache initialization (signaled by *		criticalRelcachesBuilt), we don't have to worry anymore. 
*/static boolIndexScanOK(CatCache *cache, ScanKey cur_skey){	if (cache->id == INDEXRELID)	{		/*		 * Since the OIDs of indexes aren't hardwired, it's painful to figure		 * out which is which.	Just force all pg_index searches to be heap		 * scans while building the relcaches.		 */		if (!criticalRelcachesBuilt)			return false;	}	else if (cache->id == AMOID ||			 cache->id == AMNAME)	{		/*		 * Always do heap scans in pg_am, because it's so small there's not		 * much point in an indexscan anyway.  We *must* do this when		 * initially building critical relcache entries, but we might as well		 * just always do it.		 */		return false;	}	else if (cache->id == OPEROID)	{		if (!criticalRelcachesBuilt)		{			/* Looking for an OID comparison function? */			Oid			lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);			if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)				return false;		}	}	/* Normal case, allow index scan */	return true;}/* *	SearchCatCache * *		This call searches a system cache for a tuple, opening the relation *		if necessary (on the first access to a particular cache). * *		The result is NULL if not found, or a pointer to a HeapTuple in *		the cache.	The caller must not modify the tuple, and must call *		ReleaseCatCache() when done with it. * * The search key values should be expressed as Datums of the key columns' * datatype(s).  (Pass zeroes for any unused parameters.)  As a special * exception, the passed-in key for a NAME column can be just a C string; * the caller need not go to the trouble of converting it to a fully * null-padded NAME. 
*/HeapTupleSearchCatCache(CatCache *cache,			   Datum v1,			   Datum v2,			   Datum v3,			   Datum v4){	ScanKeyData cur_skey[4];	uint32		hashValue;	Index		hashIndex;	Dlelem	   *elt;	CatCTup    *ct;	Relation	relation;	SysScanDesc scandesc;	HeapTuple	ntp;	/*	 * one-time startup overhead for each cache	 */	if (cache->cc_tupdesc == NULL)		CatalogCacheInitializeCache(cache);#ifdef CATCACHE_STATS	cache->cc_searches++;#endif	/*	 * initialize the search key information	 */	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));	cur_skey[0].sk_argument = v1;	cur_skey[1].sk_argument = v2;	cur_skey[2].sk_argument = v3;	cur_skey[3].sk_argument = v4;	/*	 * find the hash bucket in which to look for the tuple	 */	hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);	/*	 * scan the hash bucket until we find a match or exhaust our tuples	 */	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);		 elt;		 elt = DLGetSucc(elt))	{		bool		res;		ct = (CatCTup *) DLE_VAL(elt);		if (ct->dead)			continue;			/* ignore dead entries */		if (ct->hash_value != hashValue)			continue;			/* quickly skip entry if wrong hash val */		/*		 * see if the cached tuple matches our key.		 */		HeapKeyTest(&ct->tuple,					cache->cc_tupdesc,					cache->cc_nkeys,					cur_skey,					res);		if (!res)			continue;		/*		 * we found a match in the cache: move it to the front of the global		 * LRU list.  We also move it to the front of the list for its		 * hashbucket, in order to speed subsequent searches.  (The most		 * frequently accessed elements in any hashbucket will tend to be near		 * the front of the hashbucket's list.)		 */		DLMoveToFront(&ct->lrulist_elem);		DLMoveToFront(&ct->cache_elem);		/*		 * If it's a positive entry, bump its refcount and return it. If it's		 * negative, we can report failure to the caller.		 
*/		if (!ct->negative)		{			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);			ct->refcount++;			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",						cache->cc_relname, hashIndex);#ifdef CATCACHE_STATS			cache->cc_hits++;#endif			return &ct->tuple;		}		else		{			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",						cache->cc_relname, hashIndex);#ifdef CATCACHE_STATS			cache->cc_neg_hits++;#endif			return NULL;		}	}	/*	 * Tuple was not found in cache, so we have to try to retrieve it directly	 * from the relation.  If found, we will add it to the cache; if not	 * found, we will add a negative cache entry instead.	 *	 * NOTE: it is possible for recursive cache lookups to occur while reading	 * the relation --- for example, due to shared-cache-inval messages being	 * processed during heap_open().  This is OK.  It's even possible for one	 * of those lookups to find and enter the very same tuple we are trying to	 * fetch here.	If that happens, we will enter a second copy of the tuple	 * into the cache.	The first copy will never be referenced again, and	 * will eventually age out of the cache, so there's no functional problem.	 * This case is rare enough that it's not worth expending extra cycles to	 * detect.	 
*/	relation = heap_open(cache->cc_reloid, AccessShareLock);	scandesc = systable_beginscan(relation,								  cache->cc_indexoid,								  IndexScanOK(cache, cur_skey),								  SnapshotNow,								  cache->cc_nkeys,								  cur_skey);	ct = NULL;	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))	{		ct = CatalogCacheCreateEntry(cache, ntp,									 hashValue, hashIndex,									 false);		/* immediately set the refcount to 1 */		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);		ct->refcount++;		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);		break;					/* assume only one match */	}	systable_endscan(scandesc);	heap_close(relation, AccessShareLock);	/*	 * If tuple was not found, we need to build a negative cache entry	 * containing a fake tuple.  The fake tuple has the correct key columns,	 * but nulls everywhere else.	 *	 * In bootstrap mode, we don't build negative entries, because the cache	 * invalidation mechanism isn't alive and can't clear them if the tuple	 * gets created later.	(Bootstrap doesn't do UPDATEs, so it doesn't need	 * cache inval for that.)	 */	if (ct == NULL)	{		if (IsBootstrapProcessingMode())			return NULL;		ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);		ct = CatalogCacheCreateEntry(cache, ntp,									 hashValue, hashIndex,									 true);		heap_freetuple(ntp);		CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",					cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);		CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",					cache->cc_relname, hashIndex);		/*		 * We are not returning the negative entry to the caller, so leave its		 * refcount zero.		 
*/		return NULL;	}	CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);	CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",				cache->cc_relname, hashIndex);#ifdef CATCACHE_STATS	cache->cc_newloads++;#endif	return &ct->tuple;}/* *	ReleaseCatCache * *	Decrement the reference count of a catcache entry (releasing the *	hold grabbed by a successful SearchCatCache). * *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries *	will be freed as soon as their refcount goes to zero.  In combination *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test *	to catch references to already-released catcache entries. */voidReleaseCatCache(HeapTuple tuple){	CatCTup    *ct = (CatCTup *) (((char *) tuple) -								  offsetof(CatCTup, tuple));	/* Safety checks to ensure we were handed a cache entry */	Assert(ct->ct_magic == CT_MAGIC);	Assert(ct->refcount > 0);	ct->refcount--;	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);	if (#ifndef CATCACHE_FORCE_RELEASE		ct->dead &&#endif		ct->refcount == 0 &&		(ct->c_list == NULL || ct->c_list->refcount == 0))		CatCacheRemoveCTup(ct->my_cache, ct);}/* *	SearchCatCacheList * *		Generate a list of all tuples matching a partial key (that is, *		a key specifying just the first K of the cache's N key columns). * *		The caller must not modify the list object or the pointed-to tuples, *		and must call ReleaseCatCacheList() when done with the list. 
*/CatCList *SearchCatCacheList(CatCache *cache,				   int nkeys,				   Datum v1,				   Datum v2,				   Datum v3,				   Datum v4){	ScanKeyData cur_skey[4];	uint32		lHashValue;	Dlelem	   *elt;	CatCList   *cl;	CatCTup    *ct;	List	   *volatile ctlist;	ListCell   *ctlist_item;	int			nmembers;	bool		ordered;	HeapTuple	ntp;	MemoryContext oldcxt;	int			i;	/*	 * one-time startup overhead for each cache	 */	if (cache->cc_tupdesc == NULL)		CatalogCacheInitializeCache(cache);	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);#ifdef CATCACHE_STATS	cache->cc_lsearches++;#endif	/*	 * initialize the search key information	 */	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));	cur_skey[0].sk_argument = v1;	cur_skey[1].sk_argument = v2;	cur_skey[2].sk_argument = v3;	cur_skey[3].sk_argument = v4;	/*	 * compute a hash value of the given keys for faster search.  We don't	 * presently divide the CatCList items into buckets, but this still lets	 * us skip non-matching items quickly most of the time.	 */	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);	/*	 * scan the items until we find a match or exhaust our list	 */	for (elt = DLGetHead(&cache->cc_lists);		 elt;		 elt = DLGetSucc(elt))	{		bool		res;		cl = (CatCList *) DLE_VAL(elt);		if (cl->dead)			continue;			/* ignore dead entries */		if (cl->hash_value != lHashValue)			continue;			/* quickly skip entry if wrong hash val */		/*		 * see if the cached list matches our key.		 */		if (cl->nkeys != nkeys)			continue;		HeapKeyTest(&cl->tuple,					cache->cc_tupdesc,					nkeys,					cur_skey,					res);		if (!res)			continue;		/*		 * We found a matching list: mark it as touched since the last		 * CatalogCacheCleanup() sweep.  Also move the list to the front of		 * the cache's list-of-lists, to speed subsequent searches. (We do not		 * move the members to the fronts of their hashbucket lists, however,		 * since there's no point in that unless they are searched for		 * individually.)		 
*/		cl->touched = true;		DLMoveToFront(&cl->cache_elem);		/* Bump the list's refcount and return it */		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);		cl->refcount++;		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);		CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",					cache->cc_relname);#ifdef CATCACHE_STATS		cache->cc_lhits++;#endif		return cl;	}	/*	 * List was not found in cache, so we have to build it by reading the	 * relation.  For each matching tuple found in the relation, use an	 * existing cache entry if possible, else build a new one.	 *

/*
 * (Code-viewer UI chrome captured by the extraction -- not part of catcache.c.
 *  Keyboard shortcuts: Copy code Ctrl+C, Search code Ctrl+F, Fullscreen F11,
 *  Increase font size Ctrl+=, Decrease font size Ctrl+-, Show shortcuts ?)
 */