
📄 catcache.c

📁 PostgreSQL 8.3.4 source code, open-source database
💻 C
📖 Page 1 of 4
    CatalogCacheInitializeCache_DEBUG1;

    relation = heap_open(cache->cc_reloid, AccessShareLock);

    /*
     * switch to the cache context so our allocations do not vanish at the end
     * of a transaction
     */
    Assert(CacheMemoryContext != NULL);

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * copy the relcache's tuple descriptor to permanent cache storage
     */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

    /*
     * save the relation's name and relisshared flag, too (cc_relname is used
     * only for debugging purposes)
     */
    cache->cc_relname = pstrdup(RelationGetRelationName(relation));
    cache->cc_relisshared = RelationGetForm(relation)->relisshared;

    /*
     * return to the caller's memory context and close the rel
     */
    MemoryContextSwitchTo(oldcxt);
    heap_close(relation, AccessShareLock);

    CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
                cache->cc_relname, cache->cc_nkeys);

    /*
     * initialize cache's key information
     */
    for (i = 0; i < cache->cc_nkeys; ++i)
    {
        Oid         keytype;
        RegProcedure eqfunc;

        CatalogCacheInitializeCache_DEBUG2;

        if (cache->cc_key[i] > 0)
            keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
        else
        {
            if (cache->cc_key[i] != ObjectIdAttributeNumber)
                elog(FATAL, "only sys attr supported in caches is OID");
            keytype = OIDOID;
        }

        GetCCHashEqFuncs(keytype,
                         &cache->cc_hashfunc[i],
                         &eqfunc);

        cache->cc_isname[i] = (keytype == NAMEOID);

        /*
         * Do equality-function lookup (we assume this won't need a catalog
         * lookup for any supported type)
         */
        fmgr_info_cxt(eqfunc,
                      &cache->cc_skey[i].sk_func,
                      CacheMemoryContext);

        /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
        cache->cc_skey[i].sk_attno = cache->cc_key[i];

        /* Fill in sk_strategy as well --- always standard equality */
        cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
        cache->cc_skey[i].sk_subtype = InvalidOid;

        CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
                    cache->cc_relname,
                    i,
                    cache);
    }

    /*
     * mark this cache fully initialized
     */
    cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    if (touch_index &&
        cache->id != AMOID &&
        cache->id != AMNAME)
    {
        Relation    idesc;

        idesc = index_open(cache->cc_indexoid, AccessShareLock);
        index_close(idesc, AccessShareLock);
    }
}

/*
 *      IndexScanOK
 *
 *      This function checks for tuples that will be fetched by
 *      IndexSupportInitialize() during relcache initialization for
 *      certain system indexes that support critical syscaches.
 *      We can't use an indexscan to fetch these, else we'll get into
 *      infinite recursion.  A plain heap scan will work, however.
 *
 *      Once we have completed relcache initialization (signaled by
 *      criticalRelcachesBuilt), we don't have to worry anymore.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
    if (cache->id == INDEXRELID)
    {
        /*
         * Rather than tracking exactly which indexes have to be loaded before
         * we can use indexscans (which changes from time to time), just force
         * all pg_index searches to be heap scans until we've built the
         * critical relcaches.
         */
        if (!criticalRelcachesBuilt)
            return false;
    }
    else if (cache->id == AMOID ||
             cache->id == AMNAME)
    {
        /*
         * Always do heap scans in pg_am, because it's so small there's not
         * much point in an indexscan anyway.  We *must* do this when
         * initially building critical relcache entries, but we might as well
         * just always do it.
         */
        return false;
    }

    /* Normal case, allow index scan */
    return true;
}

/*
 *  SearchCatCache
 *
 *      This call searches a system cache for a tuple, opening the relation
 *      if necessary (on the first access to a particular cache).
 *
 *      The result is NULL if not found, or a pointer to a HeapTuple in
 *      the cache.  The caller must not modify the tuple, and must call
 *      ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
               Datum v1,
               Datum v2,
               Datum v3,
               Datum v4)
{
    ScanKeyData cur_skey[4];
    uint32      hashValue;
    Index       hashIndex;
    Dlelem     *elt;
    CatCTup    *ct;
    Relation    relation;
    SysScanDesc scandesc;
    HeapTuple   ntp;

    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
    cache->cc_searches++;
#endif

    /*
     * initialize the search key information
     */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
    cur_skey[0].sk_argument = v1;
    cur_skey[1].sk_argument = v2;
    cur_skey[2].sk_argument = v3;
    cur_skey[3].sk_argument = v4;

    /*
     * find the hash bucket in which to look for the tuple
     */
    hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

    /*
     * scan the hash bucket until we find a match or exhaust our tuples
     */
    for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
         elt;
         elt = DLGetSucc(elt))
    {
        bool        res;

        ct = (CatCTup *) DLE_VAL(elt);

        if (ct->dead)
            continue;           /* ignore dead entries */

        if (ct->hash_value != hashValue)
            continue;           /* quickly skip entry if wrong hash val */

        /*
         * see if the cached tuple matches our key.
         */
        HeapKeyTest(&ct->tuple,
                    cache->cc_tupdesc,
                    cache->cc_nkeys,
                    cur_skey,
                    res);
        if (!res)
            continue;

        /*
         * We found a match in the cache.  Move it to the front of the list
         * for its hashbucket, in order to speed subsequent searches.  (The
         * most frequently accessed elements in any hashbucket will tend to be
         * near the front of the hashbucket's list.)
         */
        DLMoveToFront(&ct->cache_elem);

        /*
         * If it's a positive entry, bump its refcount and return it. If it's
         * negative, we can report failure to the caller.
         */
        if (!ct->negative)
        {
            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_hits++;
#endif

            return &ct->tuple;
        }
        else
        {
            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_neg_hits++;
#endif

            return NULL;
        }
    }

    /*
     * Tuple was not found in cache, so we have to try to retrieve it directly
     * from the relation.  If found, we will add it to the cache; if not
     * found, we will add a negative cache entry instead.
     *
     * NOTE: it is possible for recursive cache lookups to occur while reading
     * the relation --- for example, due to shared-cache-inval messages being
     * processed during heap_open().  This is OK.  It's even possible for one
     * of those lookups to find and enter the very same tuple we are trying to
     * fetch here.  If that happens, we will enter a second copy of the tuple
     * into the cache.  The first copy will never be referenced again, and
     * will eventually age out of the cache, so there's no functional problem.
     * This case is rare enough that it's not worth expending extra cycles to
     * detect.
     */
    relation = heap_open(cache->cc_reloid, AccessShareLock);

    scandesc = systable_beginscan(relation,
                                  cache->cc_indexoid,
                                  IndexScanOK(cache, cur_skey),
                                  SnapshotNow,
                                  cache->cc_nkeys,
                                  cur_skey);

    ct = NULL;

    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
    {
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     false);
        /* immediately set the refcount to 1 */
        ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
        ct->refcount++;
        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
        break;                  /* assume only one match */
    }

    systable_endscan(scandesc);

    heap_close(relation, AccessShareLock);

    /*
     * If tuple was not found, we need to build a negative cache entry
     * containing a fake tuple.  The fake tuple has the correct key columns,
     * but nulls everywhere else.
     *
     * In bootstrap mode, we don't build negative entries, because the cache
     * invalidation mechanism isn't alive and can't clear them if the tuple
     * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
     * cache inval for that.)
     */
    if (ct == NULL)
    {
        if (IsBootstrapProcessingMode())
            return NULL;

        ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     true);
        heap_freetuple(ntp);

        CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                    cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
        CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
                    cache->cc_relname, hashIndex);

        /*
         * We are not returning the negative entry to the caller, so leave its
         * refcount zero.
         */

        return NULL;
    }

    CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
    CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
                cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
    cache->cc_newloads++;
#endif

    return &ct->tuple;
}

/*
 *  ReleaseCatCache
 *
 *  Decrement the reference count of a catcache entry (releasing the
 *  hold grabbed by a successful SearchCatCache).
 *
 *  NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *  will be freed as soon as their refcount goes to zero.  In combination
 *  with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *  to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety checks to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);
    Assert(ct->refcount > 0);

    ct->refcount--;
    ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

    if (
#ifndef CATCACHE_FORCE_RELEASE
        ct->dead &&
#endif
        ct->refcount == 0 &&
        (ct->c_list == NULL || ct->c_list->refcount == 0))
        CatCacheRemoveCTup(ct->my_cache, ct);
}

/*
 *  SearchCatCacheList
 *
 *      Generate a list of all tuples matching a partial key (that is,
 *      a key specifying just the first K of the cache's N key columns).
 *
 *      The caller must not modify the list object or the pointed-to tuples,
 *      and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
                   int nkeys,
                   Datum v1,
                   Datum v2,
                   Datum v3,
                   Datum v4)
{
    ScanKeyData cur_skey[4];
    uint32      lHashValue;
    Dlelem     *elt;
    CatCList   *cl;
    CatCTup    *ct;
    List       *volatile ctlist;
    ListCell   *ctlist_item;
    int         nmembers;
    bool        ordered;
    HeapTuple   ntp;
    MemoryContext oldcxt;
    int         i;

    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
    cache->cc_lsearches++;
#endif

    /*
     * initialize the search key information
     */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
    cur_skey[0].sk_argument = v1;
    cur_skey[1].sk_argument = v2;
    cur_skey[2].sk_argument = v3;
    cur_skey[3].sk_argument = v4;

    /*
     * compute a hash value of the given keys for faster search.  We don't
     * presently divide the CatCList items into buckets, but this still lets
     * us skip non-matching items quickly most of the time.
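The listing breaks off above and continues on the following pages of this file. As the header comment on SearchCatCache spells out, callers pass the key values as Datums (zero for unused slots), must not modify the returned tuple, and must pair every successful lookup with a ReleaseCatCache() call. The sketch below illustrates that contract from the caller's side; it is not part of catcache.c, and the function name example_lookup_by_oid and the way the CatCache pointer is obtained are hypothetical.

/*
 * Hypothetical caller-side sketch (not part of catcache.c): look up a
 * tuple in a single-key catcache whose key column is an OID.  Assumes
 * the usual backend includes (postgres.h, utils/catcache.h) and a
 * CatCache pointer set up elsewhere, e.g. by the syscache layer.
 */
static bool
example_lookup_by_oid(CatCache *cache, Oid key)
{
    HeapTuple   tup;

    /* Unused key slots are passed as zero Datums, per the comment above. */
    tup = SearchCatCache(cache,
                         ObjectIdGetDatum(key),
                         (Datum) 0,
                         (Datum) 0,
                         (Datum) 0);
    if (!HeapTupleIsValid(tup))
        return false;           /* not found (possibly via a negative entry) */

    /* ... inspect, but never modify, the cached tuple here ... */

    /* Drop the reference count that SearchCatCache took for us. */
    ReleaseCatCache(tup);
    return true;
}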

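The comment on SearchCatCacheList likewise describes the partial-key variant: the caller supplies only the first K of the cache's N key columns and gets back a CatCList that must be released with ReleaseCatCacheList(). Below is a hedged sketch of that pattern; it is not part of catcache.c, example_partial_key_scan is a hypothetical name, and it assumes the CatCList fields n_members and members[] declared in utils/catcache.h.

/*
 * Hypothetical caller-side sketch (not part of catcache.c): fetch all
 * cached tuples matching only the first key column of a multi-key cache.
 * Assumes the CatCList layout from utils/catcache.h (n_members, members[]).
 */
static void
example_partial_key_scan(CatCache *cache, Oid firstKey)
{
    CatCList   *list;
    int         i;

    /* nkeys = 1: match on the first key column only. */
    list = SearchCatCacheList(cache, 1,
                              ObjectIdGetDatum(firstKey),
                              (Datum) 0,
                              (Datum) 0,
                              (Datum) 0);

    for (i = 0; i < list->n_members; i++)
    {
        HeapTuple   tup = &list->members[i]->tuple;

        /* ... read-only inspection of each matching tuple ... */
        (void) tup;
    }

    /* Every SearchCatCacheList must be paired with a ReleaseCatCacheList. */
    ReleaseCatCacheList(list);
}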