
dtrace.c

Source code for the DTrace component in Sun Solaris 10. See: http://www.sun.com/software/solaris/observability.jsp
Language: C
Page 1 of 5
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^32) that our hashval
	 * comes out to be 0.  We rely on a zero hashval denoting a free
	 * element; if this actually happens, we set the hashval to 1.
	 */
	if (hashval == 0)
		hashval = 1;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start == NULL || start->dtdv_hashval != 0 ||
	    op != DTRACE_DYNVAR_DEALLOC);

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == 0) {
				/*
				 * We've gone off the rails.  Somewhere
				 * along the line, one of the members of this
				 * hash chain was deleted.  We could assert
				 * that either the dirty list or the rinsing
				 * list is non-NULL.  (The dtrace_sync() in
				 * dtrace_dynvar_clean() would validate this
				 * assertion.)
				 */
				ASSERT(op != DTRACE_DYNVAR_DEALLOC);
				goto top;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != 0);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);

			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now clear the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = 0;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == 0);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == 0);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = 0;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
}

static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval)
{
	if (nval < *oval)
		*oval = nval;
}

static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i]++;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1]++;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1]++;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0]++;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1]++;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1]++;
}

static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
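The page breaks off mid-comment here; pages 2 through 5 carry the rest of the file. Two patterns on this page are easy to lift out and study in isolation. The hash loop near the top is the well-known one-at-a-time mix (add the byte, add hashval << 10, xor hashval >> 6, then the final 3/11/15 avalanche). The sketch below repeats that mixing over a plain byte buffer; it is an illustration only, with an assumed function name that is not part of dtrace.c, which reads its bytes through dtrace_load8() and reserves a hashval of 0 to mean "free element".

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative stand-alone version of the byte-at-a-time mixing used by
 * the hash loop above (same shifts, same final avalanche).
 */
static uint32_t
tuple_hash(const uint8_t *data, size_t len)
{
	uint32_t hashval = 0;
	size_t j;

	for (j = 0; j < len; j++) {
		hashval += data[j];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/* 0 is reserved to mark a free element, so bump it to 1. */
	return (hashval == 0 ? 1 : hashval);
}

The other recurring pattern is the lock-free list manipulation: a chunk is pushed onto the per-CPU dirty list by snapshotting the head, pointing the new node at that snapshot, and compare-and-swapping the head, retrying until the swap observes the value that was read. dtrace.c does this with its own dtrace_casptr() and memory barriers; the sketch below shows the same push with C11 <stdatomic.h> and an assumed node type, purely to make the retry loop concrete.

#include <stdatomic.h>

/* Assumed illustrative node type; dtrace.c uses dtrace_dynvar_t here. */
typedef struct node {
	struct node *next;
} node_t;

/*
 * Push n onto the list headed by *headp, retrying if another CPU moves
 * the head between our read and our compare-and-swap -- the same shape
 * as the dtdsc_dirty push in dtrace_dynvar().
 */
static void
list_push(_Atomic(node_t *) *headp, node_t *n)
{
	node_t *head;

	do {
		head = atomic_load(headp);
		n->next = head;
	} while (!atomic_compare_exchange_weak(headp, &head, n));
}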
