📄 tclthreadalloc.c
字号:
 */
    new = TclpAlloc(reqsize);
    if (new != NULL) {
        if (reqsize > blockPtr->b_reqsize) {
            /*
             * Growing: only the old block's original request size worth of
             * bytes is valid to copy.
             */
            reqsize = blockPtr->b_reqsize;
        }
        memcpy(new, ptr, reqsize);
        TclpFree(ptr);
    }
    return new;
}

/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *      Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *      Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *      May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's
 *      if the list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move
     * or allocate new objs if necessary.
     */

    if (cachePtr->nobjs == 0) {
        /*
         * Local free list is empty: first try to refill it from the
         * shared cache (up to NOBJALLOC objects), under the obj lock.
         */
        Tcl_MutexLock(objLockPtr);
        nmove = sharedPtr->nobjs;
        if (nmove > 0) {
            if (nmove > NOBJALLOC) {
                nmove = NOBJALLOC;
            }
            MoveObjs(sharedPtr, cachePtr, nmove);
        }
        Tcl_MutexUnlock(objLockPtr);
        if (cachePtr->nobjs == 0) {
            /*
             * The shared cache was empty too: allocate a fresh batch of
             * NOBJALLOC objects in one malloc and thread them onto the
             * local free list via internalRep.otherValuePtr.
             */
            cachePtr->nobjs = nmove = NOBJALLOC;
            newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
            if (newObjsPtr == NULL) {
                panic("alloc: could not allocate %d new objects", nmove);
            }
            while (--nmove >= 0) {
                objPtr = &newObjsPtr[nmove];
                objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
                cachePtr->firstObjPtr = objPtr;
            }
        }
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *      Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May move free Tcl_Obj's to the shared list upon hitting the high
 *      water mark.
* *---------------------------------------------------------------------- */voidTclThreadFreeObj(Tcl_Obj *objPtr){ Cache *cachePtr = TclpGetAllocCache(); if (cachePtr == NULL) { cachePtr = GetCache(); } /* * Get this thread's list and push on the free Tcl_Obj. */ objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr; cachePtr->firstObjPtr = objPtr; ++cachePtr->nobjs; /* * If the number of free objects has exceeded the high * water mark, move some blocks to the shared list. */ if (cachePtr->nobjs > NOBJHIGH) { Tcl_MutexLock(objLockPtr); MoveObjs(cachePtr, sharedPtr, NOBJALLOC); Tcl_MutexUnlock(objLockPtr); }}/* *---------------------------------------------------------------------- * * Tcl_GetMemoryInfo -- * * Return a list-of-lists of memory stats. * * Results: * None. * * Side effects: * List appended to given dstring. * *---------------------------------------------------------------------- */voidTcl_GetMemoryInfo(Tcl_DString *dsPtr){ Cache *cachePtr; char buf[200]; int n; Tcl_MutexLock(listLockPtr); cachePtr = firstCachePtr; while (cachePtr != NULL) { Tcl_DStringStartSublist(dsPtr); if (cachePtr == sharedPtr) { Tcl_DStringAppendElement(dsPtr, "shared"); } else { sprintf(buf, "thread%d", (int) cachePtr->owner); Tcl_DStringAppendElement(dsPtr, buf); } for (n = 0; n < NBUCKETS; ++n) { sprintf(buf, "%d %d %d %d %d %d %d", (int) binfo[n].blocksize, cachePtr->buckets[n].nfree, cachePtr->buckets[n].nget, cachePtr->buckets[n].nput, cachePtr->buckets[n].nrequest, cachePtr->buckets[n].nlock, cachePtr->buckets[n].nwait); Tcl_DStringAppendElement(dsPtr, buf); } Tcl_DStringEndSublist(dsPtr); cachePtr = cachePtr->nextPtr; } Tcl_MutexUnlock(listLockPtr);}/* *---------------------------------------------------------------------- * * MoveObjs -- * * Move Tcl_Obj's between caches. * * Results: * None. * * Side effects: * None. 
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     *
     * NOTE(review): the "--nmove" pre-decrement loop assumes nmove >= 1
     * and that the 'from' list holds at least nmove objects -- callers
     * appear to guarantee this; confirm before reuse.
     */

    while (--nmove) {
        objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * Block2Ptr, Ptr2Block --
 *
 *      Convert between internal blocks and user pointers.
 *
 * Results:
 *      User pointer or internal block.
 *
 * Side effects:
 *      Invalid blocks will abort the server.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    /* Stamp both magic words so Ptr2Block can detect corruption later. */
    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    /* The user pointer starts immediately after the Block header. */
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    /* Range check: place a guard byte just past the requested size. */
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    /* The Block header sits immediately before the user pointer. */
    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
            || ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
            || blockPtr->b_magic2 != MAGIC) {
        /* Header or guard byte clobbered: abort rather than corrupt further. */
        panic("alloc: invalid block: %p: %x %x %x\n",
            blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
            ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *      Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Lock activity and contention are monitored globally and on
 *      a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    /*
     * Disabled contention instrumentation: count a wait (locally and
     * globally) whenever the non-blocking try-lock fails.
     */
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
        Tcl_MutexLock(binfo[bucket].lockPtr);
        ++cachePtr->buckets[bucket].nwait;
        ++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    /* Count the acquisition for this cache and for the global totals. */
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}

static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *      Return unused blocks to the shared cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
        lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}

/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *      Get more blocks for a bucket.
 *
 * Results:
 *      1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *      Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache. Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement. The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
        LockBucket(cachePtr, bucket);
        if (sharedPtr->buckets[bucket].nfree > 0) {

            /*
             * Either move the entire list or walk the list to find
             * the last block to move.
             */

            n = binfo[bucket].nmove;
            if (n >= sharedPtr->buckets[bucket].nfree) {
                /* Take everything the shared bucket has. */
                cachePtr->buckets[bucket].firstPtr =
                    sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].nfree =
                    sharedPtr->buckets[bucket].nfree;
                sharedPtr->buckets[bucket].firstPtr = NULL;
                sharedPtr->buckets[bucket].nfree = 0;
            } else {
                /* Detach the first n blocks from the shared list. */
                blockPtr = sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].firstPtr = blockPtr;
                sharedPtr->buckets[bucket].nfree -= n;
                cachePtr->buckets[bucket].nfree = n;
                while (--n > 0) {
                    blockPtr = blockPtr->b_next;
                }
                sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
                blockPtr->b_next = NULL;
            }
        }
        UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

        /*
         * If no blocks could be moved from shared, first look for a
         * larger block in this cache to split up.
         */

        blockPtr = NULL;
        n = NBUCKETS;
        size = 0; /* lint */
        while (--n > bucket) {
            if (cachePtr->buckets[n].nfree > 0) {
                size = binfo[n].blocksize;
                blockPtr = cachePtr->buckets[n].firstPtr;
                cachePtr->buckets[n].firstPtr = blockPtr->b_next;
                --cachePtr->buckets[n].nfree;
                break;
            }
        }

        /*
         * Otherwise, allocate a big new block directly.
         */

        if (blockPtr == NULL) {
            size = MAXALLOC;
            blockPtr = malloc(size);
            if (blockPtr == NULL) {
                /* Out of memory: report failure to the caller. */
                return 0;
            }
        }

        /*
         * Split the larger block into smaller blocks for this bucket.
         */

        n = size / binfo[bucket].blocksize;
        cachePtr->buckets[bucket].nfree = n;
        cachePtr->buckets[bucket].firstPtr = blockPtr;
        while (--n > 0) {
            /* Carve the next fixed-size block out of the same allocation. */
            blockPtr->b_next = (Block *)
                ((char *) blockPtr + binfo[bucket].blocksize);
            blockPtr = blockPtr->b_next;
        }
        blockPtr->b_next = NULL;
    }
    return 1;
}

#endif /* TCL_THREADS */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -