/* obmalloc.c -- from the Python S60 1.4.5 source code */
to get the correct result:  AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note:  The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarena always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)			\
	((POOL)->arenaindex < maxarenas &&		\
	 (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
	 arenas[(POOL)->arenaindex].address != 0)
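
/* Illustrative walk-through of the test above, with made-up numbers:
 * suppose ARENA_SIZE is 256 KB, arena slot 3 is live at base address
 * 0x20000000, and P == 0x20010000 sits in a pool whose header stores
 * arenaindex == 3.  Then
 *
 *	3 < maxarenas				index is in bounds
 *	0x20010000 - 0x20000000 < ARENA_SIZE	P lies inside arena 3
 *	arenas[3].address != 0			arena 3 is still allocated
 *
 * so P is (correctly) reported as controlled by obmalloc.  Had arena 3
 * been free()'ed, its address field would be 0 and the last clause would
 * reject P, which is exactly the Python 2.5 change described above.
 */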


/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when running under
 * Purify or Valgrind.  By making it a function, we can easily
 * suppress the uninitialized memory reads in this one function,
 * so we don't have to ignore real errors elsewhere.
 *
 * Disable the macro and use a function instead.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
			  (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif
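
/* When Py_USING_MEMORY_DEBUGGER is defined and the macro is replaced by the
 * function above, a memory-debugger suppression can be scoped to this one
 * symbol instead of to all of obmalloc.  For Valgrind's Memcheck the entry
 * looks roughly like the ones CPython ships in Misc/valgrind-python.supp:
 *
 *	{
 *	   uninitialized reads in the address-classification test
 *	   Memcheck:Cond
 *	   fun:Py_ADDRESS_IN_RANGE
 *	}
 *
 * The exact suppression kinds needed (Cond, Value4, Addr4, ...) vary with
 * the platform and Valgrind version.
 */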

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */
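
/* Illustrative usage, stated only in terms of the contract above (the
 * names are this build's entry points; the outcome holds memory permitting):
 *
 *	void *a = _THIS_MALLOC(0);	(non-NULL, memory permitting)
 *	void *b = _THIS_MALLOC(0);	(non-NULL and distinct from a)
 *	_THIS_FREE(a);
 *	_THIS_FREE(b);
 *
 * Zero-byte requests end up in the `redirect' path below, where nbytes is
 * bumped to 1 so the underlying allocator hands back a unique pointer.
 */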

/*
 * The basic blocks are ordered by decreasing execution frequency, which
 * minimizes the number of jumps in the most common cases and improves
 * branch prediction and instruction scheduling (small block allocations
 * typically compile down to a handful of instructions) -- unless the
 * optimizer gets too smart and reorders everything.
 */

/*#undef PyObject_Malloc*/
void *
_THIS_MALLOC(size_t nbytes)
{
	block *bp;
	poolp pool;
	poolp next;
	uint size;

	/*
	 * This implicitly redirects malloc(0):  for nbytes == 0 the
	 * unsigned nbytes - 1 below wraps to the maximum size_t value,
	 * the test fails, and control falls through to the `redirect'
	 * path at the bottom, where the request is bumped to 1 byte.
	 */
	if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
		LOCK();
		/*
		 * Most frequent paths first
		 */
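		/*
		 * Worked example (illustrative figures, assuming the usual
		 * 8-byte ALIGNMENT):  a request for nbytes == 20 gives
		 * size == (20 - 1) >> 3 == 2, i.e. the 24-byte size class.
		 * usedpools keeps two pointer slots per size class, overlaid
		 * on the nextpool/prevpool fields of a dummy pool header,
		 * which is why the index below is size + size.
		 */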
		size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
		pool = usedpools[size + size];
		if (pool != pool->nextpool) {
			/*
			 * There is a used pool for this size class.
			 * Pick up the head block of its free list.
			 */
			++pool->ref.count;
			bp = pool->freeblock;
			assert(bp != NULL);
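			/* Each free block stores a pointer to the next free
			 * block in its first word (a NULL-terminated list
			 * threaded through the free blocks themselves), so
			 * popping the head is the single load below.
			 */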
			if ((pool->freeblock = *(block **)bp) != NULL) {
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Reached the end of the free list, try to extend it.
			 */
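			/* Pools are carved lazily:  blocks at or beyond
			 * nextoffset have never been handed out and hold no
			 * embedded free-list pointer yet.  Extending the free
			 * list just points freeblock at the virgin block at
			 * nextoffset and advances nextoffset by one block size.
			 */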
			if (pool->nextoffset <= pool->maxnextoffset) {
				/* There is room for another block. */
				pool->freeblock = (block*)pool +
						  pool->nextoffset;
				pool->nextoffset += INDEX2SIZE(size);
				*(block **)(pool->freeblock) = NULL;
				UNLOCK();
				return (void *)bp;
			}
			/* Pool is full, unlink from used pools. */
			next = pool->nextpool;
			pool = pool->prevpool;
			next->prevpool = pool;
			pool->nextpool = next;
			UNLOCK();
			return (void *)bp;
		}

		/* There isn't a pool of the right size class immediately
		 * available:  use a free pool.
		 */
		if (usable_arenas == NULL) {
			/* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
			if (narenas_currently_allocated >= MAX_ARENAS) {
				UNLOCK();
				goto redirect;
			}
#endif
			usable_arenas = new_arena();
			if (usable_arenas == NULL) {
				UNLOCK();
				goto redirect;
			}
			usable_arenas->nextarena =
				usable_arenas->prevarena = NULL;
		}
		assert(usable_arenas->address != 0);

		/* Try to get a cached free pool. */
		pool = usable_arenas->freepools;
		if (pool != NULL) {
			/* Unlink from cached pools. */
			usable_arenas->freepools = pool->nextpool;

			/* This arena already had the smallest nfreepools
			 * value, so decreasing nfreepools doesn't change
			 * that, and we don't need to rearrange the
			 * usable_arenas list.  However, if the arena has
			 * become wholly allocated, we need to remove its
			 * arena_object from usable_arenas.
			 */
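			/* usable_arenas is kept sorted in increasing order of
			 * nfreepools, so pools are always carved from the most
			 * heavily used arena.  That gives lightly used arenas
			 * the best chance of draining completely and being
			 * returned to the system (case 1 in the free path).
			 */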
			--usable_arenas->nfreepools;
			if (usable_arenas->nfreepools == 0) {
				/* Wholly allocated:  remove. */
				assert(usable_arenas->freepools == NULL);
				assert(usable_arenas->nextarena == NULL ||
				       usable_arenas->nextarena->prevarena ==
					   usable_arenas);

				usable_arenas = usable_arenas->nextarena;
				if (usable_arenas != NULL) {
					usable_arenas->prevarena = NULL;
					assert(usable_arenas->address != 0);
				}
			}
			else {
				/* nfreepools > 0:  it must be that freepools
				 * isn't NULL, or that we haven't yet carved
				 * off all the arena's pools for the first
				 * time.
				 */
				assert(usable_arenas->freepools != NULL ||
				       usable_arenas->pool_address <=
				           (block*)usable_arenas->address +
				               ARENA_SIZE - POOL_SIZE);
			}
		init_pool:
			/* Frontlink to used pools. */
			next = usedpools[size + size]; /* == prev */
			pool->nextpool = next;
			pool->prevpool = next;
			next->nextpool = pool;
			next->prevpool = pool;
			pool->ref.count = 1;
			if (pool->szidx == size) {
				/* Luckily, this pool last contained blocks
				 * of the same size class, so its header
				 * and free list are already initialized.
				 */
				bp = pool->freeblock;
				pool->freeblock = *(block **)bp;
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Initialize the pool header, set up the free list to
			 * contain just the second block, and return the first
			 * block.
			 */
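			/*
			 * Illustrative figures (assuming the usual 8-byte
			 * ALIGNMENT, a 4 KB POOL_SIZE and a 32-byte
			 * POOL_OVERHEAD, as on a typical 32-bit build):  for
			 * size class 2 (24-byte blocks) the code below leaves
			 * bp at pool + 32 (the block returned), freeblock at
			 * pool + 56 (the second block), nextoffset at
			 * 32 + 48 == 80 (where the third block will be
			 * carved), and maxnextoffset at 4096 - 24 == 4072.
			 */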
			pool->szidx = size;
			size = INDEX2SIZE(size);
			bp = (block *)pool + POOL_OVERHEAD;
			pool->nextoffset = POOL_OVERHEAD + (size << 1);
			pool->maxnextoffset = POOL_SIZE - size;
			pool->freeblock = bp + size;
			*(block **)(pool->freeblock) = NULL;
			UNLOCK();
			return (void *)bp;
		}

		/* Carve off a new pool. */
		assert(usable_arenas->nfreepools > 0);
		assert(usable_arenas->freepools == NULL);
		pool = (poolp)usable_arenas->pool_address;
		assert((block*)pool <= (block*)usable_arenas->address +
		                       ARENA_SIZE - POOL_SIZE);
		pool->arenaindex = usable_arenas - arenas;
		assert(&arenas[pool->arenaindex] == usable_arenas);
		pool->szidx = DUMMY_SIZE_IDX;
		usable_arenas->pool_address += POOL_SIZE;
		--usable_arenas->nfreepools;

		if (usable_arenas->nfreepools == 0) {
			assert(usable_arenas->nextarena == NULL ||
			       usable_arenas->nextarena->prevarena ==
			       	   usable_arenas);
			/* Unlink the arena:  it is completely allocated. */
			usable_arenas = usable_arenas->nextarena;
			if (usable_arenas != NULL) {
				usable_arenas->prevarena = NULL;
				assert(usable_arenas->address != 0);
			}
		}

		goto init_pool;
	}

	/* The small block allocator ends here. */

redirect:
	/* Redirect the original request to the underlying (libc) allocator.
	 * We jump here on bigger requests, on error in the code above (as a
	 * last chance to serve the request) or when the max memory limit
	 * has been reached.
	 */
	if (nbytes == 0)
		nbytes = 1;
	return (void *)_SYSTEM_MALLOC(nbytes);
}

/* free */

/*#undef PyObject_Free*/
void
_THIS_FREE(void *p)
{
	poolp pool;
	block *lastfree;
	poolp next, prev;
	uint size;

	if (p == NULL)	/* free(NULL) has no effect */
		return;

	pool = POOL_ADDR(p);
	if (Py_ADDRESS_IN_RANGE(p, pool)) {
		/* We allocated this address. */
		LOCK();
		/* Link p to the start of the pool's freeblock list.  Since
		 * the pool had at least the p block outstanding, the pool
		 * wasn't empty (so it's already in a usedpools[] list, or
		 * was full and is in no list -- it's not in the freeblocks
		 * list in any case).
		 */
		assert(pool->ref.count > 0);	/* else it was empty */
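		/* Push p on the pool's intrusive free list:  its first word
		 * now holds the old list head, mirroring the pop in
		 * _THIS_MALLOC.
		 */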
		*(block **)p = lastfree = pool->freeblock;
		pool->freeblock = (block *)p;
		if (lastfree) {
			struct arena_object* ao;
			uint nf;  /* ao->nfreepools */

			/* freeblock wasn't NULL, so the pool wasn't full,
			 * and the pool is in a usedpools[] list.
			 */
			if (--pool->ref.count != 0) {
				/* pool isn't empty:  leave it in usedpools */
				UNLOCK();
				return;
			}
			/* Pool is now empty:  unlink from usedpools, and
			 * link to the front of freepools.  This ensures that
			 * previously freed pools will be allocated later
			 * (not being referenced, they may have been paged out).
			 */
			next = pool->nextpool;
			prev = pool->prevpool;
			next->prevpool = prev;
			prev->nextpool = next;

			/* Link the pool to freepools.  This is a singly-linked
			 * list, and pool->prevpool isn't used there.
			 */
			ao = &arenas[pool->arenaindex];
			pool->nextpool = ao->freepools;
			ao->freepools = pool;
			nf = ++ao->nfreepools;

			/* All the rest is arena management.  We just freed
			 * a pool, and there are 4 cases for arena mgmt:
			 * 1. If all the pools are free, return the arena to
			 *    the system free().
			 * 2. If this is the only free pool in the arena,
			 *    add the arena back to the `usable_arenas` list.
			 * 3. If the "next" arena has a smaller count of free
			 *    pools, we have to "slide this arena right" to
			 *    keep usable_arenas sorted in increasing order
			 *    of nfreepools.
			 * 4. Else there's nothing more to do.
			 */
			if (nf == ao->ntotalpools) {
				/* Case 1.  First unlink ao from usable_arenas.
				 */
				assert(ao->prevarena == NULL ||
				       ao->prevarena->address != 0);
				assert(ao->nextarena == NULL ||
				       ao->nextarena->address != 0);

				/* Fix the pointer in the prevarena, or the
				 * usable_arenas pointer.
				 */
				if (ao->prevarena == NULL) {
					usable_arenas = ao->nextarena;
					assert(usable_arenas == NULL ||
					       usable_arenas->address != 0);
				}
				else {
					assert(ao->prevarena->nextarena == ao);
					ao->prevarena->nextarena =
						ao->nextarena;
				}
				/* Fix the pointer in the nextarena. */
				if (ao->nextarena != NULL) {
					assert(ao->nextarena->prevarena == ao);
					ao->nextarena->prevarena =
						ao->prevarena;
				}
				/* Record that this arena_object slot is
				 * available to be reused.
				 */
				ao->nextarena = unused_arena_objects;
				unused_arena_objects = ao;

				/* Free the entire arena. */
#ifdef WITH_DLC
				dlc_free((void *)ao->address);
#else
				_SYSTEM_FREE((void *)ao->address);
#endif

				ao->address = 0;	/* mark unassociated */
				--narenas_currently_allocated;

				UNLOCK();
