
📄 slab.c

📁 ARM Embedded System Design and Example-Based Development, Lab Textbook (2) — source code
💻 C
📖 Page 1 of 4
		/* FIXME: change to
			slabp = objp
		 * if you enable OPTIMIZE
		 */
		slabp = objp+colour_off;
		colour_off += L1_CACHE_ALIGN(cachep->num *
				sizeof(kmem_bufctl_t) + sizeof(slab_t));
	}
	slabp->inuse = 0;
	slabp->colouroff = colour_off;
	slabp->s_mem = objp+colour_off;
	return slabp;
}

static inline void kmem_cache_init_objs (kmem_cache_t * cachep,
			slab_t * slabp, unsigned long ctor_flags)
{
	int i;

	for (i = 0; i < cachep->num; i++) {
		void* objp = slabp->s_mem+cachep->objsize*i;
#if DEBUG
		if (cachep->flags & SLAB_RED_ZONE) {
			*((unsigned long*)(objp)) = RED_MAGIC1;
			*((unsigned long*)(objp + cachep->objsize -
					BYTES_PER_WORD)) = RED_MAGIC1;
			objp += BYTES_PER_WORD;
		}
#endif

		/*
		 * Constructors are not allowed to allocate memory from
		 * the same cache which they are a constructor for.
		 * Otherwise, deadlock. They must also be threaded.
		 */
		if (cachep->ctor)
			cachep->ctor(objp, cachep, ctor_flags);
#if DEBUG
		if (cachep->flags & SLAB_RED_ZONE)
			objp -= BYTES_PER_WORD;
		if (cachep->flags & SLAB_POISON)
			/* need to poison the objs */
			kmem_poison_obj(cachep, objp);
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*((unsigned long*)(objp)) != RED_MAGIC1)
				BUG();
			if (*((unsigned long*)(objp + cachep->objsize -
					BYTES_PER_WORD)) != RED_MAGIC1)
				BUG();
		}
#endif
		slab_bufctl(slabp)[i] = i+1;
	}
	slab_bufctl(slabp)[i-1] = BUFCTL_END;
	slabp->free = 0;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
{
	slab_t	*slabp;
	struct page	*page;
	void		*objp;
	size_t		 offset;
	unsigned int	 i, local_flags;
	unsigned long	 ctor_flags;
	unsigned long	 save_flags;

	/* Be lazy and only check for valid flags here,
	 * keeping it out of the critical path in kmem_cache_alloc().
	 */
	if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
		BUG();
	if (flags & SLAB_NO_GROW)
		return 0;

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC)
		BUG();

	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
	local_flags = (flags & SLAB_LEVEL_MASK);
	if (local_flags == SLAB_ATOMIC)
		/*
		 * Not allowed to sleep.  Need to tell a constructor about
		 * this - it might need to know...
		 */
		ctor_flags |= SLAB_CTOR_ATOMIC;

	/* About to mess with non-constant members - lock. */
	spin_lock_irqsave(&cachep->spinlock, save_flags);

	/* Get colour for the slab, and cal the next value. */
	offset = cachep->colour_next;
	cachep->colour_next++;
	if (cachep->colour_next >= cachep->colour)
		cachep->colour_next = 0;
	offset *= cachep->colour_off;
	cachep->dflags |= DFLGS_GROWN;

	cachep->growing++;
	spin_unlock_irqrestore(&cachep->spinlock, save_flags);

	/* A series of memory allocations for a new slab.
	 * Neither the cache-chain semaphore, or cache-lock, are
	 * held, but the incrementing c_growing prevents this
	 * cache from being reaped or shrunk.
	 * Note: The cache could be selected in for reaping in
	 * kmem_cache_reap(), but when the final test is made the
	 * growing value will be seen.
	 */

	/* Get mem for the objs. */
	if (!(objp = kmem_getpages(cachep, flags)))
		goto failed;

	/* Get slab management. */
	if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, local_flags)))
		goto opps1;

	/* Nasty!!!!!! I hope this is OK. */
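	/* The loop below stamps every page of the new slab with back-pointers
	 * to the cache and to the slab descriptor, so that the free path can
	 * later recover them from an object pointer with
	 * GET_PAGE_SLAB(virt_to_page(objp)). */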
	i = 1 << cachep->gfporder;
	page = virt_to_page(objp);
	do {
		SET_PAGE_CACHE(page, cachep);
		SET_PAGE_SLAB(page, slabp);
		PageSetSlab(page);
		page++;
	} while (--i);

	kmem_cache_init_objs(cachep, slabp, ctor_flags);

	spin_lock_irqsave(&cachep->spinlock, save_flags);
	cachep->growing--;

	/* Make slab active. */
	list_add_tail(&slabp->list, &cachep->slabs_free);
	STATS_INC_GROWN(cachep);
	cachep->failures = 0;

	spin_unlock_irqrestore(&cachep->spinlock, save_flags);
	return 1;
opps1:
	kmem_freepages(cachep, objp);
failed:
	spin_lock_irqsave(&cachep->spinlock, save_flags);
	cachep->growing--;
	spin_unlock_irqrestore(&cachep->spinlock, save_flags);
	return 0;
}

/*
 * Perform extra freeing checks:
 * - detect double free
 * - detect bad pointers.
 * Called with the cache-lock held.
 */
#if DEBUG
static int kmem_extra_free_checks (kmem_cache_t * cachep,
			slab_t *slabp, void * objp)
{
	int i;
	unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;

	if (objnr >= cachep->num)
		BUG();
	if (objp != slabp->s_mem + objnr*cachep->objsize)
		BUG();

	/* Check slab's freelist to see if this obj is there. */
	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
		if (i == objnr)
			BUG();
	}
	return 0;
}
#endif

static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
{
	if (flags & SLAB_DMA) {
		if (!(cachep->gfpflags & GFP_DMA))
			BUG();
	} else {
		if (cachep->gfpflags & GFP_DMA)
			BUG();
	}
}

static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
						slab_t *slabp)
{
	void *objp;

	STATS_INC_ALLOCED(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	/* get obj pointer */
	slabp->inuse++;
	objp = slabp->s_mem + slabp->free*cachep->objsize;
	slabp->free=slab_bufctl(slabp)[slabp->free];

	if (unlikely(slabp->free == BUFCTL_END)) {
		list_del(&slabp->list);
		list_add(&slabp->list, &cachep->slabs_full);
	}
#if DEBUG
	if (cachep->flags & SLAB_POISON)
		if (kmem_check_poison_obj(cachep, objp))
			BUG();
	if (cachep->flags & SLAB_RED_ZONE) {
		/* Set alloc red-zone, and check old one. */
		if (xchg((unsigned long *)objp, RED_MAGIC2) !=
							 RED_MAGIC1)
			BUG();
		if (xchg((unsigned long *)(objp+cachep->objsize -
			  BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
			BUG();
		objp += BYTES_PER_WORD;
	}
#endif
	return objp;
}

/*
 * Returns a ptr to an obj in the given cache.
 * caller must guarantee synchronization
 * #define for the goto optimization 8-)
 */
#define kmem_cache_alloc_one(cachep)				\
({								\
	struct list_head * slabs_partial, * entry;		\
	slab_t *slabp;						\
								\
	slabs_partial = &(cachep)->slabs_partial;		\
	entry = slabs_partial->next;				\
	if (unlikely(entry == slabs_partial)) {			\
		struct list_head * slabs_free;			\
		slabs_free = &(cachep)->slabs_free;		\
		entry = slabs_free->next;			\
		if (unlikely(entry == slabs_free))		\
			goto alloc_new_slab;			\
		list_del(entry);				\
		list_add(entry, slabs_partial);			\
	}							\
								\
	slabp = list_entry(entry, slab_t, list);		\
	kmem_cache_alloc_one_tail(cachep, slabp);		\
})

#ifdef CONFIG_SMP
void* kmem_cache_alloc_batch(kmem_cache_t* cachep, cpucache_t* cc, int flags)
{
	int batchcount = cachep->batchcount;

	spin_lock(&cachep->spinlock);
	while (batchcount--) {
		struct list_head * slabs_partial, * entry;
		slab_t *slabp;
		/* Get slab alloc is to come from. */
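		/* Prefer a partially used slab; otherwise take a completely free
		 * slab and move it onto the partial list, and stop refilling the
		 * per-CPU array once both lists are empty. */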
		slabs_partial = &(cachep)->slabs_partial;
		entry = slabs_partial->next;
		if (unlikely(entry == slabs_partial)) {
			struct list_head * slabs_free;
			slabs_free = &(cachep)->slabs_free;
			entry = slabs_free->next;
			if (unlikely(entry == slabs_free))
				break;
			list_del(entry);
			list_add(entry, slabs_partial);
		}

		slabp = list_entry(entry, slab_t, list);
		cc_entry(cc)[cc->avail++] =
				kmem_cache_alloc_one_tail(cachep, slabp);
	}
	spin_unlock(&cachep->spinlock);

	if (cc->avail)
		return cc_entry(cc)[--cc->avail];
	return NULL;
}
#endif

static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
{
	unsigned long save_flags;
	void* objp;

	kmem_cache_alloc_head(cachep, flags);
try_again:
	local_irq_save(save_flags);
#ifdef CONFIG_SMP
	{
		cpucache_t *cc = cc_data(cachep);

		if (cc) {
			if (cc->avail) {
				STATS_INC_ALLOCHIT(cachep);
				objp = cc_entry(cc)[--cc->avail];
			} else {
				STATS_INC_ALLOCMISS(cachep);
				objp = kmem_cache_alloc_batch(cachep,cc,flags);
				if (!objp)
					goto alloc_new_slab_nolock;
			}
		} else {
			spin_lock(&cachep->spinlock);
			objp = kmem_cache_alloc_one(cachep);
			spin_unlock(&cachep->spinlock);
		}
	}
#else
	objp = kmem_cache_alloc_one(cachep);
#endif
	local_irq_restore(save_flags);
	return objp;
alloc_new_slab:
#ifdef CONFIG_SMP
	spin_unlock(&cachep->spinlock);
alloc_new_slab_nolock:
#endif
	local_irq_restore(save_flags);
	if (kmem_cache_grow(cachep, flags))
		/* Someone may have stolen our objs.  Doesn't matter, we'll
		 * just come back here again.
		 */
		goto try_again;
	return NULL;
}

/*
 * Release an obj back to its cache. If the obj has a constructed
 * state, it should be in this state _before_ it is released.
 * - caller is responsible for the synchronization
 */

#if DEBUG
# define CHECK_NR(pg)						\
	do {							\
		if (!VALID_PAGE(pg)) {				\
			printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
				(unsigned long)objp);		\
			BUG();					\
		} \
	} while (0)
# define CHECK_PAGE(page)					\
	do {							\
		CHECK_NR(page);					\
		if (!PageSlab(page)) {				\
			printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
				(unsigned long)objp);		\
			BUG();					\
		}						\
	} while (0)
#else
# define CHECK_PAGE(pg)	do { } while (0)
#endif

static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
	slab_t* slabp;

	CHECK_PAGE(virt_to_page(objp));
	/* reduces memory footprint
	 *
	if (OPTIMIZE(cachep))
		slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
	 else
	 */
	slabp = GET_PAGE_SLAB(virt_to_page(objp));

#if DEBUG
	if (cachep->flags & SLAB_DEBUG_INITIAL)
		/* Need to call the slab's constructor so the
		 * caller can perform a verify of its state (debugging).
		 * Called without the cache-lock held.
		 */
		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);

	if (cachep->flags & SLAB_RED_ZONE) {
		objp -= BYTES_PER_WORD;
		if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
			/* Either write before start, or a double free. */
			BUG();
		if (xchg((unsigned long *)(objp+cachep->objsize -
				BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
			/* Either write past end, or a double free. */
			BUG();
	}
	if (cachep->flags & SLAB_POISON)
		kmem_poison_obj(cachep, objp);
	if (kmem_extra_free_checks(cachep, slabp, objp))
		return;
#endif
	{
		unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;

		slab_bufctl(slabp)[objnr] = slabp->free;
		slabp->free = objnr;
	}
	STATS_DEC_ACTIVE(cachep);

	/* fixup slab chains */
	{
		int inuse = slabp->inuse;
		if (unlikely(!--slabp->inuse)) {
			/* Was partial or full, now empty. */
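			/* Note that `inuse` captured the pre-decrement count, so the
			 * `else if` branch below can still recognise a slab that was
			 * full before this free. */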
			list_del(&slabp->list);
			list_add(&slabp->list, &cachep->slabs_free);
		} else if (unlikely(inuse == cachep->num)) {
			/* Was full. */
			list_del(&slabp->list);
			list_add(&slabp->list, &cachep->slabs_partial);
		}
	}
}

#ifdef CONFIG_SMP
static inline void __free_block (kmem_cache_t* cachep,
							void** objpp, int len)
{
	for ( ; len > 0; len--, objpp++)
		kmem_cache_free_one(cachep, *objpp);
}

static void free_block (kmem_cache_t* cachep, void** objpp, int len)
{
	spin_lock(&cachep->spinlock);
	__free_block(cachep, objpp, len);
	spin_unlock(&cachep->spinlock);
}
#endif

/*
 * __kmem_cache_free
 * called with disabled ints
 */
static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
{
#ifdef CONFIG_SMP
	cpucache_t *cc = cc_data(cachep);

	CHECK_PAGE(virt_to_page(objp));
	if (cc) {
		int batchcount;
		if (cc->avail < cc->limit) {
			STATS_INC_FREEHIT(cachep);
			cc_entry(cc)[cc->avail++] = objp;
			return;
		}
		STATS_INC_FREEMISS(cachep);
		batchcount = cachep->batchcount;
		cc->avail -= batchcount;
		free_block(cachep,
					&cc_entry(cc)[cc->avail],batchcount);
		cc_entry(cc)[cc->avail++] = objp;
		return;
	} else {
		free_block(cachep, &objp, 1);
	}
#else
	kmem_cache_free_one(cachep, objp);
#endif
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
{
	return __kmem_cache_alloc(cachep, flags);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
 *
 * Additionally, the %GFP_DMA flag may be set to indicate the memory
 * must be suitable for DMA.  This can mean different things on different
 * platforms.  For example, on i386, it means that the memory must come
 * from the first 16MB.
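The kernel-doc comments above describe how callers use kmem_cache_alloc() and kmalloc(); the listing itself continues on the next page. A minimal usage sketch of the cache interface follows, assuming the 2.4-era API shown here; the struct my_obj, the cache name, and the init_once() constructor are hypothetical names used only for illustration, not part of slab.c.

/* Hypothetical example; my_obj, my_obj_cachep and init_once() are not part
 * of slab.c. Error handling is kept minimal. */
#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj {
	int state;
};

static kmem_cache_t *my_obj_cachep;

/* Constructor run on every object when a new slab is grown.  As the comment
 * in kmem_cache_init_objs() warns, it must not allocate from this cache. */
static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
{
	struct my_obj *obj = p;

	obj->state = 0;
}

static int my_obj_cache_setup(void)
{
	my_obj_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
					  0, SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (!my_obj_cachep)
		return -ENOMEM;
	return 0;
}

static struct my_obj *my_obj_get(void)
{
	/* GFP_KERNEL may sleep; use GFP_ATOMIC from interrupt context,
	 * as the kmalloc() comment above explains. */
	return kmem_cache_alloc(my_obj_cachep, GFP_KERNEL);
}

static void my_obj_put(struct my_obj *obj)
{
	/* kmem_cache_free() is defined later in slab.c. */
	kmem_cache_free(my_obj_cachep, obj);
}

On the allocation side such a call goes through the per-CPU cpucache_t array when CONFIG_SMP is set, falls back to kmem_cache_alloc_one(), and invokes kmem_cache_grow() only when every slab in the cache is exhausted.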
