
📄 slab.c

📁 ARM Embedded System Design and Example Development Lab Textbook, Part 2 Source Code
💻 C
📖 Page 1 of 4
	 * The pages have been unlinked from their cache-slab,
	 * but their 'struct page's might be accessed in
	 * vm_scan(). Shouldn't be a worry.
	 */
	while (i--) {
		PageClearSlab(page);
		page++;
	}
	free_pages((unsigned long)addr, cachep->gfporder);
}

#if DEBUG
static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
{
	int size = cachep->objsize;
	if (cachep->flags & SLAB_RED_ZONE) {
		addr += BYTES_PER_WORD;
		size -= 2*BYTES_PER_WORD;
	}
	memset(addr, POISON_BYTE, size);
	*(unsigned char *)(addr+size-1) = POISON_END;
}

static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
{
	int size = cachep->objsize;
	void *end;
	if (cachep->flags & SLAB_RED_ZONE) {
		addr += BYTES_PER_WORD;
		size -= 2*BYTES_PER_WORD;
	}
	end = memchr(addr, POISON_END, size);
	if (end != (addr+size-1))
		return 1;
	return 0;
}
#endif

/* Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.
 * The cache-lock is not held/needed.
 */
static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
{
	if (cachep->dtor
#if DEBUG
		|| cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
#endif
	) {
		int i;
		for (i = 0; i < cachep->num; i++) {
			void* objp = slabp->s_mem+cachep->objsize*i;
#if DEBUG
			if (cachep->flags & SLAB_RED_ZONE) {
				if (*((unsigned long*)(objp)) != RED_MAGIC1)
					BUG();
				if (*((unsigned long*)(objp + cachep->objsize
						-BYTES_PER_WORD)) != RED_MAGIC1)
					BUG();
				objp += BYTES_PER_WORD;
			}
#endif
			if (cachep->dtor)
				(cachep->dtor)(objp, cachep, 0);
#if DEBUG
			if (cachep->flags & SLAB_RED_ZONE) {
				objp -= BYTES_PER_WORD;
			}
			if ((cachep->flags & SLAB_POISON)  &&
				kmem_check_poison_obj(cachep, objp))
				BUG();
#endif
		}
	}

	kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slabp);
}

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @offset: The offset to use within the page.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 * @dtor: A destructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache
 * and the @dtor is run before the pages are handed back.
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
 * memory pressure.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
kmem_cache_t *
kmem_cache_create (const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
	void (*dtor)(void*, kmem_cache_t *, unsigned long))
{
	const char *func_nm = KERN_ERR "kmem_create: ";
	size_t left_over, align, slab_size;
	kmem_cache_t *cachep = NULL;

	/*
	 * Sanity checks... these are all serious usage bugs.
	 */
	if ((!name) ||
		((strlen(name) >= CACHE_NAMELEN - 1)) ||
		in_interrupt() ||
		(size < BYTES_PER_WORD) ||
		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
		(dtor && !ctor) ||
		(offset < 0 || offset > size))
			BUG();

#if DEBUG
	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
		/* No constructor, but inital state check requested */
		printk("%sNo con, but init state check requested - %s\n", func_nm, name);
		flags &= ~SLAB_DEBUG_INITIAL;
	}

	if ((flags & SLAB_POISON) && ctor) {
		/* request for poisoning, but we can't do that with a constructor */
		printk("%sPoisoning requested, but con given - %s\n", func_nm, name);
		flags &= ~SLAB_POISON;
	}
#if FORCED_DEBUG
	if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
		/*
		 * do not red zone large object, causes severe
		 * fragmentation.
		 */
		flags |= SLAB_RED_ZONE;
	if (!ctor)
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Always checks flags, a caller might be expecting debug
	 * support which isn't available.
	 */
	if (flags & ~CREATE_MASK)
		BUG();

	/* Get cache's description obj. */
	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
	if (!cachep)
		goto opps;
	memset(cachep, 0, sizeof(kmem_cache_t));

	/* Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD-1)) {
		size += (BYTES_PER_WORD-1);
		size &= ~(BYTES_PER_WORD-1);
		printk("%sForcing size word alignment - %s\n", func_nm, name);
	}

#if DEBUG
	if (flags & SLAB_RED_ZONE) {
		/*
		 * There is no point trying to honour cache alignment
		 * when redzoning.
		 */
		flags &= ~SLAB_HWCACHE_ALIGN;
		size += 2*BYTES_PER_WORD;	/* words for redzone */
	}
#endif
	align = BYTES_PER_WORD;
	if (flags & SLAB_HWCACHE_ALIGN)
		align = L1_CACHE_BYTES;

	/* Determine if the slab management is 'on' or 'off' slab. */
	if (size >= (PAGE_SIZE>>3))
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

	if (flags & SLAB_HWCACHE_ALIGN) {
		/* Need to adjust size so that objs are cache aligned. */
		/* Small obj size, can get at least two per cache line. */
		/* FIXME: only power of 2 supported, was better */
		while (size < align/2)
			align /= 2;
		size = (size+align-1)&(~(align-1));
	}

	/* Cal size (in pages) of slabs, and the num of objs per slab.
	 * This could be made much more intelligent.  For now, try to avoid
	 * using high page-orders for slabs.  When the gfp() funcs are more
	 * friendly towards high-order requests, this should be changed.
	 */
	do {
		unsigned int break_flag = 0;
cal_wastage:
		kmem_cache_estimate(cachep->gfporder, size, flags,
						&left_over, &cachep->num);
		if (break_flag)
			break;
		if (cachep->gfporder >= MAX_GFP_ORDER)
			break;
		if (!cachep->num)
			goto next;
		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
			/* Oops, this num of objs will cause problems. */
			cachep->gfporder--;
			break_flag++;
			goto cal_wastage;
		}

		/*
		 * Large num of objs is good, but v. large slabs are currently
		 * bad for the gfp()s.
		 */
		if (cachep->gfporder >= slab_break_gfp_order)
			break;

		if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
			break;	/* Acceptable internal fragmentation. */
next:
		cachep->gfporder++;
	} while (1);

	if (!cachep->num) {
		printk("kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto opps;
	}
	slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t)+sizeof(slab_t));

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	/* Offset must be a multiple of the alignment. */
	offset += (align-1);
	offset &= ~(align-1);
	if (!offset)
		offset = L1_CACHE_BYTES;
	cachep->colour_off = offset;
	cachep->colour = left_over/offset;

	/* init remaining fields */
	if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
		flags |= CFLGS_OPTIMIZE;

	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (flags & SLAB_CACHE_DMA)
		cachep->gfpflags |= GFP_DMA;
	spin_lock_init(&cachep->spinlock);
	cachep->objsize = size;
	INIT_LIST_HEAD(&cachep->slabs_full);
	INIT_LIST_HEAD(&cachep->slabs_partial);
	INIT_LIST_HEAD(&cachep->slabs_free);

	if (flags & CFLGS_OFF_SLAB)
		cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
	cachep->ctor = ctor;
	cachep->dtor = dtor;
	/* Copy name over so we don't have problems with unloaded modules */
	strcpy(cachep->name, name);

#ifdef CONFIG_SMP
	if (g_cpucache_up)
		enable_cpucache(cachep);
#endif
	/* Need the semaphore to access the chain. */
	down(&cache_chain_sem);
	{
		struct list_head *p;

		list_for_each(p, &cache_chain) {
			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);

			/* The name field is constant - no lock needed. */
			if (!strcmp(pc->name, name))
				BUG();
		}
	}

	/* There is no reason to lock our new cache before we
	 * link it in - no one knows about it yet...
	 */
	list_add(&cachep->next, &cache_chain);
	up(&cache_chain_sem);
opps:
	return cachep;
}

#if DEBUG
/*
 * This check if the kmem_cache_t pointer is chained in the cache_cache
 * list. -arca
 */
static int is_chained_kmem_cache(kmem_cache_t * cachep)
{
	struct list_head *p;
	int ret = 0;

	/* Find the cache in the chain of caches. */
	down(&cache_chain_sem);
	list_for_each(p, &cache_chain) {
		if (p == &cachep->next) {
			ret = 1;
			break;
		}
	}
	up(&cache_chain_sem);

	return ret;
}
#else
#define is_chained_kmem_cache(x) 1
#endif

#ifdef CONFIG_SMP
/*
 * Waits for all CPUs to execute func().
 */
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
{
	local_irq_disable();
	func(arg);
	local_irq_enable();

	if (smp_call_function(func, arg, 1, 1))
		BUG();
}

typedef struct ccupdate_struct_s
{
	kmem_cache_t *cachep;
	cpucache_t *new[NR_CPUS];
} ccupdate_struct_t;

static void do_ccupdate_local(void *info)
{
	ccupdate_struct_t *new = (ccupdate_struct_t *)info;
	cpucache_t *old = cc_data(new->cachep);

	cc_data(new->cachep) = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

static void free_block (kmem_cache_t* cachep, void** objpp, int len);

static void drain_cpu_caches(kmem_cache_t *cachep)
{
	ccupdate_struct_t new;
	int i;

	memset(&new.new,0,sizeof(new.new));

	new.cachep = cachep;

	down(&cache_chain_sem);
	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);

	for (i = 0; i < smp_num_cpus; i++) {
		cpucache_t* ccold = new.new[cpu_logical_map(i)];
		if (!ccold || (ccold->avail == 0))
			continue;
		local_irq_disable();
		free_block(cachep, cc_entry(ccold), ccold->avail);
		local_irq_enable();
		ccold->avail = 0;
	}
	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
	up(&cache_chain_sem);
}

#else
#define drain_cpu_caches(cachep)	do { } while (0)
#endif

static int __kmem_cache_shrink(kmem_cache_t *cachep)
{
	slab_t *slabp;
	int ret;

	drain_cpu_caches(cachep);

	spin_lock_irq(&cachep->spinlock);

	/* If the cache is growing, stop shrinking. */
	while (!cachep->growing) {
		struct list_head *p;

		p = cachep->slabs_free.prev;
		if (p == &cachep->slabs_free)
			break;

		slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
#if DEBUG
		if (slabp->inuse)
			BUG();
#endif
		list_del(&slabp->list);

		spin_unlock_irq(&cachep->spinlock);
		kmem_slab_destroy(cachep, slabp);
		spin_lock_irq(&cachep->spinlock);
	}
	ret = !list_empty(&cachep->slabs_full) || !list_empty(&cachep->slabs_partial);
	spin_unlock_irq(&cachep->spinlock);
	return ret;
}

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(kmem_cache_t *cachep)
{
	if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
		BUG();

	return __kmem_cache_shrink(cachep);
}

/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a kmem_cache_t object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
 * unloaded.  This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
int kmem_cache_destroy (kmem_cache_t * cachep)
{
	if (!cachep || in_interrupt() || cachep->growing)
		BUG();

	/* Find the cache in the chain of caches. */
	down(&cache_chain_sem);
	/* the chain is never empty, cache_cache is never destroyed */
	if (clock_searchp == cachep)
		clock_searchp = list_entry(cachep->next.next,
						kmem_cache_t, next);
	list_del(&cachep->next);
	up(&cache_chain_sem);

	if (__kmem_cache_shrink(cachep)) {
		printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
		       cachep);
		down(&cache_chain_sem);
		list_add(&cachep->next,&cache_chain);
		up(&cache_chain_sem);
		return 1;
	}
#ifdef CONFIG_SMP
	{
		int i;
		for (i = 0; i < NR_CPUS; i++)
			kfree(cachep->cpudata[i]);
	}
#endif
	kmem_cache_free(&cache_cache, cachep);

	return 0;
}

/* Get the memory for a slab management obj. */
static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
			void *objp, int colour_off, int local_flags)
{
	slab_t *slabp;

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
		if (!slabp)
			return NULL;
	} else {
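
The excerpt above documents the cache life-cycle API: kmem_cache_create(), kmem_cache_alloc(), kmem_cache_free(), kmem_cache_shrink() and kmem_cache_destroy(). The following is a minimal usage sketch against that 2.4-era interface; it is not part of slab.c, and the module, the my_widget structure, widget_ctor() and the other widget_* names are invented purely for illustration.

/*
 * Hypothetical usage sketch of the 2.4-era slab cache API shown above.
 * All widget_* names and struct my_widget are invented for illustration.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_widget {
	int id;
	char payload[60];
};

static kmem_cache_t *widget_cache;

/* Constructor: run by the cache each time it grows by a new slab. */
static void widget_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	struct my_widget *w = (struct my_widget *)obj;
	w->id = -1;
}

static int __init widget_init(void)
{
	/* name must fit CACHE_NAMELEN, size >= BYTES_PER_WORD, dtor needs ctor */
	widget_cache = kmem_cache_create("my_widget", sizeof(struct my_widget),
					 0, SLAB_HWCACHE_ALIGN, widget_ctor, NULL);
	if (!widget_cache)
		return -ENOMEM;
	return 0;
}

static void __exit widget_exit(void)
{
	/* Caller must guarantee no allocations are in flight here. */
	if (kmem_cache_destroy(widget_cache))
		printk(KERN_ERR "widget: cache still has live objects\n");
}

/* Typical allocation/free pair somewhere in the driver: */
static struct my_widget *widget_get(void)
{
	return kmem_cache_alloc(widget_cache, SLAB_KERNEL);
}

static void widget_put(struct my_widget *w)
{
	kmem_cache_free(widget_cache, w);
}

module_init(widget_init);
module_exit(widget_exit);

Between load and unload, such a module could also call kmem_cache_shrink(widget_cache) to hand unused slabs back to the page allocator without tearing the cache down; per the doc comment above, a zero return means every slab was released.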
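
The sizing logic in kmem_cache_create() rounds the object size up twice: first to a whole number of machine words, then, for SLAB_HWCACHE_ALIGN, to a multiple of the hardware cache line or of a power-of-two fraction of it when the object is small enough that several fit per line. The standalone snippet below reproduces that arithmetic outside the kernel; BYTES_PER_WORD = 4 and L1_CACHE_BYTES = 32 are assumed example values, and cache_align() is a helper invented for this illustration.

/*
 * Standalone illustration (not kernel code) of the round-up arithmetic
 * used in kmem_cache_create() above.  BYTES_PER_WORD and L1_CACHE_BYTES
 * are assumed example values.
 */
#include <stdio.h>

#define BYTES_PER_WORD	4
#define L1_CACHE_BYTES	32

static unsigned long cache_align(unsigned long size)
{
	unsigned long align = L1_CACHE_BYTES;

	/* word alignment, as in "Forcing size word alignment" */
	if (size & (BYTES_PER_WORD-1)) {
		size += (BYTES_PER_WORD-1);
		size &= ~(unsigned long)(BYTES_PER_WORD-1);
	}

	/* SLAB_HWCACHE_ALIGN: halve align while the object fits in half of it */
	while (size < align/2)
		align /= 2;
	return (size+align-1)&(~(align-1));
}

int main(void)
{
	/* 10 -> 12 (word-aligned) -> 16 (half cache line); 20 -> 32 (full line) */
	printf("10 -> %lu\n", cache_align(10));
	printf("20 -> %lu\n", cache_align(20));
	return 0;
}

With those constants, a 10-byte object becomes 12 bytes after word alignment and 16 bytes after the cache-line step, while a 20-byte object is padded to a full 32-byte line.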
