
📄 slab.c

📁 ARM Embedded System Design and Example Development: Lab Textbook (II) Source Code
💻 C
📖 Page 1 of 4
 */
void * kmalloc (size_t size, int flags)
{
	cache_sizes_t *csizep = cache_sizes;

	for (; csizep->cs_size; csizep++) {
		if (size > csizep->cs_size)
			continue;
		return __kmem_cache_alloc(flags & GFP_DMA ?
			 csizep->cs_dmacachep : csizep->cs_cachep, flags);
	}
	return NULL;
}

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
	unsigned long flags;
#if DEBUG
	CHECK_PAGE(virt_to_page(objp));
	if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
		BUG();
#endif

	local_irq_save(flags);
	__kmem_cache_free(cachep, objp);
	local_irq_restore(flags);
}

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree (const void *objp)
{
	kmem_cache_t *c;
	unsigned long flags;

	if (!objp)
		return;
	local_irq_save(flags);
	CHECK_PAGE(virt_to_page(objp));
	c = GET_PAGE_CACHE(virt_to_page(objp));
	__kmem_cache_free(c, (void*)objp);
	local_irq_restore(flags);
}

kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
{
	cache_sizes_t *csizep = cache_sizes;

	/* This function could be moved to the header file, and
	 * made inline so consumers can quickly determine what
	 * cache pointer they require.
	 */
	for ( ; csizep->cs_size; csizep++) {
		if (size > csizep->cs_size)
			continue;
		break;
	}
	return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
}

#ifdef CONFIG_SMP

/* called with cache_chain_sem acquired.  */
static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
{
	ccupdate_struct_t new;
	int i;

	/*
	 * These are admin-provided, so we are more graceful.
	 */
	if (limit < 0)
		return -EINVAL;
	if (batchcount < 0)
		return -EINVAL;
	if (batchcount > limit)
		return -EINVAL;
	if (limit != 0 && !batchcount)
		return -EINVAL;

	memset(&new.new,0,sizeof(new.new));
	if (limit) {
		for (i = 0; i< smp_num_cpus; i++) {
			cpucache_t* ccnew;

			ccnew = kmalloc(sizeof(void*)*limit+
					sizeof(cpucache_t), GFP_KERNEL);
			if (!ccnew)
				goto oom;
			ccnew->limit = limit;
			ccnew->avail = 0;
			new.new[cpu_logical_map(i)] = ccnew;
		}
	}
	new.cachep = cachep;
	spin_lock_irq(&cachep->spinlock);
	cachep->batchcount = batchcount;
	spin_unlock_irq(&cachep->spinlock);

	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);

	for (i = 0; i < smp_num_cpus; i++) {
		cpucache_t* ccold = new.new[cpu_logical_map(i)];
		if (!ccold)
			continue;
		local_irq_disable();
		free_block(cachep, cc_entry(ccold), ccold->avail);
		local_irq_enable();
		kfree(ccold);
	}
	return 0;
oom:
	for (i--; i >= 0; i--)
		kfree(new.new[cpu_logical_map(i)]);
	return -ENOMEM;
}

static void enable_cpucache (kmem_cache_t *cachep)
{
	int err;
	int limit;

	/* FIXME: optimize */
	if (cachep->objsize > PAGE_SIZE)
		return;
	if (cachep->objsize > 1024)
		limit = 60;
	else if (cachep->objsize > 256)
		limit = 124;
	else
		limit = 252;

	err = kmem_tune_cpucache(cachep, limit, limit/2);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
					cachep->name, -err);
}

static void enable_all_cpucaches (void)
{
	struct list_head* p;

	down(&cache_chain_sem);

	p = &cache_cache.next;
	do {
		kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);

		enable_cpucache(cachep);
		p = cachep->next.next;
	} while (p != &cache_cache.next);

	up(&cache_chain_sem);
}
#endif
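/*
 * Editorial illustration, not part of slab.c: a minimal sketch of how a
 * 2.4-era driver would typically consume the interfaces above. The cache
 * name "my_objs", struct my_obj and my_init() are hypothetical.
 */
#if 0	/* example only */
static kmem_cache_t *my_cachep;

struct my_obj {
	int id;
	char payload[60];
};

static int __init my_init(void)
{
	struct my_obj *obj;
	void *buf;

	/* A dedicated cache for one object type. */
	my_cachep = kmem_cache_create("my_objs", sizeof(struct my_obj),
				      0, 0, NULL, NULL);
	if (!my_cachep)
		return -ENOMEM;

	obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
	if (obj)
		kmem_cache_free(my_cachep, obj);

	/* The general-purpose path: kmalloc() walks the cache_sizes
	 * table and picks the smallest size-N cache that fits, so a
	 * 100-byte request should come from the size-128 cache. */
	buf = kmalloc(100, GFP_KERNEL);
	if (buf)
		kfree(buf);
	return 0;
}
#endif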
/**
 * kmem_cache_reap - Reclaim memory from caches.
 * @gfp_mask: the type of memory required.
 *
 * Called from do_try_to_free_pages() and __alloc_pages()
 */
int kmem_cache_reap (int gfp_mask)
{
	slab_t *slabp;
	kmem_cache_t *searchp;
	kmem_cache_t *best_cachep;
	unsigned int best_pages;
	unsigned int best_len;
	unsigned int scan;
	int ret = 0;

	if (gfp_mask & __GFP_WAIT)
		down(&cache_chain_sem);
	else
		if (down_trylock(&cache_chain_sem))
			return 0;

	scan = REAP_SCANLEN;
	best_len = 0;
	best_pages = 0;
	best_cachep = NULL;
	searchp = clock_searchp;
	do {
		unsigned int pages;
		struct list_head* p;
		unsigned int full_free;

		/* It's safe to test this without holding the cache-lock. */
		if (searchp->flags & SLAB_NO_REAP)
			goto next;
		spin_lock_irq(&searchp->spinlock);
		if (searchp->growing)
			goto next_unlock;
		if (searchp->dflags & DFLGS_GROWN) {
			searchp->dflags &= ~DFLGS_GROWN;
			goto next_unlock;
		}
#ifdef CONFIG_SMP
		{
			cpucache_t *cc = cc_data(searchp);
			if (cc && cc->avail) {
				__free_block(searchp, cc_entry(cc), cc->avail);
				cc->avail = 0;
			}
		}
#endif

		full_free = 0;
		p = searchp->slabs_free.next;
		while (p != &searchp->slabs_free) {
			slabp = list_entry(p, slab_t, list);
#if DEBUG
			if (slabp->inuse)
				BUG();
#endif
			full_free++;
			p = p->next;
		}

		/*
		 * Try to avoid slabs with constructors and/or
		 * more than one page per slab (as it can be difficult
		 * to get high orders from gfp()).
		 */
		pages = full_free * (1<<searchp->gfporder);
		if (searchp->ctor)
			pages = (pages*4+1)/5;
		if (searchp->gfporder)
			pages = (pages*4+1)/5;
		if (pages > best_pages) {
			best_cachep = searchp;
			best_len = full_free;
			best_pages = pages;
			if (pages >= REAP_PERFECT) {
				clock_searchp = list_entry(searchp->next.next,
							kmem_cache_t,next);
				goto perfect;
			}
		}
next_unlock:
		spin_unlock_irq(&searchp->spinlock);
next:
		searchp = list_entry(searchp->next.next,kmem_cache_t,next);
	} while (--scan && searchp != clock_searchp);

	clock_searchp = searchp;

	if (!best_cachep)
		/* couldn't find anything to reap */
		goto out;

	spin_lock_irq(&best_cachep->spinlock);
perfect:
	/* free only 50% of the free slabs */
	best_len = (best_len + 1)/2;
	for (scan = 0; scan < best_len; scan++) {
		struct list_head *p;

		if (best_cachep->growing)
			break;
		p = best_cachep->slabs_free.prev;
		if (p == &best_cachep->slabs_free)
			break;
		slabp = list_entry(p,slab_t,list);
#if DEBUG
		if (slabp->inuse)
			BUG();
#endif
		list_del(&slabp->list);
		STATS_INC_REAPED(best_cachep);

		/* Safe to drop the lock. The slab is no longer linked to the
		 * cache.
		 */
		spin_unlock_irq(&best_cachep->spinlock);
		kmem_slab_destroy(best_cachep, slabp);
		spin_lock_irq(&best_cachep->spinlock);
	}
	spin_unlock_irq(&best_cachep->spinlock);
	ret = scan * (1 << best_cachep->gfporder);
out:
	up(&cache_chain_sem);
	return ret;
}
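/*
 * Editorial illustration, not part of slab.c: the reaper above scores a
 * cache by its count of freeable pages, then discounts roughly 20% when
 * the cache has a constructor and again when slabs span multiple pages,
 * since such slabs are costlier to rebuild. reap_score() and the sample
 * numbers are hypothetical restatements of that arithmetic.
 */
#if 0	/* example only */
static unsigned int reap_score(unsigned int full_free,
			       unsigned int gfporder, int has_ctor)
{
	unsigned int pages = full_free * (1 << gfporder);

	if (has_ctor)
		pages = (pages*4+1)/5;	/* keep ~80% */
	if (gfporder)
		pages = (pages*4+1)/5;	/* keep ~80% again */
	return pages;
}
/* e.g. 10 free order-1 slabs with a ctor: 20 pages -> 16 -> 13 */
#endif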
#ifdef CONFIG_PROC_FS
/* /proc/slabinfo
 *	cache-name num-active-objs total-objs
 *	obj-size num-active-slabs total-slabs
 *	num-pages-per-slab
 */
#define FIXUP(t)				\
	do {					\
		if (len <= off) {		\
			off -= len;		\
			len = 0;		\
		} else {			\
			if (len-off > count)	\
				goto t;		\
		}				\
	} while (0)

static int proc_getdata (char*page, char**start, off_t off, int count)
{
	struct list_head *p;
	int len = 0;

	/* Output format version, so at least we can change it without _too_
	 * many complaints.
	 */
	len += sprintf(page+len, "slabinfo - version: 1.1"
#if STATS
				" (statistics)"
#endif
#ifdef CONFIG_SMP
				" (SMP)"
#endif
				"\n");
	FIXUP(got_data);

	down(&cache_chain_sem);
	p = &cache_cache.next;
	do {
		kmem_cache_t	*cachep;
		struct list_head *q;
		slab_t		*slabp;
		unsigned long	active_objs;
		unsigned long	num_objs;
		unsigned long	active_slabs = 0;
		unsigned long	num_slabs;
		cachep = list_entry(p, kmem_cache_t, next);

		spin_lock_irq(&cachep->spinlock);
		active_objs = 0;
		num_slabs = 0;
		list_for_each(q,&cachep->slabs_full) {
			slabp = list_entry(q, slab_t, list);
			if (slabp->inuse != cachep->num)
				BUG();
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each(q,&cachep->slabs_partial) {
			slabp = list_entry(q, slab_t, list);
			if (slabp->inuse == cachep->num || !slabp->inuse)
				BUG();
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each(q,&cachep->slabs_free) {
			slabp = list_entry(q, slab_t, list);
			if (slabp->inuse)
				BUG();
			num_slabs++;
		}
		num_slabs+=active_slabs;
		num_objs = num_slabs*cachep->num;

		len += sprintf(page+len, "%-17s %6lu %6lu %6u %4lu %4lu %4u",
			cachep->name, active_objs, num_objs, cachep->objsize,
			active_slabs, num_slabs, (1<<cachep->gfporder));

#if STATS
		{
			unsigned long errors = cachep->errors;
			unsigned long high = cachep->high_mark;
			unsigned long grown = cachep->grown;
			unsigned long reaped = cachep->reaped;
			unsigned long allocs = cachep->num_allocations;

			len += sprintf(page+len, " : %6lu %7lu %5lu %4lu %4lu",
					high, allocs, grown, reaped, errors);
		}
#endif
#ifdef CONFIG_SMP
		{
			cpucache_t *cc = cc_data(cachep);
			unsigned int batchcount = cachep->batchcount;
			unsigned int limit;

			if (cc)
				limit = cc->limit;
			else
				limit = 0;
			len += sprintf(page+len, " : %4u %4u",
					limit, batchcount);
		}
#endif
#if STATS && defined(CONFIG_SMP)
		{
			unsigned long allochit = atomic_read(&cachep->allochit);
			unsigned long allocmiss = atomic_read(&cachep->allocmiss);
			unsigned long freehit = atomic_read(&cachep->freehit);
			unsigned long freemiss = atomic_read(&cachep->freemiss);
			len += sprintf(page+len, " : %6lu %6lu %6lu %6lu",
					allochit, allocmiss, freehit, freemiss);
		}
#endif
		len += sprintf(page+len,"\n");
		spin_unlock_irq(&cachep->spinlock);
		FIXUP(got_data_up);
		p = cachep->next.next;
	} while (p != &cache_cache.next);
got_data_up:
	up(&cache_chain_sem);

got_data:
	*start = page+off;
	return len;
}

/**
 * slabinfo_read_proc - generates /proc/slabinfo
 * @page: scratch area, one page long
 * @start: pointer to the pointer to the output buffer
 * @off: offset within /proc/slabinfo the caller is interested in
 * @count: requested len in bytes
 * @eof: eof marker
 * @data: unused
 *
 * The contents of the buffer are
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
int slabinfo_read_proc (char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = proc_getdata(page, start, off, count);
	len -= (*start-page);
	if (len <= count)
		*eof = 1;
	if (len>count) len = count;
	if (len<0) len = 0;
	return len;
}
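/*
 * Editorial illustration, not part of slab.c: a user-space reader for the
 * format emitted by proc_getdata() above. Each non-header row starts with
 * "name active-objs total-objs objsize active-slabs total-slabs pages/slab";
 * this sketch only parses the first four columns.
 */
#if 0	/* example only, user space */
#include <stdio.h>

int main(void)
{
	char line[256], name[32];
	unsigned long active, total;
	unsigned int objsize;
	FILE *f = fopen("/proc/slabinfo", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the version header */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%31s %lu %lu %u",
			   name, &active, &total, &objsize) == 4)
			printf("%s: %lu/%lu objs, %u bytes each\n",
			       name, active, total, objsize);
	}
	fclose(f);
	return 0;
}
#endif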
#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write_proc - SMP tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data len
 * @data: unused
 */
int slabinfo_write_proc (struct file *file, const char *buffer,
				unsigned long count, void *data)
{
#ifdef CONFIG_SMP
	char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
	int limit, batchcount, res;
	struct list_head *p;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	limit = simple_strtol(tmp, &tmp, 10);
	while (*tmp == ' ')
		tmp++;
	batchcount = simple_strtol(tmp, &tmp, 10);

	/* Find the cache in the chain of caches. */
	down(&cache_chain_sem);
	res = -EINVAL;
	list_for_each(p,&cache_chain) {
		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);

		if (!strcmp(cachep->name, kbuf)) {
			res = kmem_tune_cpucache(cachep, limit, batchcount);
			break;
		}
	}
	up(&cache_chain_sem);
	if (res >= 0)
		res = count;
	return res;
#else
	return -EINVAL;
#endif
}
#endif
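/*
 * Editorial illustration, not part of slab.c: on an SMP 2.4 kernel the
 * per-CPU arrays can be tuned from user space by writing
 * "cache-name limit batchcount" to /proc/slabinfo, which lands in
 * kmem_tune_cpucache() via slabinfo_write_proc() above. "size-128" is
 * assumed to exist; substitute any name from the first slabinfo column.
 */
#if 0	/* example only, user space */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return 1;
	/* Allow up to 120 objects per CPU, moved in batches of 60;
	 * "size-128 0 0" would disable the per-CPU cache instead. */
	fprintf(f, "size-128 120 60\n");
	fclose(f);
	return 0;
}
#endif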
