
📄 slab.c

📁 The latest and most stable Linux memory-management module source code
💻 C
📖 Page 1 of 5
			if (cachep->buffer_size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->buffer_size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_debugcheck(cachep, slabp);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct slab_rcu *slab_rcu;

		slab_rcu = (struct slab_rcu *)slabp;
		slab_rcu->cachep = cachep;
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
		kmem_freepages(cachep, addr);
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
}

static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;

	for_each_online_cpu(i)
	    kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	kmem_cache_free(&cache_cache, cachep);
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		if (flags & CFLGS_OFF_SLAB) {
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			offslab_limit = size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_break_gfp_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}

static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
{
	if (g_cpucache_up == FULL)
		return enable_cpucache(cachep);

	if (g_cpucache_up == NONE) {
		/*
		 * Note: the first kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
		 * the first cache, then we need to set up all its list3s,
		 * otherwise the creation of further caches will BUG().
		 */
		set_up_list3s(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_L3)
			g_cpucache_up = PARTIAL_L3;
		else
			g_cpucache_up = PARTIAL_AC;
	} else {
		cachep->array[smp_processor_id()] =
			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		if (g_cpucache_up == PARTIAL_AC) {
			set_up_list3s(cachep, SIZE_L3);
			g_cpucache_up = PARTIAL_L3;
		} else {
			int node;
			for_each_online_node(node) {
				cachep->nodelists[node] =
				    kmalloc_node(sizeof(struct kmem_list3),
						GFP_KERNEL, node);
				BUG_ON(!cachep->nodelists[node]);
				kmem_list3_init(cachep->nodelists[node]);
			}
		}
	}
	cachep->nodelists[numa_node_id()]->next_reap =
			jiffies + REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
 * therefore applications must manage it themselves.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL, *pc;

	/*
	 * Sanity checks... these are all serious usage bugs.
	 */
	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
	    size > KMALLOC_MAX_SIZE) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
				name);
		BUG();
	}

	/*
	 * We use cache_chain_mutex to ensure a consistent view of
	 * cpu_online_mask as well.  Please see cpuup_callback
	 */
	get_online_cpus();
	mutex_lock(&cache_chain_mutex);

	list_for_each_entry(pc, &cache_chain, next) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(pc->name, tmp);
		if (res) {
			printk(KERN_ERR
			       "SLAB: cache with size %d has lost its name\n",
			       pc->buffer_size);
			continue;
		}

		if (!strcmp(pc->name, name)) {
			printk(KERN_ERR
			       "kmem_cache_create: duplicate cache %s\n", name);
			dump_stack();
			goto oops;
		}
	}

#if DEBUG
	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif
	/*
	 * Always checks flags, a caller might be expecting debug support which
	 * isn't available.
	 */
	BUG_ON(flags & ~CREATE_MASK);

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}

	/* calculate the final buffer alignment: */

	/* 1) arch recommendation: can be overridden for debug */
	if (flags & SLAB_HWCACHE_ALIGN) {
		/*
		 * Default alignment: as specified by the arch code.  Except if
		 * an object is really small, then squeeze multiple objects into
		 * one cacheline.
		 */
		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
	} else {
		ralign = BYTES_PER_WORD;
	}

	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
	 * alignment if either is greater than BYTES_PER_WORD.
	 */
	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* 2) arch mandated alignment */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < align) {
		ralign = align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	align = ralign;

	/* Get cache's description obj. */
	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
	if (!cachep)
		goto oops;

#if DEBUG
	cachep->obj_size = size;

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - size;
		size = PAGE_SIZE;
	}
#endif
#endif

	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
	 * it too early on.)
	 */
	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

	size = ALIGN(size, align);

	left_over = calculate_slab_order(cachep, size, align, flags);

	if (!cachep->num) {
		printk(KERN_ERR
		       "kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto oops;
	}
	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), align);

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
		slab_size =
		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < align)
		cachep->colour_off = align;
	cachep->colour = left_over / cachep->colour_off;
	cachep->slab_size = slab_size;
	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->gfpflags |= GFP_DMA;
	cachep->buffer_size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

	if (flags & CFLGS_OFF_SLAB) {
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
		/*
		 * This is a possibility for one of the malloc_sizes caches.
		 * But since we go off slab only for object size greater than
		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
		 * this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}
	cachep->ctor = ctor;
	cachep->name = name;

	if (setup_cpu_cache(cachep)) {
		__kmem_cache_destroy(cachep);
		cachep = NULL;
		goto oops;
	}

	/* cache setup completed, link it into the list */
	list_add(&cachep->next, &cache_chain);
oops:
	if (!cachep && (flags & SLAB_PANIC))
		panic("kmem_cache_create():
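
Supplementary note (not part of slab.c): the kerneldoc block above spells out the kmem_cache_create() contract — the name must outlive the cache, the ctor runs when the cache grows, and NULL is returned on failure. The sketch below shows how a client module of this kernel era might typically drive that API; the demo_obj type, the demo_* identifiers and the "demo_obj_cache" name are illustrative assumptions, not kernel code.

/* Minimal usage sketch of the kmem_cache_* API, assuming a ~2.6.2x kernel. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_obj {			/* hypothetical object type */
	int id;
	char payload[60];
};

static struct kmem_cache *demo_cachep;

/* Constructor: run by the allocator when new pages are added to the cache. */
static void demo_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct demo_obj));
}

static int __init demo_init(void)
{
	struct demo_obj *p;

	/* The name string must stay valid until kmem_cache_destroy(), per the kerneldoc above. */
	demo_cachep = kmem_cache_create("demo_obj_cache",
					sizeof(struct demo_obj), 0,
					SLAB_HWCACHE_ALIGN, demo_ctor);
	if (!demo_cachep)
		return -ENOMEM;

	/* Objects come from the dedicated cache rather than the generic kmalloc sizes. */
	p = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
	if (!p) {
		kmem_cache_destroy(demo_cachep);
		return -ENOMEM;
	}
	p->id = 1;
	kmem_cache_free(demo_cachep, p);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Destroy before unload, otherwise the cache "loses its name" as warned in kmem_cache_create(). */
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A dedicated cache like this mainly pays off for objects that are allocated and freed frequently and have a fixed size, which is the case the per-CPU arrays and slab colouring set up in the code above are designed to serve.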
