
📄 slub.c

📁 Linux kernel memory-management module (SLUB allocator) source code
💻 C
📖 Page 1 of 5
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	goto unlock_out;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;
	unsigned int objsize;

	might_sleep_if(gfpflags & __GFP_WAIT);

	if (should_failslab(s->objsize, gfpflags))
		return NULL;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	objsize = c->objsize;
	if (unlikely(!c->freelist || !node_match(c, node)))
		object = __slab_alloc(s, gfpflags, node, addr, c);
	else {
		object = c->freelist;
		c->freelist = object[c->offset];
		stat(c, ALLOC_FASTPATH);
	}
	local_irq_restore(flags);

	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, objsize);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, -1, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	return slab_alloc(s, gfpflags, node, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
			void *x, unsigned long addr, unsigned int offset)
{
	void *prior;
	void **object = (void *)x;
	struct kmem_cache_cpu *c;

	c = get_cpu_slab(s, raw_smp_processor_id());
	stat(c, FREE_SLOWPATH);
	slab_lock(page);

	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
		goto debug;

checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageSlubFrozen(page))) {
		stat(c, FREE_FROZEN);
		goto out_unlock;
	}

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(c, FREE_ADD_PARTIAL);
	}

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);
		stat(c, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(c, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}
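kmem_cache_alloc() above (and kmem_cache_free() further below) are the entry points the rest of the kernel calls. The following is a minimal illustrative sketch of such a caller and is not part of slub.c; the cache name, object type and module wrapper are made up for the example.

/*
 * Illustrative only (not part of slub.c): a module that creates a cache,
 * allocates one object and frees it through the paths shown above.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {			/* hypothetical object type */
	int id;
	char payload[60];
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_obj *obj;

	demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cache)
		return -ENOMEM;

	obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);	/* goes through slab_alloc() */
	if (obj) {
		obj->id = 1;
		/* slab_free(); fastpath if the page is still this cpu's slab */
		kmem_cache_free(demo_cache, obj);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because slab_alloc() honours __GFP_ZERO (see the memset above), a caller that needs a zeroed object can pass __GFP_ZERO or use kmem_cache_zalloc().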
/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, unsigned long addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	debug_check_no_locks_freed(object, c->objsize);
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(object, s->objsize);
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
		stat(c, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr, c->offset);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */
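A quick way to see the placement rule from the comment above: if the per-object stride is a multiple of the required alignment and the slab starts at an aligned address, objects laid out back to back from offset 0 all stay aligned. The sketch below is ordinary userspace C, not part of slub.c; the slab address, object size and alignment are made-up example values.

/* Illustrative only (not part of slub.c). */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long slab   = 0x1000;	/* example: page-aligned slab address */
	unsigned long size   = 52;	/* hypothetical object size */
	unsigned long align  = 8;	/* hypothetical required alignment */
	unsigned long stride = ALIGN_UP(size, align);	/* 56: size rounded up */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long obj = slab + i * stride;	/* back-to-back placement */
		printf("object %d at 0x%lx, aligned: %s\n",
		       i, obj, (obj % align) ? "no" : "yes");
	}
	return 0;
}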
/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less of a concern for large slabs though, which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;
	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	if (!min_objects)
		min_objects = 4 * (fls(nr_cpu_ids) + 1);
	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects /= 2;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order <= MAX_ORDER)
		return order;
	return -ENOSYS;
}
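To make the waste rule concrete, here is a small userspace sketch (not part of slub.c) applying the same "leftover must be at most slab_size / fract_leftover" test as slab_order(). The page size, maximum order, object sizes and minimum object counts are assumptions chosen for illustration, and it ignores slub_min_order and the MAX_OBJS_PER_PAGE clamp.

/* Illustrative only (not part of slub.c). */
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL	/* assumed page size */
#define DEMO_MAX_ORDER	11	/* assumed upper bound on the search */

/*
 * Simplified slab_order(): find the smallest order that fits at least
 * min_objects objects and wastes no more than 1/fract_leftover of the slab.
 */
static int demo_slab_order(unsigned long size, unsigned long min_objects,
			   unsigned long fract_leftover)
{
	int order;

	for (order = 0; order <= DEMO_MAX_ORDER; order++) {
		unsigned long slab_size = DEMO_PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;			/* too few objects */
		if (slab_size % size <= slab_size / fract_leftover)
			return order;			/* waste acceptable */
	}
	return DEMO_MAX_ORDER;
}

int main(void)
{
	/*
	 * 700-byte objects: order 0 leaves 596 of 4096 bytes unused (more
	 * than 1/16), order 1 leaves 492 of 8192 (less than 1/16), so the
	 * search settles on order 1.
	 */
	printf("size  700 -> order %d\n", demo_slab_order(700, 4, 16));
	printf("size  256 -> order %d\n", demo_slab_order(256, 4, 16));
	printf("size 2048 -> order %d\n", demo_slab_order(2048, 8, 16));
	return 0;
}

calculate_order() above applies exactly this test, but starts from slub_min_order, retries with 1/8 and then 1/4 acceptable waste, halves the required object count if that still fails, and finally falls back to a single object per slab.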
/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

static void init_kmem_cache_cpu(struct kmem_cache *s,
			struct kmem_cache_cpu *c)
{
	c->page = NULL;
	c->freelist = NULL;
	c->node = 0;
	c->offset = s->offset / sizeof(void *);
	c->objsize = s->objsize;
#ifdef CONFIG_SLUB_STATS
	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
#endif
}

static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
	n->nr_partial = 0;

	/*
	 * The larger the object size is, the more pages we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
	n->min_partial = ilog2(s->size);
	if (n->min_partial < MIN_PARTIAL)
		n->min_partial = MIN_PARTIAL;
	else if (n->min_partial > MAX_PARTIAL)
		n->min_partial = MAX_PARTIAL;

	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_set(&n->nr_slabs, 0);
	atomic_long_set(&n->total_objects, 0);
	INIT_LIST_HEAD(&n->full);
#endif
}

#ifdef CONFIG_SMP
/*
 * Per cpu array for per cpu structures.
 *
 * The per cpu array places all kmem_cache_cpu structures from one processor
 * close together meaning that it becomes possible that multiple per cpu
 * structures are contained in one cacheline. This may be particularly
 * beneficial for the kmalloc caches.
 *
 * A desktop system typically has around 60-80 slabs. With 100 here we are
 * likely able to get per cpu structures for all caches from the array defined
 * here. We must be able to cover all kmalloc caches during bootstrap.
 *
 * If the per cpu array is exhausted then fall back to kmalloc
 * of individual cachelines. No sharing is possible then.
 */
#define NR_KMEM_CACHE_CPU 100

static DEFINE_PER_CPU(struct kmem_cache_cpu,
				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];

static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);

static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
							int cpu, gfp_t flags)
{
	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);

	if (c)
		per_cpu(kmem_cache_cpu_free, cpu) =
				(void *)c->freelist;
	else {
		/* Table overflow: So allocate ourselves */
		c = kmalloc_node(
			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
			flags, cpu_to_node(cpu));
		if (!c)
			return NULL;
	}

	init_kmem_cache_cpu(s, c);
	return c;
}

static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
{
	if (c < per_cpu(kmem_cache_cpu, cpu) ||
			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
		kfree(c);
		return;
	}
	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
	per_cpu(kmem_cache_cpu_free, cpu) = c;
}

static void free_kmem_cache_cpus(struct kmem_cache *s)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c) {
			s->cpu_slab[cpu] = NULL;
			free_kmem_cache_cpu(c, cpu);
		}
	}
}

static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c)
			continue;

		c = alloc_kmem_cache_cpu(s, cpu, flags);
		if (!c) {
			free_kmem_cache_cpus(s);
			return 0;
		}
		s->cpu_slab[cpu] = c;
	}
	return 1;
}

/*
 * Initialize the per cpu array.
 */
static void init_alloc_cpu_cpu(int cpu)
{
	int i;

	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
		return;

	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);

	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
}

static void __init init_alloc_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		init_alloc_cpu_cpu(cpu);
}

#else
static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
static inline void init_alloc_cpu(void) {}

static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
	init_kmem_cache_cpu(s, &s->cpu_slab);
	return 1;
}
#endif

#ifdef CONFIG_NUMA
/*
 * No kmalloc_node yet so do it by hand. We know that this is the first
 * slab on the node for this slabcache. There are no concurrent accesses
 * possible.
 *
 * Note that this function only works on the kmalloc_node_cache
 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
{
	struct page *page;
	struct kmem_cache_node *n;
	unsigned long flags;

	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));

	page = new_slab(kmalloc_caches, gfpflags, node);

	BUG_ON(!page);
