
📄 slub.c

📁 Source code of the latest stable Linux memory management module (SLUB allocator)
💻 C
📖 Page 1 of 5
	if (page_to_nid(page) != node) {
		printk(KERN_ERR "SLUB: Unable to allocate memory from "
				"node %d\n", node);
		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
				"in order to be able to continue\n");
	}

	n = page->freelist;
	BUG_ON(!n);
	page->freelist = get_freepointer(kmalloc_caches, n);
	page->inuse++;
	kmalloc_caches->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
	init_object(kmalloc_caches, n, 1);
	init_tracking(kmalloc_caches, n);
#endif
	init_kmem_cache_node(n, kmalloc_caches);
	inc_slabs_node(kmalloc_caches, node, page->objects);

	/*
	 * lockdep requires consistent irq usage for each lock
	 * so even though there cannot be a race this early in
	 * the boot sequence, we still disable irqs.
	 */
	local_irq_save(flags);
	add_partial(n, page, 0);
	local_irq_restore(flags);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = s->node[node];

		if (n && n != &s->local_node)
			kmem_cache_free(kmalloc_caches, n);
		s->node[node] = NULL;
	}
}

static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
	int node;
	int local_node;

	if (slab_state >= UP)
		local_node = page_to_nid(virt_to_page(s));
	else
		local_node = 0;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n;

		if (local_node == node)
			n = &s->local_node;
		else {
			if (slab_state == DOWN) {
				early_kmem_cache_node_alloc(gfpflags, node);
				continue;
			}
			n = kmem_cache_alloc_node(kmalloc_caches,
							gfpflags, node);

			if (!n) {
				free_kmem_cache_nodes(s);
				return 0;
			}
		}
		s->node[node] = n;
		init_kmem_cache_node(n, s);
	}
	return 1;
}
#else
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
}

static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
	init_kmem_cache_node(&s->local_node, s);
	return 1;
}
#endif

/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
	unsigned long flags = s->flags;
	unsigned long size = s->objsize;
	unsigned long align = s->align;
	int order;

	/*
	 * Round up object size to the next word boundary. We can only
	 * place the free pointer at word boundaries and this determines
	 * the possible location of the free pointer.
	 */
	size = ALIGN(size, sizeof(void *));

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Determine if we can poison the object itself. If the user of
	 * the slab may touch the object after free or before allocation
	 * then we should never poison the object itself.
	 */
	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
			!s->ctor)
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;

	/*
	 * If we are Redzoning then check if there is some space between the
	 * end of the object and the free pointer. If not then add an
	 * additional word to have some bytes to store Redzone information.
	 */
	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
		size += sizeof(void *);
#endif

	/*
	 * With that we have determined the number of bytes in actual use
	 * by the object. This is the potential offset to the free pointer.
	 */
	s->inuse = size;

	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
		s->ctor)) {
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
		 * kmem_cache_free.
		 *
		 * This is the case if we do RCU, have a constructor or
		 * destructor or are poisoning the objects.
		 */
		s->offset = size;
		size += sizeof(void *);
	}

#ifdef CONFIG_SLUB_DEBUG
	if (flags & SLAB_STORE_USER)
		/*
		 * Need to store information about allocs and frees after
		 * the object.
		 */
		size += 2 * sizeof(struct track);

	if (flags & SLAB_RED_ZONE)
		/*
		 * Add some empty padding so that we can catch
		 * overwrites from earlier objects rather than let
		 * tracking information or the free pointer be
		 * corrupted if a user writes before the start
		 * of the object.
		 */
		size += sizeof(void *);
#endif

	/*
	 * Determine the alignment based on various parameters that the
	 * user specified and the dynamic determination of cache line size
	 * on bootup.
	 */
	align = calculate_alignment(flags, align, s->objsize);

	/*
	 * SLUB stores one object immediately after another beginning from
	 * offset 0. In order to align the objects we have to simply size
	 * each object to conform to the alignment.
	 */
	size = ALIGN(size, align);
	s->size = size;
	if (forced_order >= 0)
		order = forced_order;
	else
		order = calculate_order(size);

	if (order < 0)
		return 0;

	s->allocflags = 0;
	if (order)
		s->allocflags |= __GFP_COMP;

	if (s->flags & SLAB_CACHE_DMA)
		s->allocflags |= SLUB_DMA;

	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		s->allocflags |= __GFP_RECLAIMABLE;

	/*
	 * Determine the number of objects per slab
	 */
	s->oo = oo_make(order, size);
	s->min = oo_make(get_order(size), size);
	if (oo_objects(s->oo) > oo_objects(s->max))
		s->max = s->oo;

	return !!oo_objects(s->oo);
}

static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
		const char *name, size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *))
{
	memset(s, 0, kmem_size);
	s->name = name;
	s->ctor = ctor;
	s->objsize = size;
	s->align = align;
	s->flags = kmem_cache_flags(size, flags, name, ctor);

	if (!calculate_sizes(s, -1))
		goto error;

	s->refcount = 1;
#ifdef CONFIG_NUMA
	s->remote_node_defrag_ratio = 1000;
#endif
	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
		goto error;

	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
		return 1;
	free_kmem_cache_nodes(s);
error:
	if (flags & SLAB_PANIC)
		panic("Cannot create slab %s size=%lu realsize=%u "
			"order=%u offset=%u flags=%lx\n",
			s->name, (unsigned long)size, s->size, oo_order(s->oo),
			s->offset, flags);
	return 0;
}

/*
 * Check if a given pointer is valid
 */
int kmem_ptr_validate(struct kmem_cache *s, const void *object)
{
	struct page *page;

	page = get_object_page(object);
	if (!page || s != page->slab)
		/* No slab or wrong slab */
		return 0;

	if (!check_valid_pointer(s, page, object))
		return 0;

	/*
	 * We could also check if the object is on the slabs freelist.
	 * But this would be too expensive and it seems that the main
	 * purpose of kmem_ptr_valid() is to check if the object belongs
	 * to a certain slab.
	 */
	return 1;
}
EXPORT_SYMBOL(kmem_ptr_validate);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->objsize;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *s)
{
	return s->name;
}
EXPORT_SYMBOL(kmem_cache_name);

static void list_slab_objects(struct kmem_cache *s, struct page *page,
							const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
	void *addr = page_address(page);
	void *p;
	DECLARE_BITMAP(map, page->objects);

	bitmap_zero(map, page->objects);
	slab_err(s, page, "%s", text);
	slab_lock(page);
	for_each_free_object(p, s, page->freelist)
		set_bit(slab_index(p, s, addr), map);

	for_each_object(p, s, addr, page->objects) {
		if (!test_bit(slab_index(p, s, addr), map)) {
			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
							p, p - addr);
			print_tracking(s, p);
		}
	}
	slab_unlock(page);
#endif
}

/*
 * Attempt to free all partial slabs on a node.
 */
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
	unsigned long flags;
	struct page *page, *h;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry_safe(page, h, &n->partial, lru) {
		if (!page->inuse) {
			list_del(&page->lru);
			discard_slab(s, page);
			n->nr_partial--;
		} else {
			list_slab_objects(s, page,
				"Objects remaining on kmem_cache_close()");
		}
	}
	spin_unlock_irqrestore(&n->list_lock, flags);
}

/*
 * Release all resources used by a slab cache.
 */
static inline int kmem_cache_close(struct kmem_cache *s)
{
	int node;

	flush_all(s);

	/* Attempt to free all objects */
	free_kmem_cache_cpus(s);
	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		free_partial(s, n);
		if (n->nr_partial || slabs_node(s, node))
			return 1;
	}
	free_kmem_cache_nodes(s);
	return 0;
}

/*
 * Close a cache and release the kmem_cache structure
 * (must be used for caches created using kmem_cache_create)
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	down_write(&slub_lock);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);
		up_write(&slub_lock);
		if (kmem_cache_close(s)) {
			printk(KERN_ERR "SLUB %s: %s called for cache that "
				"still has objects.\n", s->name, __func__);
			dump_stack();
		}
		sysfs_slab_remove(s);
	} else
		up_write(&slub_lock);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/********************************************************************
 *		Kmalloc subsystem
 *******************************************************************/

struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);

static int __init setup_slub_min_order(char *str)
{
	get_option(&str, &slub_min_order);

	return 1;
}

__setup("slub_min_order=", setup_slub_min_order);

static int __init setup_slub_max_order(char *str)
{
	get_option(&str, &slub_max_order);

	return 1;
}

__setup("slub_max_order=", setup_slub_max_order);

static int __init setup_slub_min_objects(char *str)
{
	get_option(&str, &slub_min_objects);

	return 1;
}

__setup("slub_min_objects=", setup_slub_min_objects);

static int __init setup_slub_nomerge(char *str)
{
	slub_nomerge = 1;
	return 1;
}

__setup("slub_nomerge", setup_slub_nomerge);

static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
		const char *name, int size, gfp_t gfp_flags)
{
	unsigned int flags = 0;

	if (gfp_flags & SLUB_DMA)
		flags = SLAB_CACHE_DMA;

	down_write(&slub_lock);
	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
								flags, NULL))
		goto panic;

	list_add(&s->list, &slab_caches);
	up_write(&slub_lock);
	if (sysfs_slab_add(s))
		goto panic;
	return s;

panic:
	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
}

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];

static void sysfs_add_func(struct work_struct *w)
{
	struct kmem_cache *s;

	down_write(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & __SYSFS_ADD_DEFERRED) {
			s->flags &= ~__SYSFS_ADD_DEFERRED;
			sysfs_slab_add(s);
		}
	}
	up_write(&slub_lock);
}

static DECLARE_WORK(sysfs_add_work, sysfs_add_func);

static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
	struct kmem_cache *s;
	char *text;
	size_t realsize;

	s = kmalloc_caches_dma[index];
	if (s)
		return s;

	/* Dynamically create dma cache */
	if (flags & __GFP_WAIT)
		down_write(&slub_lock);
	else {
		if (!down_write_trylock(&slub_lock))
			goto out;
	}

	if (kmalloc_caches_dma[index])
		goto unlock_out;

	realsize = kmalloc_caches[index].objsize;
	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
			 (unsigned int)realsize);
	s = kmalloc(kmem_size, flags & ~SLUB_DMA);

	if (!s || !text || !kmem_cache_open(s, flags, text,
			realsize, ARCH_KMALLOC_MINALIGN,
			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
		kfree(s);
		kfree(text);
		goto unlock_out;
	}

	list_add(&s->list, &slab_caches);
	kmalloc_caches_dma[index] = s;

	schedule_work(&sysfs_add_work);

unlock_out:
	up_write(&slub_lock);
out:
	return kmalloc_caches_dma[index];
}
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
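
For context, here is a minimal usage sketch (not part of slub.c) of how the interface implemented above is normally consumed by other kernel code: a cache is created with kmem_cache_create() (which ends up in kmem_cache_open()), objects are obtained and returned with kmem_cache_alloc()/kmem_cache_free(), and teardown goes through the kmem_cache_destroy() defined in this fragment. The structure and cache names (my_record) are hypothetical, and the sketch assumes a kernel from roughly the same era as this file.

/* Usage sketch only -- not part of slub.c. Names are hypothetical. */
#include <linux/module.h>
#include <linux/slab.h>

struct my_record {			/* hypothetical object type */
	int id;
	char name[32];
};

static struct kmem_cache *my_record_cache;

static int __init my_record_init(void)
{
	struct my_record *r;

	/* kmem_cache_create() is backed by kmem_cache_open() above. */
	my_record_cache = kmem_cache_create("my_record",
					    sizeof(struct my_record),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_record_cache)
		return -ENOMEM;

	/* Allocate one object from the cache and release it again. */
	r = kmem_cache_alloc(my_record_cache, GFP_KERNEL);
	if (r) {
		r->id = 1;
		kmem_cache_free(my_record_cache, r);
	}
	return 0;
}

static void __exit my_record_exit(void)
{
	/* Drops the refcount; kmem_cache_close() runs when it reaches zero. */
	kmem_cache_destroy(my_record_cache);
}

module_init(my_record_init);
module_exit(my_record_exit);
MODULE_LICENSE("GPL");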
