
📄 slub.c

📁 Latest stable source code of the Linux memory management module (SLUB allocator)
💻 C
📖 Page 1 of 5
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
        while (bytes) {
                if (*start != (u8)value)
                        return start;
                start++;
                bytes--;
        }
        return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        fault = check_bytes(start, value, bytes);
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);

        print_trailer(s, page, object);
        restore_bytes(s, what, value, fault, end);
        return 0;
}

/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->objsize
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      objsize == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned long off = s->inuse;   /* The end of info */

        if (s->offset)
                /* Freepointer is placed after the object. */
                off += sizeof(void *);

        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
                off += 2 * sizeof(struct track);

        if (s->size == off)
                return 1;

        return check_bytes_and_report(s, page, p, "Object padding",
                                p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
        u8 *start;
        u8 *fault;
        u8 *end;
        int length;
        int remainder;

        if (!(s->flags & SLAB_POISON))
                return 1;

        start = page_address(page);
        length = (PAGE_SIZE << compound_order(page));
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
                return 1;

        fault = check_bytes(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;

        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        print_section("Padding", end - remainder, remainder);

        restore_bytes(s, "slab padding", POISON_INUSE, start, end);
        return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, int active)
{
        u8 *p = object;
        u8 *endobject = object + s->objsize;

        if (s->flags & SLAB_RED_ZONE) {
                unsigned int red =
                        active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

                if (!check_bytes_and_report(s, page, object, "Redzone",
                        endobject, red, s->inuse - s->objsize))
                        return 0;
        } else {
                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
                                endobject, POISON_INUSE, s->inuse - s->objsize);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (!active && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->objsize - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
                                p + s->objsize - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                 */
                check_pad_bytes(s, page, p);
        }

        if (!s->offset && active)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
                 */
                return 1;

        /* Check free pointer validity */
        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                object_err(s, page, p, "Freepointer corrupt");
                /*
                 * No choice but to zap it and thus lose the remainder
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
                set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
        int maxobj;

        VM_BUG_ON(!irqs_disabled());

        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }

        maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
        if (page->objects > maxobj) {
                slab_err(s, page, "objects %u > max %u",
                        s->name, page->objects, maxobj);
                return 0;
        }
        if (page->inuse > page->objects) {
                slab_err(s, page, "inuse %u > max %u",
                        s->name, page->inuse, page->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
        slab_pad_check(s, page);
        return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        int nr = 0;
        void *fp = page->freelist;
        void *object = NULL;
        unsigned long max_objects;

        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
                                page->inuse = page->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
                        break;
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
        if (max_objects > MAX_OBJS_PER_PAGE)
                max_objects = MAX_OBJS_PER_PAGE;

        if (page->objects != max_objects) {
                slab_err(s, page, "Wrong number of objects. Found %d but "
                        "should be %d", page->objects, max_objects);
                page->objects = max_objects;
                slab_fix(s, "Number of objects adjusted.");
        }
        if (page->inuse != page->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
                        "counted were %d", page->inuse, page->objects - nr);
                page->inuse = page->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
                                                                int alloc)
{
        if (s->flags & SLAB_TRACE) {
                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
                        s->name,
                        alloc ? "alloc" : "free",
                        object, page->inuse,
                        page->freelist);

                if (!alloc)
                        print_section("Object", (void *)object, s->objsize);

                dump_stack();
        }
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
        spin_lock(&n->list_lock);
        list_add(&page->lru, &n->full);
        spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
        struct kmem_cache_node *n;

        if (!(s->flags & SLAB_STORE_USER))
                return;

        n = get_node(s, page_to_nid(page));

        spin_lock(&n->list_lock);
        list_del(&page->lru);
        spin_unlock(&n->list_lock);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
        struct kmem_cache_node *n = get_node(s, node);

        return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        /*
         * May be called early in order to allocate a slab for the
         * kmem_cache_node structure. Solve the chicken-egg
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
        if (!NUMA_BUILD || n) {
                atomic_long_inc(&n->nr_slabs);
                atomic_long_add(objects, &n->total_objects);
        }
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        atomic_long_dec(&n->nr_slabs);
        atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                void *object)
{
        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
                return;

        init_object(s, object, 0);
        init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                                        void *object, unsigned long addr)
{
        if (!check_slab(s, page))
                goto bad;

        if (!on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already allocated");
                goto bad;
        }

        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
        }

        if (!check_object(s, page, object, 0))
                goto bad;

        /* Success perform special debug activities for allocs */
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_ALLOC, addr);
        trace(s, page, object, 1);
        init_object(s, object, 1);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * If this is a slab page then lets do the best we can
                 * to avoid issues in the future. Marking all objects
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
                                        void *object, unsigned long addr)
{
        if (!check_slab(s, page))
                goto fail;

        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        }

        if (on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already free");
                goto fail;
        }

        if (!check_object(s, page, object, 1))
                return 0;

        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
                } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
                        dump_stack();
                } else
                        object_err(s, page, object,
                                        "page slab pointer corrupt.");
                goto fail;
        }

        /* Special debug activities for freeing objects */
        if (!PageSlubFrozen(page) && !page->freelist)
                remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, 0);
        return 1;

fail:
        slab_fix(s, "Object at 0x%p not freed", object);
        return 0;
}

static int __init setup_slub_debug(char *str)
{
        slub_debug = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
                 */
                goto out;

        if (*str == ',')
                /*
                 * No options but restriction on slabs. This means full
                 * debugging for slabs matching a pattern.
                 */
                goto check_slabs;

        slub_debug = 0;
        if (*str == '-')
                /*
                 * Switch off all debugging measures.
                 */
                goto out;

        /*
         * Determine which debug features should be switched on
         */
        for (; *str && *str != ','; str++) {
                switch (tolower(*str)) {
                case 'f':
                        slub_debug |= SLAB_DEBUG_FREE;
                        break;
                case 'z':
                        slub_debug |= SLAB_RED_ZONE;
                        break;
                case 'p':
                        slub_debug |= SLAB_POISON;
                        break;
                case 'u':
                        slub_debug |= SLAB_STORE_USER;
                        break;
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
                }
        }

check_slabs:
        if (*str == ',')
                slub_debug_slabs = str + 1;
out:
        return 1;
}

__setup("slub_debug", setup_slub_debug);

static unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        /*
         * Enable debugging if selected on the kernel commandline.
         */
        if (slub_debug && (!slub_debug_slabs ||
            strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
                        flags |= slub_debug;

        return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }

static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, int active) { return 1; }

static inline void add_full(struct kmem_cache_node *n, struct page *page) {}

static inline unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
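The debug machinery above poisons free objects with 0x6b (POISON_FREE), scans for the first and last bytes that no longer match the expected pattern, reports the damaged range, and then restores the pattern so a single corruption does not trigger a flood of follow-up reports. The fragment below is a small standalone user-space sketch, not part of slub.c, that walks through the same scan/narrow/restore steps on an ordinary heap buffer; it reuses the POISON_FREE value and the check_bytes name only for readability.

/*
 * Standalone user-space sketch (not part of slub.c): illustrates the
 * scan/narrow/restore pattern used by check_bytes(), check_bytes_and_report()
 * and restore_bytes() in the listing above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON_FREE 0x6b        /* fill byte used for free objects */

/* Return a pointer to the first byte that does not match 'value', or NULL. */
static unsigned char *check_bytes(unsigned char *start, unsigned char value,
                                  size_t bytes)
{
        while (bytes) {
                if (*start != value)
                        return start;
                start++;
                bytes--;
        }
        return NULL;
}

int main(void)
{
        size_t size = 64;
        unsigned char *obj = malloc(size);
        unsigned char *fault, *end;

        if (!obj)
                return 1;

        /* "Free" the object: fill it with the poison pattern. */
        memset(obj, POISON_FREE, size);

        /* Simulate a use-after-free style corruption in the middle. */
        obj[17] = 0x00;
        obj[18] = 0x42;

        /* Scan for the first mismatching byte, as check_bytes() does. */
        fault = check_bytes(obj, POISON_FREE, size);
        if (!fault) {
                puts("poison intact");
        } else {
                /* Narrow the damaged range from the end, then report it. */
                end = obj + size;
                while (end > fault && end[-1] == POISON_FREE)
                        end--;
                printf("poison overwritten: offset %td-%td, first byte 0x%02x\n",
                       fault - obj, end - 1 - obj, fault[0]);

                /* Restore the expected pattern so later checks start clean. */
                memset(fault, POISON_FREE, end - fault);
        }

        free(obj);
        return 0;
}

For reference, setup_slub_debug above parses the slub_debug boot option: "slub_debug" alone enables the full default debug flags (DEBUG_DEFAULT_FLAGS), "slub_debug=zp" enables red zoning and poisoning for every cache, "slub_debug=zp,dentry" applies those flags only to caches whose name begins with "dentry" (kmem_cache_flags matches the restriction with a prefix strncmp), and "slub_debug=-" switches debugging off.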
