⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 slob.c

📁 最新最稳定的Linux内存管理模块源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked list of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted in to the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
/* Pick the narrowest signed type that can index every unit in a page. */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;	/* must overlay struct page exactly */
	};
};

/* Compile-time check that the overlay above matches struct page's size. */
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return PageSlobPage((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlobPage((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlobPage((struct page *)sp);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 *
 * Offsets are relative to the base of the page containing s, so they
 * always fit in a slobidx_t. A one-unit block stores only the (negated)
 * next offset; larger blocks store size in unit 0 and next in unit 1.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;	/* negative units => one-unit block (see set_slob) */
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 * (The terminator encodes "next" as offset 0, i.e. the page base.)
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

/*
 * Grab fresh page(s) from the page allocator, honouring an explicit
 * NUMA node id when one was provided (node != -1).
 * Returns the kernel virtual address of the page, or NULL on failure.
 */
static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 *
 * First-fit walk of the page's free list; may fragment the head of a
 * free block to satisfy an alignment request. Caller holds slob_lock
 * (NOTE(review): lock is taken by the caller, not visible in this
 * function — confirm against slob_alloc).
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit?
			   unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment: keep the tail on the free list */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);	/* page fully used */
			return cur;
		}
		if (slob_last(cur))
			return NULL;	/* no block in this page fits */
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 *
 * Picks the small/medium/large free list by size, then scans partially
 * free pages for room under slob_lock.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	/* Segregate by size to reduce fragmentation (see header comment). */
	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -