
vmalloc.c

Linux memory-management module source code (mm/vmalloc.c, excerpt)
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}
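/*
 * Example (not part of vmalloc.c): a minimal, hypothetical sketch of how a
 * caller might use vmap()/vunmap() as documented above -- allocate individual
 * pages and map them into one contiguous kernel virtual range. Assumes
 * <linux/vmalloc.h>, <linux/mm.h> and <linux/slab.h> are included; the
 * function name and error handling are illustrative only.
 */
static void *example_vmap_buffer(unsigned int nr_pages, struct page ***pagesp)
{
	struct page **pages;
	void *vaddr;
	unsigned int i;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	/* VM_MAP marks the area as created by vmap(); PAGE_KERNEL gives
	 * ordinary kernel read/write protections. */
	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto free_pages;

	/* vunmap() will not free the pages themselves, so the caller keeps
	 * the page array and releases everything when done. */
	*pagesp = pages;
	return vaddr;

free_pages:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}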
/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
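/*
 * Example (not part of vmalloc.c): a minimal sketch of the vmalloc()/vfree()
 * pattern documented above. The allocation is virtually contiguous but not
 * physically contiguous, and vfree() must not be called from interrupt
 * context. The size and function name are hypothetical.
 */
static int example_use_vmalloc(void)
{
	size_t size = 128 * PAGE_SIZE;
	void *buf;

	buf = vmalloc(size);	/* GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL */
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, size);	/* plain vmalloc() does not zero the memory */
	/* ... use the buffer ... */

	vfree(buf);
	return 0;
}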
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}
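/*
 * Example (not part of vmalloc.c): a hypothetical driver mmap() handler that
 * exports a vmalloc_user() buffer to userspace with remap_vmalloc_range(), as
 * described in the comment above. vmalloc_user() is used because it zeroes
 * the buffer and sets VM_USERMAP, which remap_vmalloc_range() requires.
 * "example_buf" stands in for a buffer the driver allocated elsewhere.
 */
static void *example_buf;	/* set up elsewhere via vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the whole VMA, starting vma->vm_pgoff pages into the buffer.
	 * remap_vmalloc_range() returns -EINVAL if the VMA does not fit
	 * inside the vmalloc area or VM_USERMAP is not set. */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}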
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif
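/*
 * Example (not part of vmalloc.c): a minimal sketch of alloc_vm_area() /
 * free_vm_area() as defined above, using the single-argument signature shown
 * in this version of the file -- reserve a page-table-backed range of kernel
 * address space without populating it with pages; the caller is expected to
 * install its own mappings into the reserved range. Size and name are
 * hypothetical.
 */
static int example_reserve_kva(void)
{
	struct vm_struct *area;

	area = alloc_vm_area(4 * PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	printk(KERN_INFO "reserved %lu bytes of KVA at %p\n",
	       area->size, area->addr);

	/* ... install mappings into [area->addr, area->addr + area->size) ... */

	free_vm_area(area);
	return 0;
}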
