
hugetlb.c

Source code of the Linux kernel's huge page (HugeTLB) memory management module
Language: C
Page 1 of 5
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int chg;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_online_map);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(h->hugetlb_next_nid),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		hstate_next_node(h);
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count);
	while (min_count < persistent_huge_pages(h)) {
		struct page *page = dequeue_huge_page(h);
		if (!page)
			break;
		update_and_free_page(h, page);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj)
			return &hstates[i];
	BUG();
	return NULL;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_huge_pages);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return 0;

	h->max_huge_pages = set_max_huge_pages(h, input);

	return count;
}
HSTATE_ATTR(nr_hugepages);

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return 0;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
{
	int retval;

	hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
							hugepages_kobj);
	if (!hstate_kobjs[h - hstates])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[h - hstates],
							&hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[h - hstates]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
								h->name);
	}
}

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	/* Some platform decide whether they support huge pages at boot
	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
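The sysfs handlers above (nr_hugepages_show()/nr_hugepages_store(), registered by hugetlb_sysfs_init()) expose the huge page pool under /sys/kernel/mm/hugepages/. The following is a minimal userspace sketch, not part of hugetlb.c, showing one way to read and resize the pool through that interface. The hugepages-2048kB directory name assumes a 2 MB default huge page size and the target count of 64 is illustrative; the actual directory name depends on your architecture, and writing requires root.

/*
 * Userspace sketch: query and resize the huge page pool via the sysfs
 * attributes created by hugetlb_sysfs_init(). Path and target count are
 * assumptions for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_HUGEPAGES "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages"

int main(void)
{
	FILE *f;
	unsigned long nr;

	/* Read the current pool size (served by nr_hugepages_show()). */
	f = fopen(NR_HUGEPAGES, "r");
	if (!f || fscanf(f, "%lu", &nr) != 1) {
		perror("read " NR_HUGEPAGES);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("current nr_hugepages: %lu\n", nr);

	/* Grow the pool to 64 pages (handled by nr_hugepages_store(),
	 * which calls set_max_huge_pages()). Requires root. */
	f = fopen(NR_HUGEPAGES, "w");
	if (!f || fprintf(f, "64\n") < 0) {
		perror("write " NR_HUGEPAGES);
		return EXIT_FAILURE;
	}
	fclose(f);
	return EXIT_SUCCESS;
}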
