hugetlbpage.c
From "Linux Kernel 2.6.9 for OMAP1710" · C source · 865 lines · page 1 of 2
/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

/* HugePTE layout:
 *
 * 31 30 ... 15 14 13 12 10  9  8  7  6  5  4  3  2  1  0
 * PFN>>12.....  -  -  -  -  -  - HASH_IX.... 2ND HASH RW - HG=1
 */

#define HUGEPTE_SHIFT		15
#define _HUGEPAGE_PFN		0xffff8000
#define _HUGEPAGE_BAD		0x00007f00
#define _HUGEPAGE_HASHPTE	0x00000008
#define _HUGEPAGE_SECONDARY	0x00000010
#define _HUGEPAGE_GROUP_IX	0x000000e0
#define _HUGEPAGE_HPTEFLAGS	(_HUGEPAGE_HASHPTE | _HUGEPAGE_SECONDARY | \
				 _HUGEPAGE_GROUP_IX)
#define _HUGEPAGE_RW		0x00000004

typedef struct { unsigned int val; } hugepte_t;

#define hugepte_val(hugepte)	((hugepte).val)
#define __hugepte(x)		((hugepte_t) { (x) })
#define hugepte_pfn(x)		\
	((unsigned long)(hugepte_val(x) >> HUGEPTE_SHIFT) << HUGETLB_PAGE_ORDER)
#define mk_hugepte(page,wr)	__hugepte( \
	((page_to_pfn(page) >> HUGETLB_PAGE_ORDER) << HUGEPTE_SHIFT) \
	| (!!(wr) * _HUGEPAGE_RW) | _PMD_HUGEPAGE)

#define hugepte_bad(x)	(!(hugepte_val(x) & _PMD_HUGEPAGE) || \
			 (hugepte_val(x) & _HUGEPAGE_BAD))
#define hugepte_page(x)	pfn_to_page(hugepte_pfn(x))
#define hugepte_none(x)	(!(hugepte_val(x) & _HUGEPAGE_PFN))

static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local);

static inline unsigned int hugepte_update(hugepte_t *p, unsigned int clr,
					  unsigned int set)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# pte_update\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc");

	return old;
}

static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
{
	hugepte_update(ptep, ~_HUGEPAGE_HPTEFLAGS,
		       hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}

static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static void setup_huge_pte(struct mm_struct *mm, struct page *page,
			   hugepte_t *ptep, int write_access)
{
	hugepte_t entry;
	int i;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	entry = mk_hugepte(page, write_access);
	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		set_hugepte(ptep + i, entry);
}

static void teardown_huge_pte(hugepte_t *ptep)
{
	int i;

	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		pmd_clear((pmd_t *)(ptep + i));
}
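/*
 * Illustrative note (added, not part of the original file): a worked
 * example of the packing done by mk_hugepte() above, assuming the usual
 * 2.6-era ppc64 layout (4k base pages, 16MB huge pages, so
 * HUGETLB_PAGE_ORDER == 12) and assuming _PMD_HUGEPAGE is the HG bit
 * (bit 0) shown in the layout comment:
 *
 *	a huge page at physical address 16MB has pfn 0x1000;
 *	0x1000 >> HUGETLB_PAGE_ORDER == 0x1, and 0x1 << HUGEPTE_SHIFT
 *	== 0x8000; OR-ing in _HUGEPAGE_RW (0x4) and _PMD_HUGEPAGE (0x1)
 *	yields the hugepte 0x8005.
 *
 * Under the same assumptions a 16MB huge page spans eight 2MB pmd
 * slots (HUGEPTE_BATCH_SIZE == 8), which is why setup_huge_pte()
 * writes the identical hugepte into each consecutive pmd entry and
 * teardown_huge_pte() clears them all.
 */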
/*
 * This function checks for proper alignment of input addr and len
 * parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (!(within_hugepage_low_range(addr, len)
	      || within_hugepage_high_range(addr, len)))
		return -EINVAL;
	return 0;
}

static void flush_segments(void *parm)
{
	u16 segs = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	for (i = 0; i < 16; i++) {
		if (!(segs & (1U << i)))
			continue;
		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
	}

	asm volatile("isync" : : : "memory");
}

static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;
	unsigned long addr;
	struct mmu_gather *tlb;

	BUG_ON(seg >= 16);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	/* Clean up any leftover PTE pages in the region */
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd;
		struct page *page;
		pte_t *pte;
		int i;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (pmd_bad(*pmd)) {
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
			continue;
		}
		pte = (pte_t *)pmd_page_kernel(*pmd);
		/* No VMAs, so there should be no PTEs, check just
		 * in case. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			BUG_ON(!pte_none(*pte));
			pte++;
		}
		page = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		pte_free_tlb(tlb, page);
	}
	tlb_finish_mmu(tlb, start, end);
	spin_unlock(&mm->page_table_lock);

	return 0;
}

static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
	unsigned long i;

	newsegs &= ~(mm->context.htlb_segs);
	if (!newsegs)
		return 0; /* The segments we want are already open */

	for (i = 0; i < 16; i++)
		if ((1 << i) & newsegs)
			if (prepare_low_seg_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.htlb_segs |= newsegs;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

	return 0;
}

int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (within_hugepage_high_range(addr, len))
		return 0;
	else if ((addr < 0x100000000) && ((addr+len) < 0x100000000)) {
		int err;
		/* Yes, we need both tests, in case addr+len overflows
		 * 64-bit arithmetic */
		err = open_low_hpage_segs(current->mm,
					  LOW_ESID_MASK(addr, len));
		if (err)
			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
			       " failed (segs: 0x%04hx)\n", addr, len,
			       LOW_ESID_MASK(addr, len));
		return err;
	}

	return -EINVAL;
}
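/*
 * Illustrative note (added, not part of the original file): on these
 * kernels the low 4GB of an address space is divided into sixteen
 * 256MB segments (SID_SHIFT == 28), and LOW_ESID_MASK(addr, len) is
 * assumed to return a u16 with bit i set for each segment i that
 * [addr, addr+len) touches; e.g. addr == 0x30000000 with len ==
 * 0x20000000 covers segments 3 and 4, giving the mask 0x0018.  A
 * segment can only be converted to huge pages while it is completely
 * empty, which is why prepare_low_seg_for_htlb() returns -EBUSY if any
 * VMA overlaps it; the mb() + on_each_cpu(flush_segments, ...) pair in
 * open_low_hpage_segs() then evicts stale SLB entries on every CPU
 * before the reconfigured segments are used.
 */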
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	hugepte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		BUG_ON(!in_hugepage_area(src->context, addr));
		BUG_ON(!in_hugepage_area(dst->context, addr));

		dst_pte = hugepte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;

		src_pte = hugepte_offset(src, addr);
		entry = *src_pte;

		if ((addr % HPAGE_SIZE) == 0) {
			/* This is the first hugepte in a batch */
			ptepage = hugepte_page(entry);
			get_page(ptepage);
			dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		}
		set_hugepte(dst_pte, entry);

		addr += PMD_SIZE;
	}
	return 0;
}

int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		BUG_ON(!in_hugepage_area(mm->context, vaddr));

		if (pages) {
			hugepte_t *pte;
			struct page *page;

			pte = hugepte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || hugepte_none(*pte));

			page = &hugepte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return pmd_hugepage(pmd);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	BUG_ON(!pmd_hugepage(*pmd));

	page = hugepte_page(*(hugepte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);

	return page;
}

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	hugepte_t *ptep;
	struct page *page;
	int cpu;
	int local = 0;
	cpumask_t tmp;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((start % HPAGE_SIZE) != 0);
	BUG_ON((end % HPAGE_SIZE) != 0);

	/* XXX are there races with checking cpu_vm_mask? - Anton */
	cpu = get_cpu();
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		hugepte_t pte;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		ptep = hugepte_offset(mm, addr);
		if (!ptep || hugepte_none(*ptep))
			continue;

		pte = *ptep;
		page = hugepte_page(pte);
		teardown_huge_pte(ptep);

		if (hugepte_val(pte) & _HUGEPAGE_HASHPTE)
			flush_hash_hugepage(mm->context, addr,
					    pte, local);

		put_page(page);
	}
	put_cpu();

	mm->rss -= (end - start) >> PAGE_SHIFT;
}

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		hugepte_t *pte = hugepte_alloc(mm, addr);
		struct page *page;

		BUG_ON(!in_hugepage_area(mm->context, addr));

	/* ... listing truncated here: page 1 of 2 of the original file
	 * ends mid-function ... */
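/*
 * Illustrative userspace sketch (added, not part of this kernel file):
 * how the mmap()-time prefault path above is exercised through
 * hugetlbfs on a 2.6-era kernel.  The mount point /mnt/huge and the
 * file name are hypothetical; it assumes hugetlbfs is mounted there
 * and that huge pages have been reserved via
 * /proc/sys/vm/nr_hugepages.  Because hugetlb_prefault() populates
 * every huge page during mmap() itself, the mmap() call below fails up
 * front if the huge page pool is too small.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define LENGTH (16UL * 1024 * 1024)	/* one 16MB PPC64 huge page */

int main(void)
{
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0700);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Mapping the hugetlbfs file allocates (and here prefaults)
	 * the huge pages backing it. */
	p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	p[0] = 1;			/* touch the huge page */

	munmap(p, LENGTH);
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}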