📄 mm.h
void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
int __set_page_dirty_nobuffers(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
int FASTCALL(set_page_dirty(struct page *page));
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);

/*
 * Prototype to add a shrinker callback for ageable caches.
 *
 * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
 * scan `nr_to_scan' objects, attempting to free them.
 *
 * The callback must return the number of objects which remain in the cache.
 *
 * The callback will be passed nr_to_scan == 0 when the VM is querying the
 * cache size, so a fastpath for that case is appropriate.
 */
typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);

/*
 * Add an aging callback.  The int is the number of 'seeks' it takes
 * to recreate one of the objects that these functions age.
 */
#define DEFAULT_SEEKS 2
struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);
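/*
 * Example (editor's sketch, not part of mm.h): a minimal shrinker for a
 * hypothetical object cache.  The atomic counter stands in for a real cache
 * and the my_* names are invented; only shrinker_t, DEFAULT_SEEKS,
 * set_shrinker() and remove_shrinker() come from this header.
 */
static atomic_t my_cache_objects = ATOMIC_INIT(0);
static struct shrinker *my_shrinker;

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)		/* the VM is only querying the cache size */
		return atomic_read(&my_cache_objects);

	/* A real cache would walk its LRU and free up to nr_to_scan objects here. */
	atomic_sub(min(nr_to_scan, atomic_read(&my_cache_objects)),
		   &my_cache_objects);

	return atomic_read(&my_cache_objects);	/* objects still in the cache */
}

static int __init my_cache_init(void)
{
	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
	return my_shrinker ? 0 : -ENOMEM;
}

static void __exit my_cache_exit(void)
{
	remove_shrinker(my_shrinker);
}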
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
static inline int vma_wants_writenotify(struct vm_area_struct *vma)
{
	unsigned int vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(protection_map[vm_flags &
		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr,
				      spinlock_t **ptl));

int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd,
				unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud,
				unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc_map(mm, pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
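/*
 * Example (editor's sketch, not part of mm.h): the usual pattern for looking
 * at one user PTE under the correct page-table lock.  pte_offset_map_lock()
 * picks either the split per-page lock or mm->page_table_lock, so the caller
 * does not care which configuration is in effect.  Assumes the caller already
 * holds mm->mmap_sem and that the pgd/pud/pmd covering "address" exist.
 */
static int my_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	ret = pte_present(*pte);	/* examine the entry while it is locked */
	pte_unmap_unlock(pte, ptl);	/* drop the lock and unmap the table */
	return ret;
}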
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat,
	unsigned long * zones_size, unsigned long zone_start_pfn,
	unsigned long *zholes_size);
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register ranges of page frames backed by
 * physical memory with add_active_range() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	add_active_range(node_id, start_pfn, end_pfn)
 * free_area_init_nodes(max_zone_pfns);
 *
 * If the architecture guarantees that there are no holes in the ranges
 * registered with add_active_range(), free_bootmem_active_regions()
 * will call free_bootmem_node() for each registered physical page range.
 * Similarly sparse_memory_present_with_active_regions() calls
 * memory_present() for each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_ARCH_POPULATES_NODE_MAP
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
					unsigned long new_end_pfn);
extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern unsigned long find_max_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
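/*
 * Example (editor's sketch, not part of mm.h): the boot-time sequence the
 * comment above describes, as an architecture's setup code might spell it
 * out.  The my_ram_ranges[] table and the my_max_*_pfn variables are
 * hypothetical stand-ins for whatever the architecture discovers at boot;
 * only add_active_range() and free_area_init_nodes() come from this header.
 */
struct my_ram_range {
	unsigned int nid;
	unsigned long start_pfn;
	unsigned long end_pfn;
};
extern struct my_ram_range my_ram_ranges[];	/* hypothetical boot data */
extern int my_nr_ram_ranges;
extern unsigned long my_max_dma_pfn, my_max_low_pfn, my_max_pfn;

void __init my_arch_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int i;

	/* Register every range of page frames that is backed by real memory. */
	for (i = 0; i < my_nr_ram_ranges; i++)
		add_active_range(my_ram_ranges[i].nid,
				 my_ram_ranges[i].start_pfn,
				 my_ram_ranges[i].end_pfn);

	/* Hand the core the highest PFN each zone may reach. */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA]     = my_max_dma_pfn;
	max_zone_pfns[ZONE_NORMAL]  = my_max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = my_max_pfn;
#endif
	free_area_init_nodes(max_zone_pfns);
}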
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_pages_min(void);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);

extern unsigned long get_unmapped_area(struct file *, unsigned long,
	unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag,
				    offset >> PAGE_SHIFT);
out:
	return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long,
				   int *);
extern int filemap_populate(struct vm_area_struct *, unsigned long,
		unsigned long, pgprot_t, unsigned long, int);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
					 * turning readahead off */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
unsigned long page_cache_readahead(struct address_space *mapping,
			  struct file_ra_state *ra,
			  struct file *filp,
			  pgoff_t offset,
			  unsigned long size);
void handle_ra_miss(struct address_space *mapping,
		    struct file_ra_state *ra, pgoff_t offset);
unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm,
					unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm,
					     unsigned long addr,
					     struct vm_area_struct **pprev);
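/*
 * Example (editor's sketch, not part of mm.h): find_vma() returns the first
 * VMA with addr < vm_end, which may lie entirely above addr, so a caller that
 * wants "the VMA containing addr" must also check vm_start.  The caller is
 * assumed to hold mm->mmap_sem for reading.
 */
static int my_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && vma->vm_start <= addr;	/* addr falls inside this VMA */
}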
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm,
			unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
struct page *vmalloc_to_page(void *addr);
unsigned long vmalloc_to_pfn(void *addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef	__HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);
void drop_pagecache(void);
void drop_slab(void);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
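/*
 * Example (editor's sketch, not part of mm.h): the classic use of
 * remap_pfn_range() from a character driver's mmap method, mapping a
 * physical region selected by the file offset into the caller's VMA.
 * The mydrv name is hypothetical; only remap_pfn_range() and the
 * vm_area_struct fields come from this header.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* vm_pgoff already holds the requested offset in pages, i.e. a PFN. */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}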