
📄 mm.h

📁 A modified Linux kernel that adds new read/write commands, and also lets you experience compiling the kernel under Linux. This is one of the projects for our OS course.
static inline int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		struct vm_area_struct *vma);
int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long size, pgprot_t prot);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int vmtruncate(struct inode * inode, loff_t offset);
extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
extern int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);

static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
{
	return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
}

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

int __set_page_dirty_buffers(struct page *page);
int __set_page_dirty_nobuffers(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
int FASTCALL(set_page_dirty(struct page *page));
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);

/*
 * Prototype to add a shrinker callback for ageable caches.
 *
 * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
 * scan `nr_to_scan' objects, attempting to free them.
 *
 * The callback must return the number of objects which remain in the cache.
 *
 * The callback will be passed nr_to_scan == 0 when the VM is querying the
 * cache size, so a fastpath for that case is appropriate.
 */
typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);

/*
 * Add an aging callback.  The int is the number of 'seeks' it takes
 * to recreate one of the objects that these functions age.
 */

#define DEFAULT_SEEKS 2
struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);

/*
 * On a two-level or three-level page table, this ends up being trivial. Thus
 * the inlining and the symmetry break with pte_alloc_map() that does all
 * of this out-of-line.
 */
/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#ifdef CONFIG_MMU
#ifndef __ARCH_HAS_4LEVEL_HACK
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	if (pgd_none(*pgd))
		return __pud_alloc(mm, pgd, address);
	return pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	if (pud_none(*pud))
		return __pmd_alloc(mm, pud, address);
	return pmd_offset(pud, address);
}
#endif
#endif /* CONFIG_MMU */

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat,
	unsigned long * zones_size, unsigned long zone_start_pfn,
	unsigned long *zholes_size);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

/******************LKRR start 0372180****************/
/***
 * Its main job is to map the image of an executable file into virtual memory,
 * or to map memory belonging to another process into this process's virtual
 * address space, and to add the vma describing the mapped region to the
 * process's vma tree.  For use by the kernel itself.
 ***/
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	/***
	 * file   refers to an already opened file,
	 * addr   is the start address of the mapping in user space,
	 * len    is the length, offset is the starting point within the file,
	 * prot   gives the access mode of the mapping, flag holds the control flags
	 ***/
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;

	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;

	/* If offset is page aligned (no bits below PAGE_MASK are set),
	   hand the request on to do_mmap_pgoff. */
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}
/******************LKRR end 0372180****************/

extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
extern int filemap_populate(struct vm_area_struct *, unsigned long,
		unsigned long, pgprot_t, unsigned long, int);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
					 * turning readahead off */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read);
unsigned long page_cache_readahead(struct address_space *mapping,
			  struct file_ra_state *ra,
			  struct file *filp,
			  unsigned long offset,
			  unsigned long size);
void handle_ra_miss(struct address_space *mapping,
		    struct file_ra_state *ra, pgoff_t offset);
unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct * vma, unsigned long address);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
extern struct page * vmalloc_to_page(void *addr);
extern unsigned long vmalloc_to_pfn(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
		int write);
extern int check_user_page_readable(struct mm_struct *mm, unsigned long address);
int remap_pfn_range(struct vm_area_struct *, unsigned long,
		unsigned long, unsigned long, pgprot_t);

#ifdef CONFIG_PROC_FS
void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void __vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

static inline void vm_stat_account(struct vm_area_struct *vma)
{
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
}

static inline void vm_stat_unaccount(struct vm_area_struct *vma)
{
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							-vma_pages(vma));
}

/* update per process rss and vm hiwater data */
extern void update_mem_hiwater(struct task_struct *tsk);

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef	__HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
#define OOM_DISABLE -17

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
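
The LKRR block above wraps do_mmap_pgoff() with an overflow and page-alignment check on the byte offset. Below is a minimal usage sketch of how an in-kernel caller of this kernel generation might invoke do_mmap(); the helper name map_file_example and the chosen length/protection values are illustrative assumptions, not part of mm.h, and the caller is assumed to take mmap_sem for writing as do_mmap_pgoff() expects.

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/sched.h>

/* Hypothetical helper: map `len' bytes of an already opened file, starting
 * at file offset 0, into the current process's address space. */
static unsigned long map_file_example(struct file *file, unsigned long len)
{
	unsigned long addr;

	down_write(&current->mm->mmap_sem);	/* do_mmap_pgoff() expects mmap_sem held */
	addr = do_mmap(file, 0, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE, 0);		/* offset 0 is trivially page aligned */
	up_write(&current->mm->mmap_sem);

	/* On failure the result is a small negative errno cast to unsigned
	 * long, e.g. -EINVAL for a misaligned offset. */
	return addr;
}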
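
The shrinker comment earlier in the header documents the callback protocol: scan nr_to_scan objects, return how many remain, and treat nr_to_scan == 0 as a pure size query. A minimal registration sketch under those assumptions follows; the example_* names and the imaginary cache counter are invented for illustration and are not part of the kernel.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>

static int example_cache_count;			/* imaginary cache size, for illustration */
static struct shrinker *example_shrinker;

/* Follows the documented protocol: free up to nr_to_scan objects and
 * always report how many objects remain cached. */
static int example_shrink(int nr_to_scan, unsigned int gfp_mask)
{
	if (nr_to_scan)
		example_cache_count -= min(nr_to_scan, example_cache_count);
	return example_cache_count;
}

static int __init example_init(void)
{
	/* DEFAULT_SEEKS: cost (in seeks) of recreating one aged-out object */
	example_shrinker = set_shrinker(DEFAULT_SEEKS, example_shrink);
	return example_shrinker ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	remove_shrinker(example_shrinker);
}

module_init(example_init);
module_exit(example_exit);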
