
📄 mm.h

📁 Axis 221 camera embedded programming interface
💻 C header (.h)
📖 Page 1 of 3
 *
 *        No sparsemem:       |       NODE     | ZONE | ... | FLAGS |
 *   with space for node:     | SECTION | NODE | ZONE | ... | FLAGS |
 *     no space for node:     | SECTION |     ZONE    | ... | FLAGS |
 */
#ifdef CONFIG_SPARSEMEM
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
#define NODES_WIDTH		NODES_SHIFT
#else
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#endif

#if ZONES_WIDTH > 0
#define ZONEID_PGSHIFT		ZONES_PGSHIFT
#else
#define ZONEID_PGSHIFT		NODES_PGOFF
#endif

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
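/*
 * Illustrative sketch, not part of mm.h: a standalone user-space program that
 * mimics the SECTION | NODE | ZONE | ... | FLAGS packing described above,
 * using made-up widths (2 zone bits and 6 node bits at the top of an
 * unsigned long).  It only demonstrates the shift/mask arithmetic that
 * page_zonenum() and friends rely on; the real widths come from the kernel
 * configuration, and all DEMO_* names are invented for this example.
 */
#include <stdio.h>

#define DEMO_ZONES_WIDTH	2
#define DEMO_NODES_WIDTH	6
#define DEMO_BITS		(sizeof(unsigned long) * 8)

#define DEMO_NODES_PGOFF	(DEMO_BITS - DEMO_NODES_WIDTH)
#define DEMO_ZONES_PGOFF	(DEMO_NODES_PGOFF - DEMO_ZONES_WIDTH)

#define DEMO_ZONES_MASK		((1UL << DEMO_ZONES_WIDTH) - 1)
#define DEMO_NODES_MASK		((1UL << DEMO_NODES_WIDTH) - 1)

int main(void)
{
	unsigned long flags = 0;

	/* set_page_node()/set_page_zone() style: clear the field, then OR in the value */
	flags &= ~(DEMO_NODES_MASK << DEMO_NODES_PGOFF);
	flags |= (3UL & DEMO_NODES_MASK) << DEMO_NODES_PGOFF;	/* node 3 */
	flags &= ~(DEMO_ZONES_MASK << DEMO_ZONES_PGOFF);
	flags |= (1UL & DEMO_ZONES_MASK) << DEMO_ZONES_PGOFF;	/* zone 1 */

	/* page_to_nid()/page_zonenum() style: shift down and mask */
	printf("node = %lu, zone = %lu\n",
	       (flags >> DEMO_NODES_PGOFF) & DEMO_NODES_MASK,
	       (flags >> DEMO_ZONES_PGOFF) & DEMO_ZONES_MASK);
	return 0;
}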
/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying a zone since we could be using the section number
 * id if we have no node id available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(struct page *page);
#else
static inline int page_to_nid(struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
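/*
 * Illustrative sketch, not part of mm.h: the PAGE_MAPPING_ANON trick above
 * stores a flag in the low bit of page->mapping, which works because the
 * pointed-to structures are word-aligned, so bit 0 of the pointer is always
 * free.  This standalone program shows the same low-bit tagging on an
 * ordinary aligned pointer; all demo_* names are invented for this example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAPPING_ANON	1UL

struct demo_space { int dummy; };	/* stands in for address_space / anon_vma */

int main(void)
{
	static struct demo_space space;		/* aligned, so bit 0 is free */
	void *mapping;

	/* tag the pointer the way an anonymous page's ->mapping is tagged */
	mapping = (void *)((uintptr_t)&space | DEMO_MAPPING_ANON);

	/* PageAnon()-style test of the low bit */
	printf("anon? %d\n", ((uintptr_t)mapping & DEMO_MAPPING_ANON) != 0);

	/* page_mapping()-style recovery: an anon mapping is reported as NULL */
	if ((uintptr_t)mapping & DEMO_MAPPING_ANON)
		mapping = NULL;
	printf("reported mapping: %p\n", mapping);
	return 0;
}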
/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))
#define NOPAGE_REFAULT	((struct page *) (-2))	/* Return to userspace, rerun */

/*
 * Error return values for the *_nopfn functions
 */
#define NOPFN_SIGBUS	((unsigned long) -1)
#define NOPFN_OOM	((unsigned long) -2)

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */
#define VM_FAULT_OOM	0x00
#define VM_FAULT_SIGBUS	0x01
#define VM_FAULT_MINOR	0x02
#define VM_FAULT_MAJOR	0x03

/*
 * Special case for get_user_pages.
 * Must be in a distinct bit from the above VM_FAULT_ flags.
 */
#define VM_FAULT_WRITE	0x10

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
struct page *shmem_nopage(struct vm_area_struct *vma,
			unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage

static inline int shmem_lock(struct file *file, int lock,
			     struct user_struct *user)
{
	return 0;
}

static inline int shmem_set_policy(struct vm_area_struct *vma,
				   struct mempolicy *new)
{
	return 0;
}

static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
						 unsigned long addr)
{
	return NULL;
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);

int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags);
#endif

static inline int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
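/*
 * Illustrative sketch, not part of mm.h: _mapcount starts at -1, so the
 * stored value is "number of mappings minus one" and the -1 <-> 0 transitions
 * mark the first map and the last unmap.  This standalone program walks
 * through the same bookkeeping with a plain int (the kernel uses atomic_t);
 * the demo_* helper names are invented for this example.
 */
#include <stdio.h>

static int demo_mapcount_raw = -1;	/* what reset_page_mapcount() would set */

static int demo_page_mapcount(void) { return demo_mapcount_raw + 1; }
static int demo_page_mapped(void)   { return demo_mapcount_raw >= 0; }

int main(void)
{
	printf("fresh page:  mapcount=%d mapped=%d\n",
	       demo_page_mapcount(), demo_page_mapped());

	demo_mapcount_raw++;	/* first mapping: -1 -> 0, atomic_inc_and_test() fires */
	printf("first map:   mapcount=%d mapped=%d\n",
	       demo_page_mapcount(), demo_page_mapped());

	demo_mapcount_raw--;	/* last unmap: 0 -> -1, atomic_add_negative(-1) fires */
	printf("last unmap:  mapcount=%d mapped=%d\n",
	       demo_page_mapcount(), demo_page_mapped());
	return 0;
}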
/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
			unsigned long size, pgprot_t prot);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot);

#ifdef CONFIG_MMU
extern int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);

static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			int write_access)
{
	return __handle_mm_fault(mm, vma, address, write_access) &
				(~VM_FAULT_WRITE);
}
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			int write_access)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write);
void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages,
		struct vm_area_struct **vmas);
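/*
 * Illustrative sketch, not part of mm.h: handle_mm_fault() above masks off
 * VM_FAULT_WRITE before returning, because that bit is only meaningful to
 * get_user_pages() and must stay distinct from the OOM/SIGBUS/MINOR/MAJOR
 * codes.  This standalone program replays the masking with copies of the
 * constants defined above; the DEMO_* names are invented for this example.
 */
#include <stdio.h>

#define DEMO_VM_FAULT_MINOR	0x02
#define DEMO_VM_FAULT_MAJOR	0x03
#define DEMO_VM_FAULT_WRITE	0x10	/* distinct bit, outside the 0x00..0x03 codes */

int main(void)
{
	/* a write fault resolved as a major fault, as the raw handler might report */
	int raw = DEMO_VM_FAULT_MAJOR | DEMO_VM_FAULT_WRITE;

	/* ordinary callers only ever see the fault class */
	printf("caller sees 0x%02x\n", raw & ~DEMO_VM_FAULT_WRITE);

	/* a get_user_pages()-style caller can still test the write bit on the raw value */
	printf("write bit set: %d\n", (raw & DEMO_VM_FAULT_WRITE) != 0);
	return 0;
}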
