📄 mm.h
#ifndef XEN
    struct rb_node vm_rb;

// XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages.  A MAP_SHARED vma
     * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node;     /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;          /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;             /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;              /* File we map to (can be NULL). */
    void * vm_private_data;             /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;        /* NUMA policy for the VMA */
#endif
#endif
};

/*
 * vm_flags..
 */
#define VM_READ         0x00000001      /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400      /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000      /* Memory mapped I/O or similar */

                                        /* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000      /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
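/*
 * Illustrative sketch (not part of the original mm.h): each VM_* value above
 * is a single-bit mask that is OR'ed into a vma's vm_flags word and tested
 * with a bitwise AND.  As an example, a hypothetical helper that checks
 * whether a mapping is both shared and writable could look like this:
 */
#if 0   /* example only -- not compiled */
static inline int vma_is_shared_writable(unsigned long vm_flags)
{
    /* Both bits must be set for a shared, writable mapping. */
    return (vm_flags & (VM_SHARED | VM_WRITE)) == (VM_SHARED | VM_WRITE);
}
#endif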
#if 0   /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page_info *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}

static inline unsigned long page_to_nid(struct page_info *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page_info *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page_info *page)
{
    return __va(page_to_mfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page)              ((page)->virtual)
#define set_page_address(page, address)                 \
    do {                                                \
        (page)->virtual = (address);                    \
    } while(0)
#define page_address_init()             do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page_info *page);
void set_page_address(struct page_info *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page)              lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init()             do { } while(0)
#endif
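/*
 * Illustrative sketch (not part of the original mm.h): in the
 * !HASHED_PAGE_VIRTUAL && !WANT_PAGE_VIRTUAL configuration, page_address()
 * is simply lowmem_page_address(), i.e. the direct-map virtual address of
 * the page's machine frame.  Assuming __pa() is the usual inverse of __va(),
 * the mfn -> virtual -> mfn round trip would look like this:
 */
#if 0   /* example only -- not compiled */
static inline int page_address_roundtrip_ok(struct page_info *page)
{
    void *va = page_address(page);  /* __va(page_to_mfn(page) << PAGE_SHIFT) */

    /* Converting the virtual address back should yield the same frame. */
    return (__pa(va) >> PAGE_SHIFT) == page_to_mfn(page);
}
#endif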
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page_info *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

extern void alloc_dom_xen_and_dom_io(void);
extern int mm_teardown(struct domain* d);
extern void mm_final_teardown(struct domain* d);
extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
extern int __assign_domain_page(struct domain *d, unsigned long mpaddr,
                                unsigned long physaddr, unsigned long flags);
extern void assign_domain_page(struct domain *d, unsigned long mpaddr,
                               unsigned long physaddr);
extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr,
                                  unsigned long flags);
struct p2m_entry;
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
                                       struct p2m_entry* entry);
extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d,
                                                 unsigned long mpaddr);
extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
                                             unsigned long phys_addr,
                                             unsigned long size,
                                             unsigned long flags);
extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr,
                                             unsigned long size,
                                             unsigned long flags);
int domain_page_mapped(struct domain *d, unsigned long mpaddr);
int efi_mmio(unsigned long physaddr, unsigned long size);
extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0,
                                  unsigned long arg1, unsigned long arg2,
                                  unsigned long arg3);
extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
                                        unsigned int extent_order);
extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn,
                                        unsigned long mfn, unsigned long flags,
                                        domid_t domid);
extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn,
                                                  unsigned long gmfn,
                                                  unsigned long flags,
                                                  domid_t domid);

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
extern void expose_p2m_init(void);
extern unsigned long dom0vp_expose_p2m(struct domain* d,
                                       unsigned long conv_start_gpfn,
                                       unsigned long assign_start_gpfn,
                                       unsigned long expose_size,
                                       unsigned long granule_pfn);
extern void foreign_p2m_init(struct domain* d);
extern void foreign_p2m_destroy(struct domain* d);
extern unsigned long dom0vp_expose_foreign_p2m(struct domain* dest_dom,
                                               unsigned long dest_gpfn,
                                               domid_t domid,
                                               XEN_GUEST_HANDLE(char) buffer,
                                               unsigned long flags);
extern unsigned long dom0vp_unexpose_foreign_p2m(struct domain* dest_dom,
                                                 unsigned long dest_gpfn,
                                                 domid_t domid);
#else
#define expose_p2m_init()       do { } while (0)
#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
#define foreign_p2m_init(d)     do { } while (0)
#define foreign_p2m_destroy(d)  do { } while (0)
#define dom0vp_expose_foreign_p2m(dest_dom, dest_gpfn, domid, buffer, flags) (-ENOSYS)
#define dom0vp_unexpose_foreign_p2m(dest_dom, dest_gpfn, domid) (-ENOSYS)
#endif

extern volatile unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
                                struct p2m_entry* entry);

#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY       (~0UL)
#define VALID_M2P(_e)           (!((_e) & (1UL<<63)))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])

/* If the pmt table is provided by the control panel later, we need __get_user*
 * here.  However, if it is allocated by the HV, we can access it directly. */
#define mfn_to_gmfn(_d, mfn)    \
    get_gpfn_from_mfn(mfn)

#define gmfn_to_mfn(_d, gpfn)   \
    gmfn_to_mfn_foreign((_d), (gpfn))

#define __gpfn_invalid(_d, gpfn)    \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)

#define __gmfn_valid(_d, gpfn)  !__gpfn_invalid(_d, gpfn)

#define __gpa_to_mpa(_d, gpa)   \
    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))

#define __mpa_to_gpa(madr) \
    ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
     ((madr) & ~PAGE_MASK))

/* Internal use only: returns 0 in case of bad address. */
extern unsigned long paddr_to_maddr(unsigned long paddr);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);

#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io;    /* for vmcoreinfo */

#endif /* __ASM_IA64_MM_H__ */
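/*
 * Illustrative sketch (not part of the original mm.h): __gpa_to_mpa() above
 * splits a guest-physical address into a frame number and an in-page offset,
 * maps the frame with gmfn_to_mfn(), and glues the offset back on.  Assuming
 * PAGE_SHIFT is 14 (16KB pages, the usual Xen/ia64 setting) and that
 * gmfn_to_mfn(d, 0x2) returns machine frame 0x8f, then for gpa = 0x8123:
 *
 *     gpfn   = 0x8123 >> 14         = 0x2
 *     offset = 0x8123 & ~PAGE_MASK  = 0x123
 *     mpa    = (0x8f << 14) | 0x123 = 0x23c123
 *
 * A hypothetical wrapper that performs the same translation step by step:
 */
#if 0   /* example only -- not compiled */
static inline unsigned long example_gpa_to_mpa(struct domain *d,
                                               unsigned long gpa)
{
    unsigned long gpfn   = gpa >> PAGE_SHIFT;      /* guest frame number  */
    unsigned long offset = gpa & ~PAGE_MASK;       /* offset within page  */
    unsigned long mfn    = gmfn_to_mfn(d, gpfn);   /* guest -> machine    */

    return (mfn << PAGE_SHIFT) | offset;           /* == __gpa_to_mpa()   */
}
#endif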