mm.h
/*
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    dom0 vp model support
 */
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <linux/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "016lx"

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } __attribute__ ((packed)) inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
        } __attribute__ ((packed)) free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

#if 0
    // following added for Linux compiling
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru;   // is this the same as above "list"?
#endif
};

#define set_page_count(p, v)    atomic_set(&(p)->_count, v - 1)

/*
 * Still small set of flags defined so far on IA-64.
 * IA-64 should make it the same definition as x86_64.
 */

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0UL<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1UL<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2UL<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3UL<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4UL<<29) /* using this page as an L4 page table? */
 /* Value 5 reserved. See asm-x86/mm.h */
 /* Value 6 reserved. See asm-x86/mm.h */
#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */

 /* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1UL<<_PGT_validated)
 /* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1UL<<_PGT_pinned)

 /* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1UL<<16)-1)

 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1UL<<_PGC_allocated)
 /* Bit 30 reserved. See asm-x86/mm.h */
 /* Bit 29 reserved. See asm-x86/mm.h */
 /* 29-bit count of references to this frame. */
#define PGC_count_mask      ((1UL<<29)-1)
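/*
 * Illustrative sketch (not part of the original header): type_info packs a
 * mutually exclusive page type in bits 29-31 (PGT_type_mask), the
 * PGT_validated/PGT_pinned flag bits, and a 16-bit type-use count
 * (PGT_count_mask).  A hypothetical predicate for "validated L1 page table"
 * would combine the masks as follows.
 */
static inline int pgt_is_validated_l1(const struct page_info *page)
{
    unsigned long ti = page->u.inuse.type_info;  /* unsynchronised snapshot */
    return ((ti & PGT_type_mask) == PGT_l1_page_table) &&
           ((ti & PGT_validated) != 0);
}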
#define is_xen_heap_mfn(mfn)   (((mfn) < paddr_to_pfn(xenheap_phys_end)) \
                                && ((mfn) >= paddr_to_pfn(xen_pstart)))
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))

extern void *xen_pickle_offset;
#define __pickle(a)     ((unsigned long)a - (unsigned long)xen_pickle_offset)
#define __unpickle(a)   (void *)(a + xen_pickle_offset)

static inline struct domain *unpickle_domptr(u64 _d)
{ return (_d == 0) ? NULL : __unpickle(_d); }
static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pickle(_d); }

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
void share_xen_page_with_guest(struct page_info *page,
                               struct domain *d, int readonly);
void share_xen_page_with_privileged_guests(struct page_info *page,
                                           int readonly);

extern unsigned long frametable_pg_dir[];
extern struct page_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

extern void __init init_frametable(void);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x = y;
        nx = x - 1;
    } while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||      /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) ||     /* Count overflow? */
            unlikely((x >> 32) != _domain)) {           /* Wrong owner? */
            gdprintk(XENLOG_INFO, "Error pfn %lx: rd=%p, od=%p, caf=%016lx, "
                     "taf=%" PRtype_info "\n",
                     page_to_mfn(page), domain,
                     unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
            return 0;
        }
    } while (unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));

    return 1;
}

int is_iomem_page(unsigned long mfn);

extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, u32 type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}

#define set_machinetophys(_mfn, _pfn) do { } while(0);

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)             (_s)
#define memguard_guard_stack(_p)      ((void)0)
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif

// prototypes of misc memory stuff
//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
//void __free_pages(struct page_info *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);
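/*
 * Illustrative sketch (not part of the original header): a typical caller
 * pairs get_page_and_type() with put_page_and_type() so that both a general
 * reference and a type reference are held while the page is used.
 * mfn_to_page() is assumed to be provided elsewhere (frame_table lookup).
 */
static inline int use_writable_page(unsigned long mfn, struct domain *d)
{
    struct page_info *page = mfn_to_page(mfn);   /* assumed helper */

    /* Fails if the page is free, owned by another domain, or cannot take
     * the PGT_writable_page type. */
    if ( !get_page_and_type(page, d, PGT_writable_page) )
        return 0;

    /* ... operate on the page while the references pin it ... */

    put_page_and_type(page);    /* drop the type ref, then the general ref */
    return 1;
}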
// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct *vm_mm;    /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */