mm.h
	atomic_inc(&page->_count);
}

void put_page(struct page *page);

#else		/* CONFIG_HUGETLB_PAGE */

#define page_count(p)		(atomic_read(&(p)->_count) + 1)

static inline void get_page(struct page *page)
{
	atomic_inc(&page->_count);
}

static inline void put_page(struct page *page)
{
	if (!PageReserved(page) && put_page_testzero(page))
		__page_cache_release(page);
}

#endif		/* CONFIG_HUGETLB_PAGE */
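/*
 * Editor's sketch (not part of the original header), assuming the helpers
 * above: page->_count is biased, so page_count() reports
 * atomic_read(&page->_count) + 1 and a free page reads as page_count() == 0.
 * The function below is purely illustrative; its name is made up and the
 * block is never compiled.
 */
#if 0
static void example_pin_page(struct page *page)
{
	get_page(page);		/* take our own reference: count N -> N + 1 */

	/* ... the page cannot be freed while we hold that reference ... */

	put_page(page);		/* drop it; a non-reserved page whose count
				 * falls to zero is handed back through
				 * __page_cache_release() */
}
#endif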
/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *	page_count() == 0 means the page is free.
 *	page_count() == 1 means the page is used for exactly one purpose
 *	(e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page_count() is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular
 * list of the page's disk buffers.
 *
 * For pages belonging to inodes, the page_count() is the number of
 * attaches, plus 1 if `private' contains something, plus one for
 * the page cache itself.
 *
 * All pages belonging to an inode are in these doubly linked lists:
 * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
 * using the page->list list_head. These fields are also used for
 * freelist management (when page_count()==0).
 *
 * There is also a per-mapping radix tree mapping index to the page
 * in memory if present. The tree is rooted at mapping->root.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
 * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
 *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
 */
#ifdef CONFIG_SPARSEMEM
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
#define NODES_WIDTH		NODES_SHIFT
#else
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#define FLAGS_HAS_NODE		(NODES_WIDTH > 0 || NODES_SHIFT == 0)

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT	0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
#if FLAGS_HAS_NODE
#define ZONETABLE_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#else
#define ZONETABLE_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#endif
#define ZONETABLE_PGSHIFT	ZONES_PGSHIFT

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)

static inline unsigned long page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
	return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
			ZONETABLE_MASK];
}

static inline unsigned long page_to_nid(struct page *page)
{
	if (FLAGS_HAS_NODE)
		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
	else
		return page_zone(page)->zone_pgdat->node_id;
}

static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}

static inline void set_page_zone(struct page *page, unsigned long zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, unsigned long zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

static inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page)	((page)->virtual)
#define set_page_address(page, address)		\
	do {					\
		(page)->virtual = (address);	\
	} while(0)
#define page_address_init()	do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page)	lowmem_page_address(page)
#define set_page_address(page, address)	do { } while(0)
#define page_address_init()	do { } while(0)
#endif
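/*
 * Editor's sketch (not part of the original header), assuming the
 * accessors above: set_page_links() packs zone, node and section into
 * the upper bits of page->flags, and the page_*() helpers mask the same
 * fields back out.  The function name and the sample values (zone 1,
 * node 0) are illustrative assumptions; the block is never compiled.
 */
#if 0
static void example_page_links(struct page *page, unsigned long pfn)
{
	/* Stamp the page as belonging to zone 1 on node 0, backed by the
	 * sparsemem section that covers this pfn. */
	set_page_links(page, 1, 0, pfn);

	/* The zone index comes straight back via shift-and-mask. */
	BUG_ON(page_zonenum(page) != 1);

	/* With CONFIG_SPARSEMEM the section number round-trips the same
	 * way; without it SECTIONS_MASK is 0 and page_to_section() is
	 * simply 0. */
	(void)page_to_section(page);

	/* page_to_nid() reads the NODE bits directly when they fit in
	 * page->flags (FLAGS_HAS_NODE); otherwise it goes through
	 * zone_table via page_zone(). */
	(void)page_to_nid(page);
}
#endif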
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page->private;
	return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */
#define VM_FAULT_OOM	0x00
#define VM_FAULT_SIGBUS	0x01
#define VM_FAULT_MINOR	0x02
#define VM_FAULT_MAJOR	0x03

/*
 * Special case for get_user_pages.
 * Must be in a distinct bit from the above VM_FAULT_ flags.
 */
#define VM_FAULT_WRITE	0x10

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
struct page *shmem_nopage(struct vm_area_struct *vma,
			unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage		filemap_nopage
#define shmem_lock(a, b, c)	({0;})	/* always in memory, no need to lock */
#define shmem_set_policy(a, b)	(0)
#define shmem_get_policy(a, b)	(NULL)
#endif

struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
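/*
 * Editor's sketch (not part of the original header), assuming the helpers
 * above: page->mapping is overloaded, and the PAGE_MAPPING_ANON low bit
 * marks it as an anon_vma pointer rather than an address_space.  The
 * function below is illustrative only and never compiled.
 */
#if 0
static void example_mapping_checks(struct page *page)
{
	if (PageAnon(page)) {
		/* Anonymous page: unless it sits in the swap cache,
		 * page_mapping() deliberately returns NULL rather than
		 * the disguised anon_vma pointer. */
		if (!PageSwapCache(page))
			BUG_ON(page_mapping(page) != NULL);
	} else if (page_mapping(page)) {
		/* Pagecache page: page_index() is its offset within the
		 * file, in units of PAGE_CACHE_SIZE (->private for
		 * swapcache pages, ->index otherwise). */
		pgoff_t off = page_index(page);
		(void)off;
	}

	/* page_mapped() is about user page tables, not the pagecache:
	 * _mapcount is biased at -1, so any value >= 0 means at least
	 * one pte references the page. */
	if (page_mapped(page))
		BUG_ON(page_mapcount(page) < 1);
}
#endif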