
📄 mm.h

📁 Linux kernel source code. The download is a compressed archive containing the source code that accompanies the book 《Linux内核》 (The Linux Kernel).
📖 Page 1 of 2
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/mmzone.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_dirty_list;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;
	unsigned long vm_end;

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;
	unsigned long vm_flags;

	/* AVL tree of VM areas per task, sorted by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* For areas with an address space and backing store,
	 * one of the address_space->i_mmap{,shared} lists,
	 * for shm areas, the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	struct vm_operations_struct * vm_ops;
	unsigned long vm_pgoff;		/* offset in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;
	unsigned long vm_raend;
	void * vm_private_data;		/* was vm_pte (shared mem) */
};

/*
 * vm_flags..
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_SHM		0x00000400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */

#define VM_STACK_FLAGS	0x00000177

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
};

/*
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater).  This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 */
typedef struct page {
	struct list_head list;
	struct address_space *mapping;
	unsigned long index;
	struct page *next_hash;
	atomic_t count;
	unsigned long flags;	/* atomic flags, some possibly updated asynchronously */
	struct list_head lru;
	unsigned long age;
	wait_queue_head_t wait;
	struct page **pprev_hash;
	struct buffer_head * buffers;
	void *virtual; /* non-NULL if kmapped */
	struct zone_struct *zone;
} mem_map_t;

#define get_page(p)		atomic_inc(&(p)->count)
#define put_page(p)		__free_page(p)
#define put_page_testzero(p) 	atomic_dec_and_test(&(p)->count)
#define page_count(p)		atomic_read(&(p)->count)
#define set_page_count(p,v) 	atomic_set(&(p)->count, v)

/* Page flag bit values */
#define PG_locked		 0
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_dirty		 4
#define PG_decr_after		 5
#define PG_active		 6
#define PG_inactive_dirty	 7
#define PG_slab			 8
#define PG_swap_cache		 9
#define PG_skip			10
#define PG_inactive_clean	11
#define PG_highmem		12
				/* bits 21-29 unused */
#define PG_arch_1		30
#define PG_reserved		31

/* Make it prettier to test the above... */
#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
#define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)	clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)	test_bit(PG_locked, &(page)->flags)
#define LockPage(page)		set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)

extern void __set_page_dirty(struct page *);

static inline void set_page_dirty(struct page * page)
{
	if (!test_and_set_bit(PG_dirty, &page->flags))
		__set_page_dirty(page);
}

/*
 * The first mb is necessary to safely close the critical section opened by the
 * TryLockPage(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page).
 */
#define UnlockPage(page)	do { \
					smp_mb__before_clear_bit(); \
					if (!test_and_clear_bit(PG_locked, &(page)->flags)) BUG(); \
					smp_mb__after_clear_bit(); \
					if (waitqueue_active(&page->wait)) \
						wake_up(&page->wait); \
				} while (0)
#define PageError(page)		test_bit(PG_error, &(page)->flags)
#define SetPageError(page)	set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)	test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)	set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page)	clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageDecrAfter(page)	test_bit(PG_decr_after, &(page)->flags)
#define SetPageDecrAfter(page)	set_bit(PG_decr_after, &(page)->flags)
#define PageTestandClearDecrAfter(page)	test_and_clear_bit(PG_decr_after, &(page)->flags)
#define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
#define PageSwapCache(page)	test_bit(PG_swap_cache, &(page)->flags)
#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)

#define PageSetSlab(page)	set_bit(PG_slab, &(page)->flags)
#define PageSetSwapCache(page)	set_bit(PG_swap_cache, &(page)->flags)

#define PageTestandSetSwapCache(page)	test_and_set_bit(PG_swap_cache, &(page)->flags)

#define PageClearSlab(page)		clear_bit(PG_slab, &(page)->flags)
#define PageClearSwapCache(page)	clear_bit(PG_swap_cache, &(page)->flags)

#define PageTestandClearSwapCache(page)	test_and_clear_bit(PG_swap_cache, &(page)->flags)

#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)

#define PageInactiveDirty(page)	test_bit(PG_inactive_dirty, &(page)->flags)
#define SetPageInactiveDirty(page)	set_bit(PG_inactive_dirty, &(page)->flags)
#define ClearPageInactiveDirty(page)	clear_bit(PG_inactive_dirty, &(page)->flags)

#define PageInactiveClean(page)	test_bit(PG_inactive_clean, &(page)->flags)
#define SetPageInactiveClean(page)	set_bit(PG_inactive_clean, &(page)->flags)
#define ClearPageInactiveClean(page)	clear_bit(PG_inactive_clean, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page)		test_bit(PG_highmem, &(page)->flags)
#else
#define PageHighMem(page)		0 /* needed to optimize away at compile time */
#endif

#define SetPageReserved(page)		set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)		clear_bit(PG_reserved, &(page)->flags)

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for a page which must never be accessed (which
 * may not even be present).
 *
 * PG_DMA has been removed, page->zone now tells exactly whether the
 * page is suited to do DMAing into.
 *
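The listing breaks off here at the viewer's page boundary; the rest of the header continues on page 2. As a reading aid, here is a minimal sketch of how the vm_area_struct list and the vm_flags bits declared above are typically consumed. It is written in the style of 2.4-era kernel code and would have to be built inside such a kernel tree; the function print_writable_vmas() is an invented name for illustration and is not part of mm.h.

/* Hypothetical illustration, not part of mm.h: walk a task's VMA list
 * (the vm_next chain described above) and report writable mappings.
 * Assumes a 2.4-era kernel tree; the caller is assumed to hold
 * mm->mmap_sem for reading while the list is traversed. */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

static void print_writable_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/* mm->mmap is the head of the per-task list, sorted by address */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_WRITE))
			continue;
		printk(KERN_DEBUG "writable vma %08lx-%08lx%s%s\n",
		       vma->vm_start, vma->vm_end,
		       (vma->vm_flags & VM_SHARED) ? " shared" : "",
		       VM_SequentialReadHint(vma) ? " seq-read" : "");
	}
}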

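The page reference-count helpers (get_page, put_page, page_count) and the PG_* flag macros are meant to be used together with TryLockPage()/UnlockPage(). The sketch below illustrates that protocol under the same assumption of a 2.4-era kernel build; touch_page_example() is an invented name and the body is only a schematic of the locking and refcount discipline, not code from the kernel.

/* Hypothetical illustration of the page flag/refcount protocol defined above.
 * Not part of mm.h; assumes it is compiled in a 2.4-era kernel tree. */
#include <linux/mm.h>

static int touch_page_example(struct page *page)
{
	get_page(page);			/* pin: atomic_inc(&page->count) */

	if (TryLockPage(page)) {	/* non-zero means PG_locked was already set */
		put_page(page);
		return -EAGAIN;		/* someone else holds the lock */
	}

	/* ... the page is now locked; inspect or modify it ... */
	if (!Page_Uptodate(page))
		SetPageReferenced(page);
	set_page_dirty(page);		/* sets PG_dirty, notifying the mapping only once */

	UnlockPage(page);		/* clears PG_locked with the barriers, wakes waiters */
	put_page(page);
	return 0;
}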
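Finally, the vm_operations_struct declared above is how a driver or filesystem plugs into the page-fault path: the nopage hook is called when no page is present at the faulting address and must return a referenced struct page, or one of the NOPAGE_SIGBUS / NOPAGE_OOM values defined above. A hedged sketch, again assuming a 2.4-era tree; example_nopage(), example_vm_ops, and the idea of stashing a single pre-allocated page in vm_private_data are illustrative assumptions, not part of mm.h.

/* Hypothetical nopage handler, not part of mm.h. It assumes the mapping's
 * (equally hypothetical) mmap hook stored one pre-allocated page in
 * vm_private_data, and returns that page with an extra reference. */
#include <linux/mm.h>

static struct page *example_nopage(struct vm_area_struct *area,
				   unsigned long address, int write_access)
{
	struct page *page = (struct page *) area->vm_private_data;

	if (address < area->vm_start || address >= area->vm_end)
		return NOPAGE_SIGBUS;	/* fault outside the mapping */
	if (!page)
		return NOPAGE_OOM;	/* nothing to map */

	get_page(page);			/* the fault handler expects a referenced page */
	return page;
}

static struct vm_operations_struct example_vm_ops = {
	nopage:	example_nopage,		/* 2.4-era labeled-initializer syntax */
};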