/* mm.h */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
extern unsigned long high_memory;
#include <asm/page.h>
#include <asm/atomic.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#ifdef DEBUG_VERIFY_AREA
#undef verify_area
extern int verify_area(int, const void *, unsigned long);
extern int verify_area_flf(int, const void *, unsigned long, char *file, int line, char *function);
#define verify_area(a,b,c) verify_area_flf(a,b,c,__FILE__,__LINE__,__FUNCTION__)
#else /* !DEBUG_VERIFY_AREA */
extern int verify_area(int, const void *, unsigned long);
#endif /* !DEBUG_VERIFY_AREA */
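/*
 * Illustrative sketch, not part of the original header: a driver-style
 * helper would normally validate a user buffer with verify_area() before
 * touching it, using the VERIFY_READ/VERIFY_WRITE codes above.  The helper
 * name and the plain memcpy() (standing in for the real user-copy
 * primitive) are hypothetical.
 */
#if 0
static int example_put_user_buf(void *ubuf, const void *kbuf, unsigned long len)
{
        int err = verify_area(VERIFY_WRITE, ubuf, len); /* 0 on success, -EFAULT on failure */
        if (err)
                return err;
        memcpy(ubuf, kbuf, len);        /* placeholder for the architecture's user-copy routine */
        return 0;
}
#endif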
#ifdef MAGIC_ROM_PTR
extern int is_in_rom(unsigned long);
#endif /* MAGIC_ROM_PTR */
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
* we have a virtual fs - giving a cleaner interface to the
* mm details, and allowing different kinds of memory mappings
* (from shared memory to executable loading to arbitrary
* mmap() functions).
*/
#ifndef NO_MM
/*
* This struct defines a VMM memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (i.e. a shared
* library, the executable area, etc).
*/
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* VM area parameters */
        unsigned long vm_start;
        unsigned long vm_end;
        pgprot_t vm_page_prot;
        unsigned short vm_flags;
        /* AVL tree of VM areas per task, sorted by address */
        short vm_avl_height;
        struct vm_area_struct * vm_avl_left;
        struct vm_area_struct * vm_avl_right;
        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct * vm_next;
        /* for areas with inode, the circular list inode->i_mmap */
        /* for shm areas, the circular list of attaches */
        /* otherwise unused */
        struct vm_area_struct * vm_next_share;
        struct vm_area_struct * vm_prev_share;
        /* more */
        struct vm_operations_struct * vm_ops;
        unsigned long vm_offset;
        struct inode * vm_inode;
        unsigned long vm_pte;           /* shared mem */
};
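/*
 * Illustrative sketch, not part of the original header: the per-task VM
 * areas form an address-sorted singly linked list through vm_next, so the
 * area covering a given address can be found with a linear walk.  The
 * helper below is hypothetical; the kernel's own lookup can also use the
 * AVL fields (vm_avl_left/vm_avl_right) above.
 */
#if 0
static struct vm_area_struct * example_find_area(struct vm_area_struct * list,
        unsigned long addr)
{
        struct vm_area_struct * vma;

        for (vma = list; vma; vma = vma->vm_next)
                if (addr < vma->vm_end)
                        return (addr >= vma->vm_start) ? vma : NULL;
        return NULL;
}
#endif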
#else /* NO_MM */
/* This dummy vm_area_struct does not define a VM area; it is only
   used to convey data between do_mmap and an f_op's mmap function. */
struct vm_area_struct {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned short vm_flags;
        unsigned long vm_offset;
};
#endif /* NO_MM */
/*
* vm_flags..
*/
#define VM_READ 0x0001 /* currently active flags */
#define VM_WRITE 0x0002
#define VM_EXEC 0x0004
#define VM_SHARED 0x0008
#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x0020
#define VM_MAYEXEC 0x0040
#define VM_MAYSHARE 0x0080
#define VM_GROWSDOWN 0x0100 /* general info on the segment */
#define VM_GROWSUP 0x0200
#define VM_SHM 0x0400 /* shared memory area, don't swap out */
#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE 0x1000
#define VM_LOCKED 0x2000
#define VM_STACK_FLAGS 0x0177
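/*
 * Note (not in the original header): 0x0177 is simply
 * VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
 * VM_GROWSDOWN, i.e. a fully accessible, downward-growing stack segment.
 */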
#ifndef NO_MM
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
extern pgprot_t protection_map[16];
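/*
 * Illustrative use, not part of the original header: the low four vm_flags
 * bits (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED) index protection_map to
 * obtain the pgprot_t that ends up in the page tables, e.g.
 *
 *      vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
 */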
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), and pointers
* to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
        void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
        int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
        void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
        unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
        unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page);
        int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
        pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
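/*
 * Illustrative sketch, not part of the original header: a file system or
 * driver usually fills in only the handlers it needs and leaves the rest
 * NULL; the page-fault code falls back to its default behaviour for NULL
 * entries.  The handler and variable names below are hypothetical.
 */
#if 0
static unsigned long example_nopage(struct vm_area_struct * area,
        unsigned long address, int write_access)
{
        /* return the address of a page holding the missing data,
           or 0 if it cannot be supplied */
        return 0;
}

static struct vm_operations_struct example_vm_ops = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        example_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};
#endif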
#endif /* !NO_MM */
/*
* Try to keep the most commonly accessed fields in single cache lines
* here (16 bytes or greater). This ordering should be particularly
* beneficial on 32-bit processors.
*
* The first line is data used in page cache lookup, the second line
* is used for linear searches (eg. clock algorithm scans).
*/
typedef struct page {
        /* these must be first (free area handling) */
        struct page *next;
        struct page *prev;
        struct inode *inode;
        unsigned long offset;
        struct page *next_hash;
        atomic_t count;
        unsigned flags;         /* atomic flags, some possibly updated asynchronously */
        unsigned dirty:16,
                 age:8;
        struct wait_queue *wait;
        struct page *prev_hash;
        struct buffer_head * buffers;
        unsigned long swap_unlock_entry;
        unsigned long map_nr;   /* page->map_nr == page - mem_map */
} mem_map_t;
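/*
 * Illustrative note, not part of the original header: map_nr ties a struct
 * page back to its slot in the global mem_map[] array, so conversion in
 * either direction is plain pointer arithmetic.  Assuming the usual
 * "extern mem_map_t * mem_map;" declaration and the architecture's MAP_NR()
 * macro:
 *
 *      mem_map_t *page = mem_map + MAP_NR(addr);       page for an address
 *      unsigned long nr = page->map_nr;                always == page - mem_map
 */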
/* Page flag bit values */
#define PG_locked 0
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3
#define PG_free_after 4
#define PG_decr_after 5
#define PG_swap_unlock_after 6
#define PG_DMA 7
#define PG_dirty 8              /* assumed bit number; referenced only by PageDirty() below */
#define PG_reserved 31
/* Make it prettier to test the above... */
#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
#define PageError(page) (test_bit(PG_error, &(page)->flags))
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
/*
* page->reserved denotes a page which must never be accessed (which
* may not even be present).
*
* page->dma is set for those pages which lie in the range of
* physical addresses capable of carrying DMA transfers.
*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
* zeroes, and text pages of executables and shared libraries normally
* have at most one copy in memory.
*
* For the non-reserved pages, page->count denotes a reference count.
* page->count == 0 means the page is free.
* page->count == 1 means the page is used for exactly one purpose
* (e.g. a private data page of one process).
*
* A page may be used by kmalloc() or by anyone else who does a
* get_free_page(). In this case page->count is at least 1, and
* all other fields are unused but should be 0 or NULL. The
* management of this page is the responsibility of the one who uses
* it.
*
* The other pages (we may call them "process pages") are completely
* managed by the Linux memory manager: I/O, buffers, swapping etc.
* The following discussion applies only to them.
*
* A page may belong to an inode's memory mapping. In this case,
* page->inode is the inode, and page->offset is the file offset
* of the page (not necessarily a multiple of PAGE_SIZE).
*
* A page may have buffers allocated to it. In this case,
* page->buffers is a circular list of these buffer heads. Else,
* page->buffers == NULL.
*
* For pages belonging to inodes, the page->count is the number of
* attaches, plus 1 if buffers are allocated to the page.
*
* All pages belonging to an inode make up a doubly linked list
* inode->i_pages, using the fields page->next and page->prev. (These
* fields are also used for freelist management when page->count==0.)
* There is also a hash table mapping (inode,offset) to the page
* in memory if present. The lists for this hash table use the fields
* page->next_hash and page->prev_hash.
*
* All process pages can do I/O:
* - inode pages may need to be read from disk,