* - inode pages which have been modified and are MAP_SHARED may need
* to be written to disk,
* - private pages which have been modified may need to be swapped out
* to swap space and (later) to be read back into memory.
* During disk I/O, page->locked is true. This bit is set before I/O
* and reset when I/O completes. page->wait is a wait queue of all
* tasks waiting for the I/O on this page to complete.
* page->uptodate tells whether the page's contents are valid.
* When a read completes, the page becomes uptodate, unless a disk I/O
* error happened.
* When a write completes, and page->free_after is true, the page is
* freed without any further delay.
*
* For choosing which pages to swap out, inode pages carry a
* page->referenced bit, which is set any time the system accesses
* that page through the (inode,offset) hash table.
* There is also the page->age counter, which implements a linear
* decay (why not an exponential decay?), see swapctl.h.
*/
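/*
 * Illustrative sketch (not part of this header) of the I/O protocol
 * described above, using the page bits and wait queue named in the
 * comment; start_io() is a hypothetical stand-in for the real,
 * driver-specific submission path:
 *
 *	page->locked = 1;		// claim the page before starting I/O
 *	start_io(page);			// hypothetical: submit the request
 *	...
 *	// in the completion path:
 *	if (!io_error)
 *		page->uptodate = 1;	// contents are now valid
 *	page->locked = 0;		// I/O done, drop the lock bit
 *	wake_up(&page->wait);		// resume tasks sleeping on this page
 */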
extern mem_map_t * mem_map;
#ifdef DEBUG_FREE_PAGES
#undef __get_free_pages
#undef get_free_page
#undef free_page
#undef __free_page
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
extern void free_pages(unsigned long addr, unsigned long order);
extern unsigned long get_free_page(int priority);
extern void __free_page(struct page * ptr);
/*
* This is timing-critical - most of the time in getting a new page
* goes to clearing the page. If you want a page without the clearing
* overhead, just use __get_free_page() directly..
*/
#define __get_free_page(priority) __get_free_pages((priority),0,0)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages_flf(int priority, unsigned long gfporder, int dma, char *file, int line, char *function);
extern unsigned long get_free_page_flf(int priority, char *file, int line, char *function);
#define __get_free_pages(priority,order,dma) __get_free_pages_flf(priority,order,dma,__FILE__,__LINE__,__FUNCTION__)
#define get_free_page(priority) get_free_page_flf(priority,__FILE__,__LINE__,__FUNCTION__)
/* memory.c & swap.c */
#define free_page(addr) free_pages((addr),0)
extern void free_pages_flf(unsigned long addr, unsigned long order, char *file, int line, char *function);
extern void __free_page_flf(struct page *, char *file, int line, char *function);
#define free_pages(addr, order) free_pages_flf(addr, order, __FILE__, __LINE__, __FUNCTION__)
#define __free_page(page) __free_page_flf(page, __FILE__, __LINE__, __FUNCTION__)
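/*
 * Illustrative sketch (not part of this header): an _flf ("file, line,
 * function") wrapper is expected to record the call site and then defer
 * to the real allocator. The bookkeeping below is an assumption; only
 * the prototypes and the call-site macros above are real, and the actual
 * implementations live in the .c files:
 *
 *	void free_pages_flf(unsigned long addr, unsigned long order,
 *			    char *file, int line, char *function)
 *	{
 *		printk("free_pages(%08lx,%lu) at %s:%d (%s)\n",
 *		       addr, order, file, line, function);
 *		free_pages(addr, order);	// the real, un-#define'd function
 *	}
 */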
#else /* !DEBUG_FREE_PAGES */
/*
* This is timing-critical - most of the time in getting a new page
* goes to clearing the page. If you want a page without the clearing
* overhead, just use __get_free_page() directly..
*/
#define __get_free_page(priority) __get_free_pages((priority),0,0)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
extern inline unsigned long get_free_page(int priority)
{
	unsigned long page;

	page = __get_free_page(priority);
	if (page)
		memset((void *) page, 0, PAGE_SIZE);
	return page;
}
/* memory.c & swap.c */
#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);
extern void __free_page(struct page *);
#endif /* DEBUG_FREE_PAGES */
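/*
 * Example usage (illustrative sketch): get_free_page() returns a zeroed
 * page, __get_dma_pages() an order-N block of DMA-capable pages:
 *
 *	unsigned long page = get_free_page(GFP_KERNEL);
 *	unsigned long dma = __get_dma_pages(GFP_KERNEL, 1);	// 2^1 pages
 *	if (page)
 *		free_page(page);
 *	if (dma)
 *		free_pages(dma, 1);	// order must match the allocation
 */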
extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
	unsigned long address);
extern void free_page_tables(struct mm_struct * mm);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);
#ifndef NO_MM
extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
#endif /* !NO_MM */
extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);
/* vmalloc.c */
extern void * vmalloc(unsigned long size);
extern void * vremap(unsigned long offset, unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);
/* mmap.c */
#ifdef DEBUG_MMAP
#undef do_mmap
#undef do_munmap
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off);
extern int do_munmap(unsigned long, size_t);
extern unsigned long do_mmap_flf(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off, char *filename, int line, char *function);
extern int do_munmap_flf(unsigned long, size_t, char *file, int line, char *function);
#define do_mmap(file,addr,len,prot,flags,off) do_mmap_flf(file,addr,len,prot,flags,off,__FILE__,__LINE__,__FUNCTION__)
#define do_munmap(addr, size) do_munmap_flf(addr, size,__FILE__,__LINE__,__FUNCTION__)
#else /* !DEBUG_MMAP */
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off);
extern int do_munmap(unsigned long, size_t);
#endif /* DEBUG_MMAP */
extern void exit_mmap(struct mm_struct *);
#ifndef NO_MM
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);
#endif /* !NO_MM */
/* filemap.c */
extern unsigned long page_unuse(unsigned long);
extern int shrink_mmap(int, int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);
#define GFP_BUFFER 0x00
#define GFP_ATOMIC 0x01
#define GFP_USER 0x02
#define GFP_KERNEL 0x03
#define GFP_NOBUFFER 0x04
#define GFP_NFS 0x05
#define GFP_IO 0x06
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
#define GFP_DMA 0x80
#define GFP_LEVEL_MASK 0xf
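/*
 * Illustrative sketch (not part of this header): an allocator can split
 * a priority argument into its level and the DMA modifier like this:
 *
 *	int level = priority & GFP_LEVEL_MASK;	// GFP_BUFFER ... GFP_IO
 *	int dma = priority & GFP_DMA;		// nonzero: DMA-capable memory only
 */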
#ifndef NO_MM
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;
	grow = vma->vm_start - address;
	/* Refuse to grow past the stack-size rlimit, or past the
	   total address-space rlimit for the whole mm. */
	if (vma->vm_end - address
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
	    (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
	    > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
	/* Extend the vma downwards to cover the faulting address. */
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}
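/*
 * Illustrative sketch (not part of this header) of the typical caller:
 * a page-fault handler that sees a fault just below the start of a
 * downward-growing vma (assuming VM_GROWSDOWN marks such vmas):
 *
 *	vma = find_vma(current->mm, address);
 *	if (vma && address < vma->vm_start && (vma->vm_flags & VM_GROWSDOWN))
 *		if (expand_stack(vma, address))
 *			...	// over rlimit: treat as an ordinary bad access
 */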
#define avl_empty (struct vm_area_struct *) NULL
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct * result = NULL;

	if (mm) {
		struct vm_area_struct * tree = mm->mmap_avl;
		for (;;) {
			if (tree == avl_empty)
				break;
			if (tree->vm_end > addr) {
				/* This vma satisfies addr < vm_end; remember it
				   and keep looking left for an earlier one. */
				result = tree;
				if (tree->vm_start <= addr)
					break;	/* addr lies inside this vma */
				tree = tree->vm_avl_left;
			} else
				tree = tree->vm_avl_right;
		}
	}
	return result;
}
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

	vma = find_vma(mm, start_addr);
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
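/*
 * Example usage (illustrative sketch): a MAP_FIXED-style caller can
 * check that a range is still unmapped before using it:
 *
 *	if (find_vma_intersection(current->mm, addr, addr + len))
 *		...	// something is already mapped in [addr, addr+len)
 */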
#endif /* !NO_MM */
#endif /* __KERNEL__ */
#endif