/* memory.c */

	return 0;
}

void do_no_page(unsigned long error_code, unsigned long address,
	struct task_struct *tsk, unsigned long user_esp)
{
	unsigned long tmp;
	unsigned long page;
	struct vm_area_struct * mpnt;

	page = get_empty_pgtable(tsk,address);
	if (!page)
		return;
	page &= PAGE_MASK;
	page += PAGE_PTR(address);
	tmp = *(unsigned long *) page;
	if (tmp & PAGE_PRESENT)
		return;
	++tsk->rss;
	if (tmp) {
		/* non-zero but not present: the entry refers to a swapped-out page */
		++tsk->maj_flt;
		swap_in((unsigned long *) page);
		return;
	}
	address &= 0xfffff000;
	tmp = 0;
	/* walk the sorted vma list; tmp tracks the end of the last vma below address */
	for (mpnt = tsk->mmap; mpnt != NULL; mpnt = mpnt->vm_next) {
		if (address < mpnt->vm_start)
			break;
		if (address >= mpnt->vm_end) {
			tmp = mpnt->vm_end;
			continue;
		}
		if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) {
			++tsk->min_flt;
			get_empty_page(tsk,address);
			return;
		}
		mpnt->vm_ops->nopage(error_code, mpnt, address);
		return;
	}
	if (tsk != current)
		goto ok_no_page;
	if (address >= tsk->end_data && address < tsk->brk)
		goto ok_no_page;
	/* fault in the gap below the stack vma: grow the stack if the rlimit allows */
	if (mpnt && mpnt == tsk->stk_vma &&
	    address - tmp > mpnt->vm_start - address &&
	    tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) {
		mpnt->vm_start = address;
		goto ok_no_page;
	}
	tsk->tss.cr2 = address;
	current->tss.error_code = error_code;
	current->tss.trap_no = 14;
	send_sig(SIGSEGV,tsk,1);
	if (error_code & 4)	/* user level access? */
		return;
ok_no_page:
	++tsk->min_flt;
	get_empty_page(tsk,address);
}
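
/*
 * Illustrative sketch (not part of the original source): the stack-growth
 * test above asks two questions about a faulting address that fell in the
 * hole below the stack vma.  "tmp" is the end of the last vma below the
 * address (0 if none).  The helper name and parameters are hypothetical,
 * chosen only to make the arithmetic explicit:
 */
#if 0
static int should_grow_stack(unsigned long address,
	unsigned long prev_end,		/* "tmp" in do_no_page */
	unsigned long stack_start,	/* mpnt->vm_start */
	unsigned long stack_end,	/* mpnt->vm_end */
	unsigned long stack_limit)	/* tsk->rlim[RLIMIT_STACK].rlim_cur */
{
	/* closer to the stack than to the mapping below it? */
	if (address - prev_end <= stack_start - address)
		return 0;
	/* would the grown stack still fit inside the rlimit? */
	if (stack_end - address >= stack_limit)
		return 0;
	return 1;
}
#endif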

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address;
	unsigned long user_esp = 0;
	unsigned int bit;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
	if (address < TASK_SIZE) {
		if (error_code & 4) {	/* user mode access? */
			if (regs->eflags & VM_MASK) {
				bit = (address - 0xA0000) >> PAGE_SHIFT;
				if (bit < 32)
					current->screen_bitmap |= 1 << bit;
			} else
				user_esp = regs->esp;
		}
		if (error_code & 1)
			do_wp_page(error_code, address, current, user_esp);
		else
			do_no_page(error_code, address, current, user_esp);
		return;
	}
	address -= TASK_SIZE;
	if (wp_works_ok < 0 && address == 0 && (error_code & PAGE_PRESENT)) {
		wp_works_ok = 1;
		pg0[0] = PAGE_SHARED;
		printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
		return;
	}
	if (address < PAGE_SIZE) {
		printk("Unable to handle kernel NULL pointer dereference");
		pg0[0] = PAGE_SHARED;
	} else
		printk("Unable to handle kernel paging request");
	printk(" at address %08lx\n",address);
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);
}
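
/*
 * Illustrative only (not in the original file): the i386 error code
 * tested above packs three bits.  Bit 0 set means a protection
 * violation on a present page (routed to do_wp_page), clear means a
 * not-present page (do_no_page); bit 2 distinguishes user-mode from
 * supervisor accesses ("error_code & 4").  The macro names below are
 * hypothetical, chosen just to spell the bits out:
 */
#if 0
#define PF_PROT		1	/* 0: not-present page, 1: protection fault */
#define PF_WRITE	2	/* 0: read access, 1: write access */
#define PF_USER		4	/* 0: supervisor mode, 1: user mode */
#endif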

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
unsigned long __bad_pagetable(void)
{
	extern char empty_bad_page_table[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (BAD_PAGE + PAGE_TABLE),
		 "D" ((long) empty_bad_page_table),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page_table;
}

unsigned long __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_bad_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page;
}

unsigned long __zero_page(void)
{
	extern char empty_zero_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_zero_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_zero_page;
}
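
/*
 * A note on the assembly in the three helpers above: "rep ; stosl"
 * stores EAX into ECX consecutive longwords starting at EDI, i.e. it
 * fills the whole page with one value.  A plain-C sketch of the same
 * operation (illustrative, not in the original source):
 */
#if 0
static void fill_page(unsigned long *page, unsigned long val)
{
	int i;

	/* PTRS_PER_PAGE longwords == PAGE_SIZE bytes on i386 */
	for (i = 0; i < PTRS_PER_PAGE; i++)
		page[i] = val;
}
#endif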

void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0;

	printk("Mem-info:\n");
	printk("Free pages: %6dkB\n",nr_free_pages<<(PAGE_SHIFT-10));
	printk("Secondary pages: %6dkB\n",nr_secondary_pages<<(PAGE_SHIFT-10));
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	i = high_memory >> PAGE_SHIFT;
	while (i-- > 0) {
		total++;
		if (mem_map[i] & MAP_PAGE_RESERVED)
			reserved++;
		else if (!mem_map[i])
			free++;
		else
			shared += mem_map[i]-1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	show_buffers();
}

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long * pg_dir;
	unsigned long * pg_table;
	unsigned long tmp;
	unsigned long address;

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 */
#if 0
	memset((void *) 0, 0, PAGE_SIZE);
#endif
	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		tmp = *(pg_dir + 768);	/* at virtual addr 0xC0000000 */
		if (!tmp) {
			tmp = start_mem | PAGE_TABLE;
			*(pg_dir + 768) = tmp;
			start_mem += PAGE_SIZE;
		}
		*pg_dir = tmp;	/* also map it in at 0x00000000 for init */
		pg_dir++;
		pg_table = (unsigned long *) (tmp & PAGE_MASK);
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = address | PAGE_SHARED;
			else
				*pg_table = 0;
			address += PAGE_SIZE;
		}
	}
	invalidate();
	return start_mem;
}
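
/*
 * Why entry 768?  Each i386 page-directory slot maps 4MB (the top 10
 * bits of a linear address index it), so slot 0xC0000000 >> 22 == 768
 * is where physical memory appears at the kernel's virtual base.
 * Hypothetical helper, shown only to make the arithmetic explicit:
 */
#if 0
static inline unsigned long pg_dir_index(unsigned long vaddr)
{
	return vaddr >> 22;	/* pg_dir_index(0xC0000000) == 768 */
}
#endif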

void mem_init(unsigned long start_low_mem,
	      unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	unsigned short * p;
	extern int etext;

	cli();
	end_mem &= PAGE_MASK;
	high_memory = end_mem;
	start_mem += 0x0000000f;
	start_mem &= ~0x0000000f;
	tmp = MAP_NR(end_mem);
	mem_map = (unsigned short *) start_mem;
	p = mem_map + tmp;
	start_mem = (unsigned long) p;
	while (p > mem_map)
		*--p = MAP_PAGE_RESERVED;
	start_low_mem = PAGE_ALIGN(start_low_mem);
	start_mem = PAGE_ALIGN(start_mem);
	while (start_low_mem < 0xA0000) {
		mem_map[MAP_NR(start_low_mem)] = 0;
		start_low_mem += PAGE_SIZE;
	}
	while (start_mem < end_mem) {
		mem_map[MAP_NR(start_mem)] = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	free_page_list = 0;
	nr_free_pages = 0;
	for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
		if (mem_map[MAP_NR(tmp)]) {
			if (tmp >= 0xA0000 && tmp < 0x100000)
				reservedpages++;
			else if (tmp < (unsigned long) &etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		*(unsigned long *) tmp = free_page_list;
		free_page_list = tmp;
		nr_free_pages++;
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		end_mem >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));
	/* test if the WP bit is honoured in supervisor mode */
	wp_works_ok = -1;
	pg0[0] = PAGE_READONLY;
	invalidate();
	__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
	pg0[0] = 0;
	invalidate();
	if (wp_works_ok < 0)
		wp_works_ok = 0;
	return;
}
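
/*
 * How the WP probe above works (explanatory note, not original text):
 * pg0[0] is made read-only, then the kernel reads and rewrites the
 * byte at address 0 while in supervisor mode.  On a CPU that honours
 * WP the write faults, and the special case near the top of
 * do_page_fault() sets wp_works_ok to 1.  On a 386 the write goes
 * through silently, so wp_works_ok is still -1 afterwards and gets
 * clamped to 0 here.
 */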

void si_meminfo(struct sysinfo *val)
{
	int i;

	i = high_memory >> PAGE_SHIFT;
	val->totalram = 0;
	val->freeram = 0;
	val->sharedram = 0;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (mem_map[i] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		if (!mem_map[i]) {
			val->freeram++;
			continue;
		}
		val->sharedram += mem_map[i]-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->freeram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
}
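
/*
 * Explanatory note (not original text): si_meminfo() and show_mem()
 * both rely on the same mem_map convention.  Each entry is a use
 * count for one physical page: 0 means free, a count of n means n
 * mappings (so n-1 of them are "shared"), and the MAP_PAGE_RESERVED
 * bit marks pages the allocator must never hand out.
 */
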
/* This handles a generic mmap of a disk file */
void file_mmap_nopage(int error_code, struct vm_area_struct * area, unsigned long address)
{
	struct inode * inode = area->vm_inode;
	unsigned int block;
	unsigned long page;
	int nr[8];
	int i, j;
	int prot = area->vm_page_prot;

	address &= PAGE_MASK;
	block = address - area->vm_start + area->vm_offset;
	block >>= inode->i_sb->s_blocksize_bits;
	page = get_free_page(GFP_KERNEL);
	if (share_page(area, area->vm_task, inode, address, error_code, page)) {
		++area->vm_task->min_flt;
		return;
	}
	++area->vm_task->maj_flt;
	if (!page) {
		oom(current);
		put_page(area->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
		return;
	}
	for (i=0, j=0; i < PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
		nr[j] = bmap(inode,block);
	if (error_code & PAGE_RW)
		prot |= PAGE_RW | PAGE_DIRTY;
	page = bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, prot);
	if (!(prot & PAGE_RW)) {
		if (share_page(area, area->vm_task, inode, address, error_code, page))
			return;
	}
	if (put_page(area->vm_task,page,address,prot))
		return;
	free_page(page);
	oom(current);
}
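
/*
 * The nr[8] array above is sized for the worst case: one page covers
 * PAGE_SIZE / s_blocksize disk blocks, i.e. at most 4096/512 == 8.
 * Illustrative arithmetic only (not in the original source):
 */
#if 0
static int blocks_per_page(unsigned int blocksize)
{
	return PAGE_SIZE / blocksize;	/* 8 for 512, 4 for 1024 */
}
#endif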

void file_mmap_free(struct vm_area_struct * area)
{
	if (area->vm_inode)
		iput(area->vm_inode);
#if 0
	if (area->vm_inode)
		printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev,
			area->vm_inode->i_ino, area->vm_inode->i_count);
#endif
}

/*
 * Compare the contents of the mmap entries, and decide if we are allowed to
 * share the pages
 */
int file_mmap_share(struct vm_area_struct * area1,
		    struct vm_area_struct * area2,
		    unsigned long address)
{
	if (area1->vm_inode != area2->vm_inode)
		return 0;
	if (area1->vm_start != area2->vm_start)
		return 0;
	if (area1->vm_end != area2->vm_end)
		return 0;
	if (area1->vm_offset != area2->vm_offset)
		return 0;
	if (area1->vm_page_prot != area2->vm_page_prot)
		return 0;
	return 1;
}

struct vm_operations_struct file_mmap = {
	NULL,			/* open */
	file_mmap_free,		/* close */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	file_mmap_share,	/* share */
	NULL,			/* unmap */
};
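
/*
 * Explanatory note (not original text): this table is what the generic
 * fault path consults.  A file-backed vma points its vm_ops here, so
 * do_no_page() dispatches faults to file_mmap_nopage(), unmapping goes
 * through file_mmap_free(), and page-sharing decisions go through
 * file_mmap_share().  Unimplemented hooks stay NULL.
 */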