/* memory.c */
if (!(PAGE_PRESENT & *dir)) {
/* clearing page here, needed? SRB. */
if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
invalidate();
return -1;
}
*dir++ = ((unsigned long) page_table) | PAGE_TABLE;
}
else
page_table = (unsigned long *)(PAGE_MASK & *dir++);
if (poff) {
page_table += poff;
poff = 0;
}
for (size -= pcnt; pcnt-- ;) {
if ((page = *page_table) != 0) {
*page_table = 0;
if (PAGE_PRESENT & page) {
if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
if (current->rss > 0)
--current->rss;
free_page(PAGE_MASK & page);
} else
swap_free(page);
}
	/*
	 * The first condition (!mask) should really make any later
	 * reference to this page an invalid access; with the zero
	 * entry written here it is treated as demand allocation
	 * in some cases instead.
	 */
if (!mask)
*page_table++ = 0; /* not present */
else if (to >= high_memory)
*page_table++ = (to | mask);
else if (!mem_map[MAP_NR(to)])
*page_table++ = 0; /* not present */
else {
*page_table++ = (to | mask);
if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
++current->rss;
mem_map[MAP_NR(to)]++;
}
}
to += PAGE_SIZE;
}
pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
}
invalidate();
return 0;
}
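
#if 0	/* illustrative sketch (added), not part of the original file */
/*
 * How a linear address is decomposed by the two-level walk used above and
 * in put_page() below, assuming the usual i386 layout (4 KB pages, 1024
 * entries per table).  "example_pte_addr" is a made-up name; the real code
 * does the same arithmetic through the PAGE_DIR_OFFSET and PAGE_PTR macros.
 */
static unsigned long * example_pte_addr(unsigned long cr3, unsigned long address)
{
	unsigned long *dir, *table;

	dir = (unsigned long *) cr3 + (address >> 22);	/* page-directory slot */
	if (!(*dir & PAGE_PRESENT))
		return NULL;
	table = (unsigned long *) (*dir & PAGE_MASK);	/* page-table base */
	return table + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
}
#endif
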
/*
* This function puts a page in memory at the wanted address.
* It returns the physical address of the page gotten, 0 if
* out of memory (either when trying to access page-table or
* page.)
*/
unsigned long put_page(struct task_struct * tsk,unsigned long page,
unsigned long address,int prot)
{
unsigned long *page_table;
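	/*
	 * Sanity check: prot should contain only protection bits plus
	 * PAGE_PRESENT; any page-frame bits (PAGE_MASK) or a missing
	 * PAGE_PRESENT means the caller passed a bogus value.
	 */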
if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
printk("put_page: prot = %08x\n",prot);
if (page >= high_memory) {
printk("put_page: trying to put page %08lx at %08lx\n",page,address);
return 0;
}
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
if ((*page_table) & PAGE_PRESENT)
page_table = (unsigned long *) (PAGE_MASK & *page_table);
else {
printk("put_page: bad page directory entry\n");
oom(tsk);
*page_table = BAD_PAGETABLE | PAGE_TABLE;
return 0;
}
page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
if (*page_table) {
printk("put_page: page already exists\n");
*page_table = 0;
invalidate();
}
*page_table = page | prot;
/* no need for invalidate */
return page;
}
/*
* The previous function doesn't work very well if you also want to mark
* the page dirty: exec.c wants this, as it has earlier changed the page,
* and we want the dirty-status to be correct (for VM). Thus the same
* routine, but this time we mark it dirty too.
*/
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
unsigned long tmp, *page_table;
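	/*
	 * Unlike put_page() above, this variant allocates a missing page
	 * table itself: exec.c calls it while building a new image, before
	 * page tables for that range necessarily exist.
	 */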
if (page >= high_memory)
printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
if (mem_map[MAP_NR(page)] != 1)
printk("mem_map disagrees with %08lx at %08lx\n",page,address);
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
if (PAGE_PRESENT & *page_table)
page_table = (unsigned long *) (PAGE_MASK & *page_table);
else {
if (!(tmp = get_free_page(GFP_KERNEL)))
return 0;
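		/* get_free_page() may have slept, so re-check whether a page
		   table was installed in the directory entry meanwhile */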
if (PAGE_PRESENT & *page_table) {
free_page(tmp);
page_table = (unsigned long *) (PAGE_MASK & *page_table);
} else {
*page_table = tmp | PAGE_TABLE;
page_table = (unsigned long *) tmp;
}
}
page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
if (*page_table) {
printk("put_dirty_page: page already exists\n");
*page_table = 0;
invalidate();
}
*page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
/* no need for invalidate */
return page;
}
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
*
* Note that we do many checks twice (look at do_wp_page()), as
* we have to be careful about race-conditions.
*
* Goto-purists beware: the only reason for goto's here is that it results
* in better assembly code.. The "default" path will see no jumps at all.
*/
static void __do_wp_page(unsigned long error_code, unsigned long address,
struct task_struct * tsk, unsigned long user_esp)
{
unsigned long *pde, pte, old_page, prot;
unsigned long new_page;
new_page = __get_free_page(GFP_KERNEL);
pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
pte = *pde;
if (!(pte & PAGE_PRESENT))
goto end_wp_page;
if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
goto bad_wp_pagetable;
pte &= PAGE_MASK;
pte += PAGE_PTR(address);
old_page = *(unsigned long *) pte;
if (!(old_page & PAGE_PRESENT))
goto end_wp_page;
if (old_page >= high_memory)
goto bad_wp_page;
if (old_page & PAGE_RW)
goto end_wp_page;
tsk->min_flt++;
prot = (old_page & ~PAGE_MASK) | PAGE_RW;
old_page &= PAGE_MASK;
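	/* page shared with someone else (count > 1): copy it; if we are
	   the only user, the code further down simply makes it writable */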
if (mem_map[MAP_NR(old_page)] != 1) {
if (new_page) {
if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
++tsk->rss;
copy_page(old_page,new_page);
*(unsigned long *) pte = new_page | prot;
free_page(old_page);
invalidate();
return;
}
free_page(old_page);
oom(tsk);
*(unsigned long *) pte = BAD_PAGE | prot;
invalidate();
return;
}
*(unsigned long *) pte |= PAGE_RW;
invalidate();
if (new_page)
free_page(new_page);
return;
bad_wp_page:
printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
*(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
send_sig(SIGKILL, tsk, 1);
goto end_wp_page;
bad_wp_pagetable:
printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
*pde = BAD_PAGETABLE | PAGE_TABLE;
send_sig(SIGKILL, tsk, 1);
end_wp_page:
if (new_page)
free_page(new_page);
return;
}
/*
* check that a page table change is actually needed, and call
* the low-level function only in that case..
*/
void do_wp_page(unsigned long error_code, unsigned long address,
struct task_struct * tsk, unsigned long user_esp)
{
unsigned long page;
unsigned long * pg_table;
pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
page = *pg_table;
if (!page)
return;
if ((page & PAGE_PRESENT) && page < high_memory) {
pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
page = *pg_table;
if (!(page & PAGE_PRESENT))
return;
if (page & PAGE_RW)
return;
if (!(page & PAGE_COW)) {
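			/* not a copy-on-write page: a user-mode write to it
			   is a genuine protection violation */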
if (user_esp && tsk == current) {
current->tss.cr2 = address;
current->tss.error_code = error_code;
current->tss.trap_no = 14;
send_sig(SIGSEGV, tsk, 1);
return;
}
}
if (mem_map[MAP_NR(page)] == 1) {
*pg_table |= PAGE_RW | PAGE_DIRTY;
invalidate();
return;
}
__do_wp_page(error_code, address, tsk, user_esp);
return;
}
printk("bad page directory entry %08lx\n",page);
*pg_table = 0;
}
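
/*
 * Note (added): the 80386 ignores page-level write protection while running
 * in supervisor mode, so before the kernel writes into user space it has to
 * walk the destination range and force a write-protect fault on every page.
 * __verify_write() below does that, one page at a time, via do_wp_page().
 */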
int __verify_write(unsigned long start, unsigned long size)
{
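	/*
	 * Worked example (added): with 4 KB pages, start = 0x1ffc and
	 * size = 8 give size = (8 - 1 + 0xffc) >> 12 = 1 below, and the
	 * do/while loop then faults in two pages: 0x1000 and 0x2000.
	 */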
size--;
size += start & ~PAGE_MASK;
size >>= PAGE_SHIFT;
start &= PAGE_MASK;
do {
do_wp_page(1,start,current,0);
start += PAGE_SIZE;
} while (size--);
return 0;
}
static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
{
unsigned long tmp;
if (!(tmp = get_free_page(GFP_KERNEL))) {
oom(tsk);
tmp = BAD_PAGE;
}
if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
free_page(tmp);
}
/*
* try_to_share() checks the page at address "address" in the task "p",
* to see if it exists, and if it is clean. If so, share it with the current
* task.
*
* NOTE! This assumes we have checked that p != current, and that they
* share the same executable or library.
*
* We may want to fix this to allow page sharing for PIC pages at different
* addresses so that ELF will really perform properly. As long as the vast
* majority of sharable libraries load at fixed addresses this is not a
* big concern. Any sharing of pages between the buffer cache and the
* code space reduces the need for this as well. - ERY
*/
static int try_to_share(unsigned long address, struct task_struct * tsk,
struct task_struct * p, unsigned long error_code, unsigned long newpage)
{
unsigned long from;
unsigned long to;
unsigned long from_page;
unsigned long to_page;
from_page = (unsigned long)PAGE_DIR_OFFSET(p->tss.cr3,address);
to_page = (unsigned long)PAGE_DIR_OFFSET(tsk->tss.cr3,address);
/* is there a page-directory at from? */
from = *(unsigned long *) from_page;
if (!(from & PAGE_PRESENT))
return 0;
from &= PAGE_MASK;
from_page = from + PAGE_PTR(address);
from = *(unsigned long *) from_page;
/* is the page clean and present? */
if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
return 0;
if (from >= high_memory)
return 0;
if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
return 0;
/* is the destination ok? */
to = *(unsigned long *) to_page;
if (!(to & PAGE_PRESENT))
return 0;
to &= PAGE_MASK;
to_page = to + PAGE_PTR(address);
if (*(unsigned long *) to_page)
return 0;
/* share them if read - do COW immediately otherwise */
if (error_code & PAGE_RW) {
if(!newpage) /* did the page exist? SRB. */
return 0;
copy_page((from & PAGE_MASK),newpage);
to = newpage | PAGE_PRIVATE;
} else {
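		/* read fault: share the page and write-protect the source as
		   well, so a later write by either task traps into
		   do_wp_page() */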
mem_map[MAP_NR(from)]++;
from &= ~PAGE_RW;
to = from;
if(newpage) /* only if it existed. SRB. */
free_page(newpage);
}
*(unsigned long *) from_page = from;
*(unsigned long *) to_page = to;
invalidate();
return 1;
}
/*
* share_page() tries to find a process that could share a page with
* the current one. Address is the address of the wanted page relative
* to the current data space.
*
* We first check if it is at all feasible by checking executable->i_count.
* It should be >1 if there are other tasks sharing this inode.
*/
int share_page(struct vm_area_struct * area, struct task_struct * tsk,
struct inode * inode,
unsigned long address, unsigned long error_code, unsigned long newpage)
{
struct task_struct ** p;
if (!inode || inode->i_count < 2 || !area->vm_ops)
return 0;
for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
if (!*p)
continue;
if (tsk == *p)
continue;
		if (inode != (*p)->executable) {
			struct vm_area_struct * mpnt;
			/* Not the same executable: see if this task has a
			   vm_area mapping the same inode with the same
			   operations that is willing to share pages. */
			for (mpnt = (*p)->mmap; mpnt; mpnt = mpnt->vm_next) {
				if (mpnt->vm_ops == area->vm_ops &&
				    mpnt->vm_inode->i_ino == area->vm_inode->i_ino &&
				    mpnt->vm_inode->i_dev == area->vm_inode->i_dev) {
					if (mpnt->vm_ops->share(mpnt, area, address))
						break;
				}
			}
			if (!mpnt)
				continue;	/* nothing to share with here */
		}
if (try_to_share(address,tsk,*p,error_code,newpage))
return 1;
}
return 0;
}
/*
* fill in an empty page-table if none exists.
*/
static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
{
unsigned long page;
unsigned long *p;
p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
if (PAGE_PRESENT & *p)
return *p;
if (*p) {
printk("get_empty_pgtable: bad page-directory entry \n");
*p = 0;
}
page = get_free_page(GFP_KERNEL);
p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
if (PAGE_PRESENT & *p) {
free_page(page);
return *p;
}
if (*p) {
printk("get_empty_pgtable: bad page-directory entry \n");
*p = 0;
}
if (page) {
*p = page | PAGE_TABLE;
return *p;
}
oom(current);
*p = BAD_PAGETABLE | PAGE_TABLE;
	return 0;
}
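
#if 0	/* illustrative sketch (added), not part of the original file */
/*
 * Typical use of get_empty_pgtable(): fetch the (possibly freshly built)
 * directory entry, bail out on failure, then locate the pte slot for the
 * address inside the table it points to.  Callers such as do_no_page()
 * (not shown in this fragment) follow this pattern; "example_pte_slot" is
 * a made-up name.
 */
static unsigned long * example_pte_slot(struct task_struct * tsk, unsigned long address)
{
	unsigned long page = get_empty_pgtable(tsk, address);

	if (!page)
		return NULL;	/* out of memory */
	page &= PAGE_MASK;		/* physical base of the page table */
	page += PAGE_PTR(address);	/* byte offset of the pte within it */
	return (unsigned long *) page;
}
#endif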