
📄 memory.c

📁 Linux 1.1 source code
💻 C
📖 Page 1 of 3
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

#include <asm/system.h>
#include <linux/config.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>

unsigned long high_memory = 0;

extern unsigned long pg0[1024];		/* page table for 0-4MB for everybody */

extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);

int nr_swap_pages = 0;
int nr_free_pages = 0;
unsigned long free_page_list = 0;

/*
 * The secondary free_page_list is used for malloc() etc things that
 * may need pages during interrupts etc. Normal get_free_page() operations
 * don't touch it, so it stays as a kind of "panic-list", that can be
 * accessed when all other mm tricks have failed.
 */
int nr_secondary_pages = 0;
unsigned long secondary_page_list = 0;

#define copy_page(from,to) \
__asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")

unsigned short * mem_map = NULL;

#define CODE_SPACE(addr,p) ((addr) < (p)->end_code)

/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGSEGV.
 */
void oom(struct task_struct * task)
{
	printk("\nout of memory\n");
	task->sigaction[SIGKILL-1].sa_handler = NULL;
	task->blocked &= ~(1<<(SIGKILL-1));
	send_sig(SIGKILL,task,1);
}

static void free_one_table(unsigned long * page_dir)
{
	int j;
	unsigned long pg_table = *page_dir;
	unsigned long * page_table;

	if (!pg_table)
		return;
	*page_dir = 0;
	if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
		printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
		return;
	}
	if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
		return;
	page_table = (unsigned long *) (pg_table & PAGE_MASK);
	for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
		unsigned long pg = *page_table;

		if (!pg)
			continue;
		*page_table = 0;
		if (pg & PAGE_PRESENT)
			free_page(PAGE_MASK & pg);
		else
			swap_free(pg);
	}
	free_page(PAGE_MASK & pg_table);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way. Note that
 * unlike 'free_page_tables()', this function still leaves a valid
 * page-table-tree in memory: it just removes the user pages. The two
 * functions are similar, but there is a fundamental difference.
 */
void clear_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long pg_dir;
	unsigned long * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0])
		panic("task[0] (swapper) doesn't support exec()\n");
	pg_dir = tsk->tss.cr3;
	page_dir = (unsigned long *) pg_dir;
	if (!page_dir || page_dir == swapper_pg_dir) {
		printk("Trying to clear kernel page-directory: not good\n");
		return;
	}
	if (mem_map[MAP_NR(pg_dir)] > 1) {
		unsigned long * new_pg;

		if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
			oom(tsk);
			return;
		}
		for (i = 768 ; i < 1024 ; i++)
			new_pg[i] = page_dir[i];
		free_page(pg_dir);
		tsk->tss.cr3 = (unsigned long) new_pg;
		return;
	}
	for (i = 0 ; i < 768 ; i++,page_dir++)
		free_one_table(page_dir);
	invalidate();
	return;
}

/*
 * This function frees up all page tables of a process when it exits.
 */
void free_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long pg_dir;
	unsigned long * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0]) {
		printk("task[0] (swapper) killed: unable to recover\n");
		panic("Trying to free up swapper memory space");
	}
	pg_dir = tsk->tss.cr3;
	if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
		printk("Trying to free kernel page-directory: not good\n");
		return;
	}
	tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
	if (tsk == current)
		__asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
	if (mem_map[MAP_NR(pg_dir)] > 1) {
		free_page(pg_dir);
		return;
	}
	page_dir = (unsigned long *) pg_dir;
	for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
		free_one_table(page_dir);
	free_page(pg_dir);
	invalidate();
}

/*
 * clone_page_tables() clones the page table for a process - both
 * processes will have the exact same pages in memory. There are
 * probably races in the memory management with cloning, but we'll
 * see..
 */
int clone_page_tables(struct task_struct * tsk)
{
	unsigned long pg_dir;

	pg_dir = current->tss.cr3;
	mem_map[MAP_NR(pg_dir)]++;
	tsk->tss.cr3 = pg_dir;
	return 0;
}

/*
 * copy_page_tables() just copies the whole process memory range:
 * note the special handling of RESERVED (ie kernel) pages, which
 * means that they are always shared by all processes.
 */
int copy_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long old_pg_dir, *old_page_dir;
	unsigned long new_pg_dir, *new_page_dir;

	if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	old_pg_dir = current->tss.cr3;
	tsk->tss.cr3 = new_pg_dir;
	old_page_dir = (unsigned long *) old_pg_dir;
	new_page_dir = (unsigned long *) new_pg_dir;
	for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
		int j;
		unsigned long old_pg_table, *old_page_table;
		unsigned long new_pg_table, *new_page_table;

		old_pg_table = *old_page_dir;
		if (!old_pg_table)
			continue;
		if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
			printk("copy_page_tables: bad page table: "
				"probable memory corruption");
			*old_page_dir = 0;
			continue;
		}
		if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
			*new_page_dir = old_pg_table;
			continue;
		}
		if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
			free_page_tables(tsk);
			return -ENOMEM;
		}
		old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
		new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
		for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
			unsigned long pg;

			pg = *old_page_table;
			if (!pg)
				continue;
			if (!(pg & PAGE_PRESENT)) {
				*new_page_table = swap_duplicate(pg);
				continue;
			}
			if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
				pg &= ~PAGE_RW;
			*new_page_table = pg;
			if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
				continue;
			*old_page_table = pg;
			mem_map[MAP_NR(pg)]++;
		}
		*new_page_dir = new_pg_table | PAGE_TABLE;
	}
	invalidate();
	return 0;
}

/*
 * a more complete version of free_page_tables which performs with page
 * granularity.
 */
int unmap_page_range(unsigned long from, unsigned long size)
{
	unsigned long page, page_dir;
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt, pc;

	if (from & ~PAGE_MASK) {
		printk("unmap_page_range called with wrong alignment\n");
		return -EINVAL;
	}
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt,
	     pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		if (!(page_dir = *dir))	{
			poff = 0;
			continue;
		}
		if (!(page_dir & PAGE_PRESENT)) {
			printk("unmap_page_range: bad page directory.");
			continue;
		}
		page_table = (unsigned long *)(PAGE_MASK & page_dir);
		if (poff) {
			page_table += poff;
			poff = 0;
		}
		for (pc = pcnt; pc--; page_table++) {
			if ((page = *page_table) != 0) {
				*page_table = 0;
				if (1 & page) {
					if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
						if (current->rss > 0)
							--current->rss;
					free_page(PAGE_MASK & page);
				} else
					swap_free(page);
			}
		}
		if (pcnt == PTRS_PER_PAGE) {
			*dir = 0;
			free_page(PAGE_MASK & page_dir);
		}
	}
	invalidate();
	return 0;
}

int zeromap_page_range(unsigned long from, unsigned long size, int mask)
{
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt;
	unsigned long page;

	if (mask) {
		if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
			printk("zeromap_page_range: mask = %08x\n",mask);
			return -EINVAL;
		}
		mask |= ZERO_PAGE;
	}
	if (from & ~PAGE_MASK) {
		printk("zeromap_page_range: from = %08lx\n",from);
		return -EINVAL;
	}
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	while (size > 0) {
		if (!(PAGE_PRESENT & *dir)) {
				/* clear page needed here?  SRB. */
			if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
				invalidate();
				return -ENOMEM;
			}
			if (PAGE_PRESENT & *dir) {
				free_page((unsigned long) page_table);
				page_table = (unsigned long *)(PAGE_MASK & *dir++);
			} else
				*dir++ = ((unsigned long) page_table) | PAGE_TABLE;
		} else
			page_table = (unsigned long *)(PAGE_MASK & *dir++);
		page_table += poff;
		poff = 0;
		for (size -= pcnt; pcnt-- ;) {
			if ((page = *page_table) != 0) {
				*page_table = 0;
				if (page & PAGE_PRESENT) {
					if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
						if (current->rss > 0)
							--current->rss;
					free_page(PAGE_MASK & page);
				} else
					swap_free(page);
			}
			*page_table++ = mask;
		}
		pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
	}
	invalidate();
	return 0;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
{
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt;
	unsigned long page;

	if (mask) {
		if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
			printk("remap_page_range: mask = %08x\n",mask);
			return -EINVAL;
		}
	}
	if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
		printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
		return -EINVAL;
	}
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	while (size > 0) {
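
Editor's note: the inner loop of copy_page_tables() above is the fork-time copy-on-write setup: a page-table entry that is both writable and COW-marked is downgraded to read-only in parent and child, and the page's reference count in mem_map is bumped, so the first write by either process faults and the fault handler can copy the page. The standalone sketch below mirrors only that bit manipulation; the flag values PAGE_PRESENT, PAGE_RW and PAGE_COW are assumptions modelled on the i386 layout, not the real kernel header definitions.

/* Hedged, standalone sketch of the COW downgrade done in copy_page_tables().
 * Flag values are assumptions, not taken from <linux/mm.h>. */
#include <stdio.h>

#define PAGE_PRESENT 0x01UL   /* assumed: entry maps a resident page   */
#define PAGE_RW      0x02UL   /* assumed: hardware write-enable bit    */
#define PAGE_COW     0x200UL  /* assumed: software "copy-on-write" bit */

/* Share one parent PTE into a child PTE the way the kernel loop does:
 * drop the write bit on writable COW pages so the first write faults. */
static unsigned long share_pte(unsigned long *parent_pte)
{
	unsigned long pg = *parent_pte;

	if (!(pg & PAGE_PRESENT))
		return pg;              /* swapped-out entries handled elsewhere */
	if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
		pg &= ~PAGE_RW;         /* read-only until someone writes */
	*parent_pte = pg;               /* parent loses write access too */
	return pg;                      /* child gets the identical entry */
}

int main(void)
{
	unsigned long parent = 0x00123000UL | PAGE_PRESENT | PAGE_RW | PAGE_COW;
	unsigned long child = share_pte(&parent);

	printf("parent %08lx  child %08lx\n", parent, child);
	return 0;
}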
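
Editor's note: unmap_page_range(), zeromap_page_range() and remap_page_range() all share the same address arithmetic: the byte count is rounded up to whole pages, the page-directory slot comes from the top 10 bits of the address, poff is the starting slot inside that page table, and pcnt bounds how many entries the first table contributes. The sketch below recomputes those quantities for a sample address using assumed i386 constants (PAGE_SHIFT 12, PTRS_PER_PAGE 1024); it is illustrative only and not the kernel's PAGE_DIR_OFFSET macro.

/* Hedged sketch of the address arithmetic used by unmap_page_range() and
 * friends.  Constants are assumed i386 values, not the kernel headers. */
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PTRS_PER_PAGE 1024UL            /* 4K page / 4-byte entries */

int main(void)
{
	unsigned long from = 0x08049f00UL;  /* hypothetical user address */
	unsigned long size = 20000UL;       /* hypothetical byte count   */

	/* round the byte count up to whole pages, as the kernel does */
	unsigned long pages = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	/* top 10 bits: which page-directory slot covers 'from' */
	unsigned long dir_index = from >> (PAGE_SHIFT + 10);
	/* next 10 bits: starting slot inside that page table (poff) */
	unsigned long poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1);
	/* entries left in the first table: the initial pcnt bound */
	unsigned long pcnt = PTRS_PER_PAGE - poff;

	if (pcnt > pages)
		pcnt = pages;
	printf("pages=%lu dir=%lu poff=%lu pcnt=%lu\n",
	       pages, dir_index, poff, pcnt);
	return 0;
}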
