
📄 paging.c

📁 A Linux-like operating system developed under DOS.
💻 C
/*****************************************************************************
PAGING

EXPORTS:
unsigned long alloc_page(void);
int page_fault(task_t *curr_task, unsigned long err_code);
int init_paging(void);

- page_fault() treats PAGE NOT PRESENT, PRIVILEGE VIOLATION, and
  WRITE TO READ-ONLY PAGE as mutually exclusive -- is this correct?
  will it still be correct if I add support for shared memory or
  copy-on-write pages?

- this code seems to work on my Pentium system even without doing
  invalidate_page_tables() or invalidate_page() -- check with 486
  and possibly 386 systems (I think you get a second TLB miss
  after the page fault has been repaired unless you invalidate,
  so it's a matter of speed rather than correctness)

- which is faster: invalidating individual pages or invalidating
  the entire TLB? under which conditions?

>>>- maybe get rid of all peekl()s and pokel()s, and use only
>>>  near virtual addresses to access page directories and tables
*****************************************************************************/
#include <string.h> /* NULL, memset(), memcpy() */
#include <krnl.h> /* LINEAR_DATA_SEL, DISCARDABLE_CODE() */
#include <x86.h> /* peekl(), pokel() */

/* IMPORTS:
from STARTUP.ASM */
extern unsigned long _ext_mem_size, _conv_mem_size, _kvirt_to_phys;
extern unsigned long _init_ramdisk_adr, _init_ramdisk_size;

unsigned long get_page_fault_adr(void);
//unsigned long get_page_dir(void);
//void set_page_dir(unsigned long cr3);

/* from DEBUG.C */
//void dump_page_tables(unsigned long *page_dir);

/* from linker script file */
extern unsigned char code[], data[], end[];

/* from MAIN.C */
void kprintf(const char *fmt, ...);
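
/* A minimal sketch of the TLB-invalidation helpers mentioned in the header
comment. This file never defines invalidate_page() or invalidate_page_tables();
they presumably live in the assembly startup/support code along with
get_page_fault_adr(). The #if 0 versions below assume a GCC-style compiler
(e.g. DJGPP) and are only an illustration of what such helpers would do. */
#if 0
static void invalidate_page(unsigned long adr)
{
/* INVLPG drops the single TLB entry covering 'adr'; needs a 486 or better */
	__asm__ __volatile__("invlpg %0" : : "m"(*(char *)adr) : "memory");
}

static void invalidate_page_tables(void)
{
/* reloading CR3 flushes the whole TLB; this is all a 386 can do */
	unsigned long cr3;

	__asm__ __volatile__("movl %%cr3, %0" : "=r"(cr3));
	__asm__ __volatile__("movl %0, %%cr3" : : "r"(cr3) : "memory");
}
#endif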

/* the 12 bits at the bottom of a page directory/table entry: */
#define	PRIV_PRESENT	0x001
#define	PRIV_WRITABLE	0x002
#define	PRIV_USER	0x004
/* b3, b4 reserved */
#define	PRIV_ACCESSED	0x020
#define	PRIV_DIRTY	0x040
/* b7, b8 reserved */
#define	PRIV_USER2	0x200		/* user-defined */
#define	PRIV_USER4	0x400
#define	PRIV_USER8	0x800

#define	PRIV_COW	PRIV_USER2	/* copy-on-write */
#define	PRIV_ALL	0xFFF

#define	LG2_PAGE_SIZE	12
#define	PAGE_TAB_ENTS	1024
#define	PAGE_DIR_ENTS	1024

/* convert virtual address to page address */
#define	PAGE_ADR(X)	((X) & 0xFFFFF000L)
/* convert virtual address to page number */
#define	PAGE_NUM(X)	((X) >> 12)
/* convert virtual address to page table entry (0-1023) */
#define	TAB_ENT(X)	(((X) >> 12) & 0x3FF)
/* convert virtual address to page dir entry (0-1023) */
#define	DIR_ENT(X)	(((X) >> 22) & 0x3FF)
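
/* Worked example (added for clarity): for the virtual address 0x00403ABC,
	PAGE_ADR(0x00403ABC) == 0x00403000
	PAGE_NUM(0x00403ABC) == 0x403
	TAB_ENT(0x00403ABC)  == 3	(bits 12-21)
	DIR_ENT(0x00403ABC)  == 1	(bits 22-31)
i.e. the mapping lives in slot 1 of the page directory and slot 3 of that
page table; the low 12 bits (0xABC) are the offset within the 4K page. */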

typedef struct
{
	unsigned char use_count;/* 0==free page */
} page_t;

#define	MAX_PAGE	1024	/* xxx - 4 meg of RAM exactly - ugh */
static page_t _pages[MAX_PAGE];
/*****************************************************************************
*****************************************************************************/
DISCARDABLE_CODE(int init_paging(void))
{
	unsigned short i, last_krnl_page;
	unsigned long phys;

/* mark the initial RAM disk memory in use
(round the end up in case the RAM disk size isn't page-aligned) */
	i = PAGE_NUM(_init_ramdisk_adr);
	for(; i < PAGE_NUM(_init_ramdisk_adr + _init_ramdisk_size +
		PAGE_SIZE - 1); i++)
			_pages[i].use_count = 1;
/* mark the adapter memory (640K ... 1M; video memory + BIOSes) in use */
	for(i = 160; i < 256; i++)
		_pages[i].use_count = 1;
/* mark the kernel memory pages in use */
	phys = (unsigned long)code + _kvirt_to_phys;
	last_krnl_page = PAGE_NUM(phys + (end - code) - 1);
	for(i = PAGE_NUM(phys); i <= last_krnl_page; i++)
		_pages[i].use_count = 1;
//dump_page_tables(_page_dir);
	return 0;
}
/*****************************************************************************
*****************************************************************************/
unsigned long alloc_page(void)
{
	unsigned long ret_val;
	page_t *page;
	unsigned i;

/* ### - slow linear search */
	page = _pages + 1;
/* leave page 0 free for trapping NULL pointer references,
and because the real-mode interrupt table and BIOS data are there */
	for(i = 1; i < MAX_PAGE; i++)
	{
		if(page->use_count == 0)
			break;
		page++;
	}
	if(i >= MAX_PAGE)
		ret_val = 0;
	else
	{
		page->use_count++;
		ret_val = i << LG2_PAGE_SIZE;
	}
	return ret_val;
}
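
/* Nothing in this file decrements page_t.use_count, so page frames are
presumably released elsewhere (e.g. when a task exits). A minimal sketch of
the obvious counterpart to alloc_page(), assuming pages are freed by physical
address; kept under #if 0 as an illustration only: */
#if 0
static void free_page(unsigned long adr)
{
	unsigned long i;

	i = PAGE_NUM(adr);
/* never touch page 0 or pages beyond the end of _pages[] */
	if(i == 0 || i >= MAX_PAGE)
		return;
	if(_pages[i].use_count != 0)
		_pages[i].use_count--;
}
#endif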
/*****************************************************************************
*****************************************************************************/
static unsigned long alloc_zero_page(void)
{
	unsigned long ret_val;

	ret_val = alloc_page();
	if(ret_val != 0)
		memset((void *)ret_val, 0, PAGE_SIZE);
	return ret_val;
}
/*****************************************************************************
demand-loading
*****************************************************************************/
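/* Note added for clarity: the sect_t fields used below are declared elsewhere
(presumably in krnl.h). As used here, 'adr' and 'size' appear to describe the
section in the task's virtual address space, 'off' the address of the section's
contents within the pre-loaded ELF/COFF image, and the 'load', 'zero', and
'write' flags how a faulting page in the section should be created. */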
static unsigned long load_page(sect_t *sect, unsigned long fault_adr)
{
	unsigned long new_page, src;

	new_page = alloc_page();
	if(new_page != 0)
	{
/* point to code or data in executable file */
		src = (PAGE_ADR(fault_adr) - sect->adr) + sect->off;
/* "load" it
no block devices yet, just copy from pre-loaded ELF or COFF image */
		memcpy((void *)new_page, (void *)src, PAGE_SIZE);
	}
	return new_page;
}
/*****************************************************************************
*****************************************************************************/
#if 0
#define	DEBUG(X)	X
#else
#define	DEBUG(X)	/* nothing */
#endif

static int no_page(task_t *curr_task, unsigned long fault_adr)
{
	unsigned long ram_top, new_page = 0, dirent_adr, tab;
	unsigned short i, priv = PRIV_PRESENT;
	sect_t *sect;

	DEBUG(kprintf("task %u: pg fault @ 0x%lX: ",
		curr_task - _tasks, fault_adr);)
	if(_ext_mem_size != 0)
		ram_top = _ext_mem_size + 1048576L;
	else
		ram_top = _conv_mem_size;
/* identity-map physical memory on demand (kernel only) */
	if(fault_adr >= PAGE_SIZE && fault_adr < ram_top)
	{
		priv |= PRIV_WRITABLE;
		new_page = PAGE_ADR(fault_adr);
		DEBUG(kprintf("identity-mapped RAM, ");)
OK:
/* check for out-of-memory */
		if(new_page == 0)
		{
MEM:			kprintf("out of memory\n");
			return -1;
		}
/* OK, the page fault is valid
point to page directory entry corresponding to fault address */
		dirent_adr = curr_task->page_dir +
			DIR_ENT(fault_adr) * 4;
/* get the entry */
		tab = peekl(LINEAR_DATA_SEL, dirent_adr);
//		tab = *(unsigned long *)(dirent_adr - _kvirt_to_phys);
/* if it's 0, we must allocate a new page table */
		if(tab == 0)
		{
			tab = alloc_zero_page();
			if(tab == 0)
				goto MEM;
/* we'll make the page tables writable and accessible to user code,
but the pages themselves may be read-only or kernel-only */
			pokel(LINEAR_DATA_SEL, dirent_adr, tab | PRIV_PRESENT |
				PRIV_WRITABLE | PRIV_USER);
//			*(unsigned long *)(dirent_adr - _kvirt_to_phys) =
//				tab | PRIV_PRESENT |
//				PRIV_WRITABLE | PRIV_USER;

			//invalidate_page(tab);
		}
/* point to page table entry corresponding to fault address */
		tab = PAGE_ADR(tab) + TAB_ENT(fault_adr) * 4;
/* map the page */
		pokel(LINEAR_DATA_SEL, tab, new_page | priv);
//		*(unsigned long *)(tab - _kvirt_to_phys) = new_page | priv;
/* invalidate_page() for 486+, which has the INVLPG instruction;
invalidate_page_tables() for the 386 */
		//invalidate_page(new_page);
		DEBUG(kprintf("OK\n");)
		return 0;
	}
/* demand-load, demand-zero, or demand-allocate user task memory */
	for(i = 0; i < curr_task->num_sects; i++)
	{
		sect = curr_task->sect + i;
		if(fault_adr >= sect->adr &&
			fault_adr < sect->adr + sect->size)
		{
			DEBUG(kprintf("sect %u, 0x%lX-0x%lX, ",
				i, sect->adr, sect->adr + sect->size);)
			priv |= PRIV_USER;
/* can't do anything with sect->read or sect->exec for x86 pages */
			if(sect->write)
				priv |= PRIV_WRITABLE;
/* maybe check that sect->load and sect->zero are not both set */
			if(sect->load)
			{
				DEBUG(kprintf("load, ");)
				new_page = load_page(sect, fault_adr);
			}
			else if(sect->zero)
			{
				DEBUG(kprintf("zero, ");)
				new_page = alloc_zero_page();
			}
			else
			{
				DEBUG(kprintf("alloc, ");)
				new_page = alloc_page();
			}
			goto OK;
		}
	}
/* let fault() kill the task or panic */
	return -1;
}
/*****************************************************************************
*****************************************************************************/
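/* x86 page-fault error code bits, for the tests below:
	b0 = 0: page not present	1: page-level protection violation
	b1 = 0: faulting access was a read	1: it was a write
	b2 = 0: CPU was in supervisor mode	1: it was in user mode */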
int page_fault(task_t *curr_task, unsigned long err_code)
{
	unsigned long fault_adr;
	int ret_val;

	fault_adr = get_page_fault_adr();
/* PAGE NOT PRESENT (write or read) */
	if((err_code & 1) == 0)
		ret_val = no_page(curr_task, fault_adr);
/* PRIVILEGE VIOLATION
let fault() kill the task */
	else if(err_code & 4)
	{
		kprintf("Attempt to access privileged memory at 0x%X\n",
			fault_adr);
		ret_val = -1;
	}
/* WRITE TO READ-ONLY PAGE
let fault() kill the task or panic */
	else if(err_code & 2)
		ret_val = -1;
/* for 386 CPU only (no INVLPG instruction) */
//	invalidate_page_tables();
	return ret_val;
}
