📄 memory.c

📁 Complete uClinux source code for the ARM platform
💻 C
📖 Page 1 of 2
/*
 *  linux/arch/m68knommu/mm/memory.c
 *
 *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                      The Silver Hammer Group, Ltd.
 *
 *  Based on:
 *
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/shglcore.h>

#ifndef NO_MM

extern pte_t *kernel_page_table (unsigned long *memavailp);

/* Strings for `extern inline' functions in <asm/pgtable.h>.  If put
   directly into these functions, they are output for every file that
   includes pgtable.h */

const char PgtabStr_bad_pmd[] = "Bad pmd in pte_alloc: %08lx\n";
const char PgtabStr_bad_pgd[] = "Bad pgd in pmd_alloc: %08lx\n";
const char PgtabStr_bad_pmdk[] = "Bad pmd in pte_alloc_kernel: %08lx\n";
const char PgtabStr_bad_pgdk[] = "Bad pgd in pmd_alloc_kernel: %08lx\n";

static struct ptable_desc {
	struct ptable_desc *prev;
	struct ptable_desc *next;
	unsigned long	   page;
	unsigned char	   alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)
#define PD_ALLFREE(dp) ((dp)->alloced == 0)
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

pmd_t *get_pointer_table (void)
{
	pmd_t *pmdp = NULL;
	unsigned long flags;
	struct ptable_desc *dp = ptable_list.next;
	int i;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (PD_NONEFREE (dp)) {

		if (!(dp = kmalloc (sizeof(struct ptable_desc), GFP_KERNEL))) {
			return 0;
		}

		if (!(dp->page = __get_free_page (GFP_KERNEL))) {
			kfree (dp);
			return 0;
		}

		nocache_page (dp->page);

		dp->alloced = 0;
		/* put at head of list */
		save_flags(flags);
		cli();
		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}

	for (i = 0; i < 8; i++)
		if (PD_TABLEFREE (dp, i)) {
			PD_MARKUSED (dp, i);
			pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
			break;
		}

	if (PD_NONEFREE (dp)) {
		/* move to end of list */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next->prev;
		dp->prev = ptable_list.prev;
		ptable_list.prev->next = dp;
		ptable_list.prev = dp;
		restore_flags(flags);
	}

	memset (pmdp, 0, PTABLE_SIZE);

	return pmdp;
}

void free_pointer_table (pmd_t *ptable)
{
	struct ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
	unsigned long flags;

	for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
		;

	if (!dp->page)
		panic ("unable to find desc for ptable %p on list!", ptable);

	if (PD_TABLEFREE (dp, index))
		panic ("table already free!");

	PD_MARKFREE (dp, index);

	if (PD_ALLFREE (dp)) {
		/* all tables in page are free, free page */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;
		restore_flags(flags);
		cache_page (dp->page);
		free_page (dp->page);
		kfree (dp);
		return;
	} else {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}
}

/* maximum pages used for kpointer tables */
#define KPTR_PAGES      4
/* # of reserved slots */
#define RESERVED_KPTR	4

extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

static struct kpointer_pages {
	pmd_tablepage *page[KPTR_PAGES];
	u_char alloced[KPTR_PAGES];
} kptr_pages;

void init_kpointer_table(void)
{
	short i = KPTR_PAGES-1;

	/* first page is reserved in head.S */
	kptr_pages.page[i] = &kernel_pmd_table;
	kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
	for (i--; i>=0; i--) {
		kptr_pages.page[i] = NULL;
		kptr_pages.alloced[i] = 0;
	}
}

pmd_t *get_kpointer_table (void)
{
	/* For pointer tables for the kernel virtual address space,
	 * use the page that is reserved in head.S that can hold up to
	 * 8 pointer tables. 3 of these tables are always reserved
	 * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
	 * the first 16 MB of RAM). In addition, the 4th pointer table
	 * in this page is reserved. On Amiga and Atari, it is used to
	 * map in the hardware registers. It may be used for other
	 * purposes on other 68k machines. This leaves 4 pointer tables
	 * available for use by the kernel. One of them is usually used
	 * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
	 * of physical memory. But these pointer tables are also used
	 * for other purposes, like kernel_map(), so further pages can
	 * now be allocated.
	 */
	pmd_tablepage *page;
	pmd_table *table;
	long nr, offset = -8;
	short i;

	for (i=KPTR_PAGES-1; i>=0; i--) {
		asm volatile("bfffo %1{%2,#8},%0"
			: "=d" (nr)
			: "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
		if (nr)
			break;
	}
	if (i < 0) {
		printk("No space for kernel pointer table!\n");
		return NULL;
	}
	if (!(page = kptr_pages.page[i])) {
		if (!(page = (pmd_tablepage *)__get_free_page(GFP_KERNEL))) {
			printk("No space for kernel pointer table!\n");
			return NULL;
		}
		nocache_page((u_long)(kptr_pages.page[i] = page));
	}
	asm volatile("bfset %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
	table = &(*page)[nr-offset];
	memset(table, 0, sizeof(pmd_table));
	return ((pmd_t *)table);
}

void free_kpointer_table (pmd_t *pmdp)
{
	pmd_table *table = (pmd_table *)pmdp;
	pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
	long nr;
	short i;

	for (i=KPTR_PAGES-1; i>=0; i--) {
		if (kptr_pages.page[i] == page)
			break;
	}
	nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
	if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
		printk("Attempt to free invalid kernel pointer table: %p\n", table);
		return;
	}
	asm volatile("bfclr %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr));
	if (!kptr_pages.alloced[i]) {
		kptr_pages.page[i] = 0;
		cache_page ((u_long)page);
		free_page ((u_long)page);
	}
}

static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
    unsigned long base, mask;

    /* enabled? */
    if (!(regval & 0x8000))
	return( 0 );

    if (CPU_IS_030) {
	/* function code match? */
	base = (regval >> 4) & 7;
	mask = ~(regval & 7);
	if ((SUPER_DATA & mask) != (base & mask))
	    return( 0 );
    }
    else {
	/* must not be user-only */
	if ((regval & 0x6000) == 0)
	    return( 0 );
    }

    /* address match? */
    base = regval & 0xff000000;
    mask = ~(regval << 8) & 0xff000000;
    return( (vaddr & mask) == (base & mask) );
}

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	for (i = 0; i < boot_info.num_memory; i++)
	{
		if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				boot_info.memory[i].addr + voff - offset);
#endif
			return boot_info.memory[i].addr + voff - offset;
		} else
			offset += boot_info.memory[i].size;
	}

	/* not in one of the memory chunks; test for applying transparent
	 * translation */

	if (CPU_IS_030) {
	    unsigned long ttreg;
	    register unsigned long *ttregptr __asm__( "a2" ) = &ttreg;

	    asm volatile( ".long 0xf0120a00;" /* pmove %/tt0,%/a2@ */
			  : "=g" (ttreg) : "a" (ttregptr) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	    asm volatile( ".long 0xf0120e00" /* pmove %/tt1,%/a2@ */
			  : "=g" (ttreg) : "a" (ttregptr) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	}
	else if (CPU_IS_040_OR_060) {
	    register unsigned long ttreg __asm__( "d0" );

	    asm volatile( ".long 0x4e7a0006" /* movec %dtt0,%d0 */
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	    asm volatile( ".long 0x4e7a0007" /* movec %dtt1,%d0 */
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	}

	/* still no match, so get the actual physical address from the MMU.
	 */
	if (CPU_IS_060) {
	  unsigned long fs = get_fs();
	  unsigned long paddr;

	  set_fs (SUPER_DATA);

	  /* The PLPAR instruction causes an access error if the translation
	   * is not possible. We don't catch that here, so a bad kernel trap
	   * will be reported in this case. */
	  asm volatile ("movel %1,%/a0\n\t"
			".word 0xf5c8\n\t"	/* plpar (a0) */
			"movel %/a0,%0"
			: "=g" (paddr)
			: "g" (vaddr)
			: "a0" );
	  set_fs (fs);

	  return paddr;
	} else if (CPU_IS_040) {
	  unsigned long mmusr;
	  unsigned long fs = get_fs();

	  set_fs (SUPER_DATA);

	  asm volatile ("movel %1,%/a0\n\t"
			".word 0xf568\n\t"	/* ptestr (a0) */
			".long 0x4e7a8805\n\t"	/* movec mmusr,a0 */
			"movel %/a0,%0"
			: "=g" (mmusr)
			: "g" (vaddr)
			: "a0", "d0");
	  set_fs (fs);

	  if (mmusr & MMU_R_040)
	    return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

	  panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {
	  volatile unsigned short temp;
	  unsigned short mmusr;
	  unsigned long *descaddr;

	  asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			"pmove %/psr,%1@"
			: "=a&" (descaddr)
			: "a" (&temp), "a" (vaddr));
	  mmusr = temp;

	  if (mmusr & (MMU_I|MMU_B|MMU_L))
	    panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

	  descaddr = (unsigned long *)PTOV(descaddr);

	  switch (mmusr & MMU_NUM) {
	  case 1:
	    return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
	  case 2:
	    return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
	  case 3:
	    return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
	  default:
	    panic ("VTOP: bad levels (%u) for virtual address %08lx",
		   mmusr & MMU_NUM, vaddr);
	  }
	}

	panic ("VTOP: bad virtual address %08lx", vaddr);
}

unsigned long mm_ptov (unsigned long paddr)
{
	int i;
	unsigned long offset = 0;

	for (i = 0; i < boot_info.num_memory; i++)
	{
		if (paddr >= boot_info.memory[i].addr &&
		    paddr < (boot_info.memory[i].addr
			     + boot_info.memory[i].size)) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr,
				(paddr - boot_info.memory[i].addr) + offset);
#endif
			return (paddr - boot_info.memory[i].addr) + offset;
		} else
			offset += boot_info.memory[i].size;
	}

	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 *  1) They shouldn't be dereferencing the virtual address
	 *     unless they are sure that it is valid from kernel space.
	 *  2) The only usage I see so far is converting a page table
	 *     reference to some non-FASTMEM address space when freeing
	 *     mmaped "/dev/mem" pages.  These addresses are just passed
	 *     to "free_page", which ignores addresses that aren't in
	 *     the memory list anyway.
	 */

	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_ADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
	return paddr;
}

/* invalidate page in both caches */
#define	clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      "nop\n\t"\
					      ".word 0xf4d0"\
					      /* CINVP I/D (a0) */\
					      : : "g" ((paddr))\
					      : "a0")

/* invalidate page in i-cache */
#define	cleari040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					       /* CINVP I (a0) */\
					       "nop\n\t"\
					       ".word 0xf490"\
					       : : "g" ((paddr))\
					       : "a0")

/* push page in both caches */
#define	push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					     "nop\n\t"\
					     ".word 0xf4f0"\
					     /* CPUSHP I/D (a0) */\
					     : : "g" ((paddr))\
					     : "a0")

/* push and invalidate page in both caches */
#define	pushcl040(paddr) do { push040((paddr));\
			      if (CPU_IS_060) clear040((paddr));\
			 } while(0)

/* push page in both caches, invalidate in i-cache */
#define	pushcli040(paddr) do { push040((paddr));\
			       if (CPU_IS_060) cleari040((paddr));\
			  } while(0)

/* push page defined by virtual address in both caches */
#define	pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* ptestr (a0) */\
					      "nop\n\t"\
					      ".word 0xf568\n\t"\
					      /* movec mmusr,d0 */\
					      ".long 0x4e7a0805\n\t"\
					      "andw #0xf000,%/d0\n\t"\
					      "movel %/d0,%/a0\n\t"\
					      /* CPUSHP I/D (a0) */\
					      "nop\n\t"\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0", "d0")

/* push page defined by virtual address in both caches */
#define	pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* plpar (a0) */\
					      ".word 0xf5c8\n\t"\
					      /* CPUSHP I/D (a0) */\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0")

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then invalidate separately.
 */
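The pointer-table routines above implement a small slab-style allocator: each page holds eight pmd tables, and a per-page byte (ptable_desc.alloced) tracks which of the eight slots are taken. The standalone user-space sketch below reproduces just that bitmask bookkeeping so the scheme can be seen in isolation; the page_desc type, the 4096-byte page size, and the demo in main() are illustrative assumptions, not part of the kernel source.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for illustration only */
#define PAGE_SIZE	4096UL
#define TABLES_PER_PAGE	8
#define TABLE_SIZE	(PAGE_SIZE / TABLES_PER_PAGE)

struct page_desc {
	unsigned char slots[PAGE_SIZE];	/* stands in for the real page */
	unsigned char alloced;		/* one bit per table, as in ptable_desc */
};

/* Mirrors of PD_TABLEFREE/PD_MARKUSED/PD_MARKFREE from the listing */
static int table_free(struct page_desc *d, int i) { return !(d->alloced & (1 << i)); }
static void mark_used(struct page_desc *d, int i) { d->alloced |= (1 << i); }
static void mark_free(struct page_desc *d, int i) { d->alloced &= ~(1 << i); }

/* Grab the first free table slot, as get_pointer_table does */
static void *alloc_table(struct page_desc *d)
{
	for (int i = 0; i < TABLES_PER_PAGE; i++)
		if (table_free(d, i)) {
			mark_used(d, i);
			memset(d->slots + TABLE_SIZE * i, 0, TABLE_SIZE);
			return d->slots + TABLE_SIZE * i;
		}
	return NULL;	/* page full: alloced == 0xff, i.e. PD_NONEFREE */
}

int main(void)
{
	struct page_desc d = { .alloced = 0 };
	void *t0 = alloc_table(&d);
	void *t1 = alloc_table(&d);
	printf("slot mask after two allocs: 0x%02x\n", (unsigned)d.alloced); /* 0x03 */
	/* Free the first table; its index is recovered from the offset,
	 * like free_pointer_table's ((ptable - page) / PTABLE_SIZE) */
	mark_free(&d, (int)(((unsigned char *)t0 - d.slots) / TABLE_SIZE));
	printf("slot mask after free:       0x%02x\n", (unsigned)d.alloced); /* 0x02 */
	(void)t1;
	return 0;
}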
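transp_transl_matches() decodes a 68030 or 68040/060 transparent-translation register: bit 15 enables the register, the top byte gives the address base, and the next byte is a mask of "don't care" address bits. The sketch below replays only the address comparison with a made-up register value (0x401f8000, i.e. base 0x40000000 with mask 0x1f, covering 0x40000000..0x5fffffff) to show which addresses fall inside the window; it is an illustration under those assumptions, not kernel code.

#include <stdio.h>

/* Address-match part of transp_transl_matches(), extracted for illustration.
 * regval layout as used above: bit 15 = enable, bits 31..24 = base,
 * bits 23..16 = address mask (1 bits are "don't care"). */
static int tt_addr_matches(unsigned long regval, unsigned long vaddr)
{
	if (!(regval & 0x8000))
		return 0;	/* register disabled */
	unsigned long base = regval & 0xff000000;
	unsigned long mask = ~(regval << 8) & 0xff000000;
	return (vaddr & mask) == (base & mask);
}

int main(void)
{
	/* Hypothetical register value for the example window above */
	unsigned long tt = 0x401f8000UL;
	printf("%d\n", tt_addr_matches(tt, 0x48000000UL)); /* 1: inside window */
	printf("%d\n", tt_addr_matches(tt, 0x60000000UL)); /* 0: outside window */
	return 0;
}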
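mm_vtop() and mm_ptov() translate by walking the boot_info memory-chunk table and accumulating chunk sizes, so a kernel virtual offset of N bytes lands N bytes into the concatenated physical chunks. Here is a compact sketch of that scan over a hypothetical two-chunk memory map; the addresses and sizes are invented for the example.

#include <stdio.h>

/* Hypothetical two-chunk memory map for illustration:
 * chunk 0 at physical 0x08000000 (4 MB), chunk 1 at 0x10000000 (4 MB).
 * Kernel virtual space concatenates them at offsets 0 and 4 MB. */
struct chunk { unsigned long addr, size; };
static const struct chunk mem[] = {
	{ 0x08000000UL, 4UL << 20 },
	{ 0x10000000UL, 4UL << 20 },
};

/* Same scan as mm_vtop: find the chunk containing the virtual offset */
static unsigned long vtop(unsigned long vaddr)
{
	unsigned long offset = 0;
	for (unsigned i = 0; i < sizeof(mem)/sizeof(mem[0]); i++) {
		if (vaddr < offset + mem[i].size)
			return mem[i].addr + vaddr - offset;
		offset += mem[i].size;
	}
	return vaddr;	/* not in any chunk: identity, as mm_ptov assumes */
}

int main(void)
{
	printf("%#lx\n", vtop(0x00100000UL)); /* 0x08100000: 1 MB into chunk 0 */
	printf("%#lx\n", vtop(0x00500000UL)); /* 0x10100000: 1 MB into chunk 1 */
	return 0;
}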
