
📄 memory.c

📁 Complete uClinux source code for the ARM platform
💻 C
📖 Page 1 of 2
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	/*
	 * we need special treatment for the first page, in case it
	 * is not page-aligned.
	 */
	if (paddr & (PAGE_SIZE - 1)) {
	    pushcl040(paddr);
	    if (len <= PAGE_SIZE) {
		if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
		    pushcl040(paddr + len - 1);
		}
		return;
	    } else {
		len -= PAGE_SIZE;
		paddr += PAGE_SIZE;
	    }
	}
	while (len > PAGE_SIZE) {
#if 0
	    pushcl040(paddr);
#else
	    clear040(paddr);
#endif
	    len -= PAGE_SIZE;
	    paddr += PAGE_SIZE;
	}
	if (len > 0) {
	    pushcl040(paddr);
	    if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
		/* a page boundary gets crossed at the end */
		pushcl040(paddr + len - 1);
	    }
	}
    }
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I_AND_D)
		      : "d0");
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	/*
	 * on 68040 or 68060, push cache lines for pages in the range;
	 * on the '040 this also invalidates the pushed lines, but not on
	 * the '060!
	 */
	while (len > PAGE_SIZE) {
	    pushcli040(paddr);
	    len -= PAGE_SIZE;
	    paddr += PAGE_SIZE;
	}
	if (len > 0) {
	    pushcli040(paddr);
	    if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
		/* a page boundary gets crossed at the end */
		pushcli040(paddr + len - 1);
	    }
	}
    }
    /*
     * 68030/68020 have no writeback cache. On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions. After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I)
		      : "d0");
}
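/*
 * Usage sketch (an illustration added here, not part of the original
 * file): how a driver might pair the two routines above around DMA.
 * cache_clear() suits a buffer the device is about to overwrite;
 * cache_push() suits a buffer the CPU has just written and the device
 * will read. start_dma_read()/start_dma_write() are hypothetical
 * board-specific helpers; VTOP() is the virtual-to-physical
 * conversion used elsewhere in this file.
 */
extern void start_dma_read (unsigned long paddr, int len);	/* hypothetical */
extern void start_dma_write (unsigned long paddr, int len);	/* hypothetical */

static void example_dma_into_buffer (void *buf, int len)
{
    /* the device overwrites the buffer, so stale cache lines can be
     * dropped without writing them back */
    cache_clear (VTOP(buf), len);
    start_dma_read (VTOP(buf), len);
}

static void example_dma_from_buffer (void *buf, int len)
{
    /* dirty lines must reach RAM before the device reads the buffer */
    cache_push (VTOP(buf), len);
    start_dma_write (VTOP(buf), len);
}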
/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */
void cache_push_v (unsigned long vaddr, int len)
{
    if (CPU_IS_040) {
	/* on 68040, push cache lines for pages in the range */
	while (len > PAGE_SIZE) {
	    pushv040(vaddr);
	    len -= PAGE_SIZE;
	    vaddr += PAGE_SIZE;
	}
	if (len > 0) {
	    pushv040(vaddr);
	    if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
		/* a page boundary gets crossed at the end */
		pushv040(vaddr + len - 1);
	    }
	}
    }
    else if (CPU_IS_060) {
	/* on 68060, push cache lines for pages in the range */
	while (len > PAGE_SIZE) {
	    pushv060(vaddr);
	    len -= PAGE_SIZE;
	    vaddr += PAGE_SIZE;
	}
	if (len > 0) {
	    pushv060(vaddr);
	    if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
		/* a page boundary gets crossed at the end */
		pushv060(vaddr + len - 1);
	    }
	}
    }
    /* 68030/68020 have no writeback cache; still need to clear icache. */
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I)
		      : "d0");
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040
#undef pushv040
#undef pushv060

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < boot_info.num_memory; i++)
		if (boot_info.memory[i].addr + boot_info.memory[i].size
		    == addr + len)
			return 1;
	return 0;
}
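/*
 * Usage sketch (illustrative only): cache_push_v() is for the case
 * where the kernel has just written instructions into user memory,
 * e.g. a signal trampoline, and those bytes must reach the
 * instruction cache before the process executes them. The helper
 * below is hypothetical and only shows the calling convention.
 */
static void example_after_writing_user_code (unsigned long uvaddr, int len)
{
    /* ... machine code was just copied to the user address uvaddr ... */
    cache_push_v (uvaddr, len);
}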
/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
			 int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE	(256*1024)

	static unsigned long vaddr = 0xe0000000; /* safe place */
	unsigned long physaddr, retaddr;
	pte_t *ktablep = NULL;
	pmd_t *kpointerp;
	pgd_t *page_dir;
	int pindex;   /* index into pointer table */
	int prot;

	/* Round down 'paddr' to 256 KB and adjust size */
	physaddr = paddr & ~(STEP_SIZE-1);
	size += paddr - physaddr;
	retaddr = vaddr + (paddr - physaddr);
	paddr = physaddr;
	/* Round up the size to 256 KB. It doesn't hurt if too much is
	 * mapped... */
	size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);

	if (CPU_IS_040_OR_060) {
		prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
		switch( nocacheflag ) {
		  case KERNELMAP_FULL_CACHING:
			prot |= _PAGE_CACHE040;
			break;
		  case KERNELMAP_NOCACHE_SER:
		  default:
			prot |= _PAGE_NOCACHE_S;
			break;
		  case KERNELMAP_NOCACHE_NONSER:
			prot |= _PAGE_NOCACHE;
			break;
		  case KERNELMAP_NO_COPYBACK:
			prot |= _PAGE_CACHE040W;
			/* prot |= 0; */
			break;
		}
	} else
		prot = _PAGE_PRESENT |
		       ((nocacheflag == KERNELMAP_FULL_CACHING ||
			 nocacheflag == KERNELMAP_NO_COPYBACK) ?
			0 : _PAGE_NOCACHE030);

	page_dir = pgd_offset_k(vaddr);
	if (pgd_present(*page_dir)) {
		kpointerp = (pmd_t *)pgd_page(*page_dir);
		pindex = (vaddr >> 18) & 0x7f;
		if (pindex != 0 && CPU_IS_040_OR_060) {
			if (pmd_present(*kpointerp))
				ktablep = (pte_t *)pmd_page(*kpointerp);
			else {
				ktablep = kernel_page_table (memavailp);
				/* Make entries invalid */
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
				pmd_set(kpointerp,ktablep);
			}
			ktablep += (pindex & 15)*64;
		}
	}
	else {
		/* we need a new pointer table */
		kpointerp = get_kpointer_table ();
		pgd_set(page_dir, (pmd_t *)kpointerp);
		memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
		pindex = 0;
	}

	for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

		if (pindex > 127) {
			/* we need a new pointer table */
			kpointerp = get_kpointer_table ();
			pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
			memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
			pindex = 0;
		}

		if (CPU_IS_040_OR_060) {
			int i;
			unsigned long ktable;

			/*
			 * 68040, use page tables pointed to by the
			 * kernel pointer table.
			 */

			if ((pindex & 15) == 0) {
				/* Need new page table every 4M on the '040 */
				ktablep = kernel_page_table (memavailp);
				/* Make entries invalid */
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
			}

			ktable = VTOP(ktablep);

			/*
			 * initialize section of the page table mapping
			 * this 256K portion.
			 */
			for (i = 0; i < 64; i++) {
				pte_val(*ktablep++) = physaddr | prot;
				physaddr += PAGE_SIZE;
			}

			/*
			 * make the kernel pointer table point to the
			 * kernel page table.
			 */
			((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

		} else {
			/*
			 * 68030, use early termination page descriptors.
			 * Each one points to 64 pages (256K).
			 */
			((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
			physaddr += 64 * PAGE_SIZE;
		}
	}

	return( retaddr );
}
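/*
 * Usage sketch (illustrative only): mapping a block of memory-mapped
 * I/O registers with a serialized, non-cached mapping. The physical
 * address and size are placeholders; passing NULL for memavailp
 * assumes the call is made after boot-time memory setup.
 */
static volatile unsigned char *example_map_io_registers (void)
{
	unsigned long va;

	va = kernel_map (0x40000000 /* placeholder */, 0x4000,
			 KERNELMAP_NOCACHE_SER, NULL);
	return (volatile unsigned char *) va;
}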
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
				  unsigned long size, unsigned cmode )
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;

	pte = pte_offset( pmd, address );
	address &= ~PMD_MASK;
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;

	for( ; address < end; pte++ ) {
		pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
		address += PAGE_SIZE;
	}
}

static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
				  unsigned long size, unsigned cmode )
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;

	pmd = pmd_offset( dir, address );
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
		/* 68030 early termination descriptor */
		pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
		return;
	}
	else {
		/* "normal" tables */
		for( ; address < end; pmd++ ) {
			set_cmode_pte( pmd, address, end - address, cmode );
			address = (address + PMD_SIZE) & PMD_MASK;
		}
	}
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode( unsigned long address, unsigned long size,
			   unsigned cmode )
{
	pgd_t *dir = pgd_offset_k( address );
	unsigned long end = address + size;

	if (CPU_IS_040_OR_060) {
		switch( cmode ) {
		  case KERNELMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		  case KERNELMAP_NOCACHE_SER:
		  default:
			cmode = _PAGE_NOCACHE_S;
			break;
		  case KERNELMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		  case KERNELMAP_NO_COPYBACK:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else
		cmode = ((cmode == KERNELMAP_FULL_CACHING ||
			  cmode == KERNELMAP_NO_COPYBACK) ?
			 0 : _PAGE_NOCACHE030);

	for( ; address < end; dir++ ) {
		set_cmode_pmd( dir, address, end - address, cmode );
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
	}
	flush_tlb_all();
}
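/*
 * Usage sketch (illustrative only): as the comment above says, the
 * caller must push stale data itself, so a typical sequence writes
 * the range back first and then switches it to a non-cached mode.
 */
static void example_make_range_uncached (unsigned long vaddr, unsigned long size)
{
	cache_push (VTOP(vaddr), size);	/* write back before the switch */
	kernel_set_cachemode (vaddr, size, KERNELMAP_NOCACHE_SER);
}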
#else /* !NO_MM */

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	return vaddr;
}

unsigned long mm_ptov (unsigned long paddr)
{
	return paddr;
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
}

/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */
void cache_push_v (unsigned long vaddr, int len)
{
}

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
			 int nocacheflag, unsigned long *memavailp )
{
	return paddr;
}

void kernel_set_cachemode( unsigned long address, unsigned long size,
			   unsigned cmode )
{
}

#ifdef MAGIC_ROM_PTR
int is_in_rom(unsigned long addr) {
#ifdef CONFIG_PILOT
	if (addr >= 0x10c00000)
		return 1;
	else
		return 0;
#endif
#ifdef CONFIG_M68EZ328ADS
	if ( 0x00200000 <= addr && addr < 0x00400000)
		return 1;
	else
		return 0;
#endif
#ifdef CONFIG_M68332
	extern char _etext;

#ifdef SHGLCORE_ROM_BANK_0_ADDR
	if ((addr >= SHGLCORE_ROM_BANK_0_ADDR) && (addr < (SHGLCORE_ROM_BANK_0_ADDR+SHGLCORE_ROM_BANK_0_LENGTH)))
		return 1;
#endif
#ifdef SHGLCORE_ROM_BANK_1_ADDR
	else if ((addr >= SHGLCORE_ROM_BANK_1_ADDR) && (addr < (SHGLCORE_ROM_BANK_1_ADDR+SHGLCORE_ROM_BANK_1_LENGTH)))
		return 1;
#endif
#ifdef SHGLCORE_FLASH_BANK_0_ADDR
	else if ((addr >= SHGLCORE_FLASH_BANK_0_ADDR) && (addr < (SHGLCORE_FLASH_BANK_0_ADDR+SHGLCORE_FLASH_BANK_0_LENGTH)))
		return 1;
#endif
#ifdef SHGLCORE_FLASH_BANK_1_ADDR
	else if ((addr >= SHGLCORE_FLASH_BANK_1_ADDR) && (addr < (SHGLCORE_FLASH_BANK_1_ADDR+SHGLCORE_FLASH_BANK_1_LENGTH)))
		return 1;
#endif
	else
		return 0;
#endif
}
#endif

#endif
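/*
 * Usage sketch (illustrative only): under MAGIC_ROM_PTR, uClinux can
 * hand out pointers straight into ROM/flash instead of copying, e.g.
 * for execute-in-place. A caller would gate that on is_in_rom() for
 * both ends of the range:
 */
#ifdef MAGIC_ROM_PTR
static int example_can_point_into_rom (unsigned long addr, unsigned long len)
{
	return is_in_rom (addr) && is_in_rom (addr + len - 1);
}
#endif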
