memory.c
		set_fs (MAKE_MM_SEG(SUPER_DATA));

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: lea -1,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		set_fs (fs);

		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;
		mm_segment_t fs = get_fs();

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		set_fs (fs);

		if (mmusr & MMU_T_040)
			return (unsigned long)vaddr;	/* Transparent translation */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));

		printk("VTOP040: bad virtual address %lx (%lx)\n", vaddr, mmusr);
		return -1;
	} else {
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			printk("VTOP030: bad virtual address %lx (%x)\n", vaddr, mmusr);

		descaddr = phys_to_virt((unsigned long)descaddr);

		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | ((unsigned long)vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | ((unsigned long)vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
		default:
			printk("VTOP: bad levels (%u) for virtual address %lx\n",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	printk("VTOP: bad virtual address %lx\n", vaddr);
	return -1;
}
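/*
 * Illustrative sketch of the exception-table mechanism that the PLPAR
 * fixup above relies on. Each ".long 1b,3b" pair emitted into the
 * __ex_table section records the address of an instruction that may
 * fault (label 1: the plpar) and the address of its fixup code
 * (label 3: load -1 into the result and jump back past the faulting
 * instruction). On an access error, the fault handler searches this
 * table and, on a hit, resumes at the fixup instead of treating it as
 * a fatal kernel fault. The linear scan below is illustrative only,
 * not the exact m68k handler.
 */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* where to resume if it faults */
};

static unsigned long search_one_table(const struct exception_table_entry *first,
				      const struct exception_table_entry *last,
				      unsigned long pc)
{
	while (first <= last) {
		if (first->insn == pc)
			return first->fixup;
		first++;
	}
	return 0;	/* no entry: a genuine kernel fault */
}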
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long poff, voff = PAGE_OFFSET;

	do {
		poff = paddr - m68k_memory[i].addr;
		if (poff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
#endif
			return poff + voff;
		}
		voff += m68k_memory[i].size;
	} while (++i < m68k_num_memory);

#if DEBUG_INVALID_PTOV
	if (mm_inv_cnt > 0) {
		mm_inv_cnt--;
		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
		       paddr, __builtin_return_address(0));
	}
#endif

	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 * 1) They shouldn't be dereferencing the virtual address
	 *    unless they are sure that it is valid from kernel space.
	 * 2) The only usage I see so far is converting a page table
	 *    reference to some non-FASTMEM address space when freeing
	 *    mmaped "/dev/mem" pages.  These addresses are just passed
	 *    to "free_page", which ignores addresses that aren't in
	 *    the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return -1;
}
#endif

/* invalidate page in both caches */
#define clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
#define pushcl040(paddr)				\
	do { unsigned long flags;			\
	     save_flags(flags);				\
	     cli();					\
	     push040(paddr);				\
	     if (CPU_IS_060)				\
		     clear040(paddr);			\
	     restore_flags(flags);			\
	} while(0)

/* push page in both caches, invalidate in i-cache */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
#define pushcli040(paddr)				\
	do { push040(paddr);				\
	} while(0)

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(0);
#endif
}
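/*
 * Illustrative usage sketch for cache_clear(): a buffer about to be
 * overwritten by device-to-memory DMA holds no data worth writing back,
 * so its cache lines can simply be dropped before the transfer starts.
 * dma_start_receive() is a hypothetical device call made up for this
 * example; virt_to_phys() is the usual conversion, needed because the
 * range argument is a _physical_ address.
 */
static void receive_into(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);

	cache_clear(paddr, len);	/* stale lines are discarded, not pushed */
	dma_start_receive(paddr, len);	/* hypothetical: device writes to RAM */
}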
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			pushcli040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(1);
#endif
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
#endif
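/*
 * Illustrative usage sketch for cache_push(), the opposite DMA direction:
 * before a device reads a buffer out of RAM, dirty cache lines must be
 * written back, or the device would see stale memory contents.
 * dma_start_transmit() is a hypothetical device call made up for this
 * example.
 */
static void transmit_from(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);

	cache_push(paddr, len);		/* push dirty lines to RAM first */
	dma_start_transmit(paddr, len);	/* hypothetical: device reads from RAM */
}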