cachealib.s
        cmplwi  p2,0x000C               /* check for MPC7400 or MPC7410 */
        bne     noHWflush

        /*
         * Code sequence described in sec. 3.5.2 "Data Cache Hardware Flush
         * Parameter in MSSCR0" of the MPC7400 and MPC7410 User's Manuals
         */

        .long   0x7e00066c              /* dssall */
        sync
        mfspr   p2,MSSCR0
        oris    p2,p2,_PPC_MSSCR0_DL1HWF_U
        mtspr   MSSCR0,p2
wait4l1hw:
        mfspr   p2,MSSCR0
        rlwinm. p2,p2,0,8,8             /* _PPC_MSSCR0_DL1HWF */
        bne     wait4l1hw
        sync
        b       cachePpcDisableFlushDone

noHWflush:
# ifdef _PPC604_USE_DCFA
        /*
         * Set HID0[DCFA] for 7xx.  p1 already contains HID0, and interrupts
         * are already disabled.  This is the officially recommended method
         * for all 7xx; however, malfunctions have been observed on 750CX and
         * 750FX if the MMU is enabled.  The alternative below may work better
         * in such cases.  (Another possible approach, not yet tested, is to
         * preload the TLB with the addresses to be used in the flush operation
         * so that TLB misses do not occur during the flush loop.)
         */
        ori     p3, p1, _PPC_HID0_DCFA
        mtspr   HID0,p3
# else /* _PPC604_USE_DCFA */
        /*
         * To cover optimized PLRU replacement, which uses invalid lines
         * first, the 7xx manuals say the count must be 1.5 * the total
         * number of lines in the cache:  32KB => 1536
         */
        li      p3, (3 * MPC_IMP_DCACHE_SIZE) / (2 * MPC_IMP_BLOCK_SIZE)
        b       cachePpcDataDisableSetCtr
# endif /* _PPC604_USE_DCFA */

cachePpcDataDisable604:
        /*
         * Interrupts have been disabled, and HID0[DCFA] has been set if
         * required.  Former contents of MSR and HID0 are in p0 and p1
         * respectively, and will be restored even if unchanged.  This also
         * works for 7450, since its PLRU always acts as if DCFA were set.
         */
        li      p3, MPC_IMP_DCACHE_SIZE / MPC_IMP_BLOCK_SIZE  /* 32KB => 1024 */
# else /* PPC604 */
        /*
         * All supported PPC603s (incl. MPC82xx) have a 16KB or smaller
         * dCache, and no DCFA or hardware flush facility.
         */
        li      p3, MPC_IMP_DCACHE_SIZE / MPC_IMP_BLOCK_SIZE  /* 16KB => 512 */
# endif /* PPC604 */

# if defined (PPC604) && !defined (_PPC604_USE_DCFA)
cachePpcDataDisableSetCtr:
# endif /* PPC604 & !_PPC604_USE_DCFA */
        mtspr   CTR,p3                  /* load CTR with the number of lines */

        /* load p2 with the buffer address minus one cache block size */

        lis     p2,HI(cachePpcReadOrigin-MPC_IMP_BLOCK_SIZE)
        ori     p2,p2,LO(cachePpcReadOrigin-MPC_IMP_BLOCK_SIZE)

cachePpcDisableLoad:
        lbzu    p4,MPC_IMP_BLOCK_SIZE(p2) /* cast out old line if modified */
        bdnz    cachePpcDisableLoad     /* repeat for all sets and ways */

        /* reload CTR and p2 */

        mtspr   CTR,p3
        lis     p2,HI(cachePpcReadOrigin-MPC_IMP_BLOCK_SIZE)
        ori     p2,p2,LO(cachePpcReadOrigin-MPC_IMP_BLOCK_SIZE)

cachePpcDisableFlush:
        addi    p2, p2, MPC_IMP_BLOCK_SIZE /* point to next cache line */
        dcbf    0,p2                    /* flush newly-loaded line */
        bdnz    cachePpcDisableFlush    /* repeat for all sets and ways */

cachePpcDisableFlushDone:
        rlwinm  p1,p1,0,18,16           /* turn off _PPC_HID0_DCE */
        sync                            /* synchronize for DCE disable */
        mtspr   HID0,p1                 /* disable dCache and restore DCFA */
        sync
        mtmsr   p0                      /* restore MSR -- ENABLE INTERRUPT & MMU */
        b       cacheArchOK             /* return OK */

/* **** cachePpcDisable **** */

#endif /* (CPU == PPC85XX) */
FUNC_END(cachePpcDisable)
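/*******************************************************************************
*
* Usage sketch (editor's illustration, not part of the original source):
* cachePpcDisable is normally reached through the architecture-independent
* cacheLib wrapper rather than called directly.  Assuming a standard VxWorks
* build in which this routine is installed as the data-cache disable handler:
*
*     #include <vxWorks.h>
*     #include <cacheLib.h>
*
*     STATUS dCacheShutdown (void)
*         {
*         /@ cast out all modified lines, then turn the dCache off @/
*         return (cacheDisable (DATA_CACHE));
*         }
*
* cacheDisable() and DATA_CACHE are the standard cacheLib API; the helper
* name dCacheShutdown is hypothetical.
*/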
/*******************************************************************************
*
* cacheArchInvalidate - invalidate entries in a PowerPC cache
*
* This routine invalidates some or all entries in a specified PowerPC cache.
*
* RETURNS: OK, or ERROR if the cache type is invalid or the cache control
* is not supported.
*
* STATUS cacheArchInvalidate
*     (
*     CACHE_TYPE cache,          /@ cache to invalidate @/
*     void *     address,        /@ virtual address @/
*     size_t     bytes           /@ number of bytes to invalidate @/
*     )
*
* INTERNAL
* This code assumes that cache blocks are 16 (403) or 32 (else) bytes in size.
*/

FUNC_BEGIN(cacheArchInvalidate)
        add     p2,p2,p1                /* p2 = end address (address + bytes) */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        lis     p4, HIADJ(ppc405CACHE_ALIGN_SIZE)
        lwz     p4, LO(ppc405CACHE_ALIGN_SIZE)(p4)
#endif /* PPC405F || PPC405 */
#if (CPU == PPC85XX)
        lis     p4, HIADJ(ppcE500CACHE_ALIGN_SIZE)
        lwz     p4, LO(ppcE500CACHE_ALIGN_SIZE)(p4)
#endif
#if (CPU == PPC403)
        clrrwi  p1,p1,4                 /* round address to 16-byte boundary */
#else /* CPU == PPC403 */
        clrrwi  p1,p1,5                 /* round address to 32-byte boundary */
#endif /* CPU == PPC403 */

invChk:
        cmpwi   cr6,p0,_DATA_CACHE      /* test cache type in p0 (cr6 reused below) */
        beq     cr6,invDat
        cmpwi   p0,_INSTRUCTION_CACHE   /* <cache> == _INSTRUCTION_CACHE ? */
        bne     cacheArchError          /* invalid cache? return ERROR */

        /* partial invalidation of instruction or data cache */

invIns:
        ICBI(p1,p3,p5,p6,p7,glr1)       /* invalidate copy(ies) in icache */
        b       invBottom
invDat:
        dcbi    r0,p1                   /* dcache block invalidate */
invBottom:
#if ( (CPU == PPC405F) || (CPU == PPC405) || (CPU == PPC85XX) )
        add     p1,p1,p4                /* address += ppc405CACHE_ALIGN_SIZE */
#else /* PPC405F || PPC405 */
        addi    p1,p1,_CACHE_ALIGN_SIZE /* address += _CACHE_ALIGN_SIZE */
#endif /* PPC405F || PPC405 */
        cmplw   p1,p2                   /* address < end address ? */
        bge     cacheArchOK             /* if not, return OK */
        beq     cr6,invDat              /* repeat data cache loop */
        b       invIns                  /* repeat instruction cache loop */
FUNC_END(cacheArchInvalidate)
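/*******************************************************************************
*
* Usage sketch (editor's illustration, not part of the original source):
* a typical client invalidates a DMA receive buffer before reading it, so
* that stale cached lines are discarded and loads see fresh memory.
* Assuming the standard cacheLib wrapper dispatches to the routine above:
*
*     #include <vxWorks.h>
*     #include <cacheLib.h>
*
*     STATUS rxBufPrepare (void * buf, size_t len)
*         {
*         /@ discard stale cached copies before reading DMA data @/
*         return (cacheInvalidate (DATA_CACHE, buf, len));
*         }
*
* cacheInvalidate() is the standard cacheLib API; the helper name
* rxBufPrepare is hypothetical.
*/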
/*******************************************************************************
*
* cacheArchFlush - flush entries in a PowerPC cache
*
* This routine flushes some or all entries in a specified PowerPC cache.
*
* RETURNS: OK, or ERROR if the cache type is invalid or the cache control
* is not supported.
*
* STATUS cacheArchFlush
*     (
*     CACHE_TYPE cache,          /@ cache to flush @/
*     void *     address,        /@ virtual address @/
*     size_t     bytes           /@ number of bytes to flush @/
*     )
*
* INTERNAL
* This code assumes that cache blocks are 16 (403) or 32 (else) bytes in size.
*/

FUNC_BEGIN(cacheArchFlush)
        cmpwi   p0,_DATA_CACHE          /* check cache type in p0 */
        bne     cacheArchError          /* invalid cache? return ERROR */
        add     p2,p2,p1                /* p2 = end address (address + bytes) */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        lis     p5, HIADJ(ppc405CACHE_ALIGN_SIZE)
        lwz     p5, LO(ppc405CACHE_ALIGN_SIZE)(p5)
#endif /* PPC405F || PPC405 */
#if (CPU == PPC85XX)
        lis     p5, HIADJ(ppcE500CACHE_ALIGN_SIZE)
        lwz     p5, LO(ppcE500CACHE_ALIGN_SIZE)(p5)
#endif
#if (CPU == PPC403)
        clrrwi  p1,p1,4                 /* round address to 16-byte boundary */
#else /* CPU == PPC403 */
        clrrwi  p1,p1,5                 /* round address to 32-byte boundary */
#endif /* CPU == PPC403 */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        /*
         * PPC405 RevC requires the workaround for errata #37 when the MMU
         * is enabled; we apply it unconditionally on all revs.
         */
        mfmsr   p3                      /* read msr */
        INT_MASK(p3, p4)                /* mask EE and CE bits */
        mtmsr   p4                      /* DISABLE INTERRUPT */
#endif /* PPC405F || PPC405 */

fluDat:
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        dcbt    r0,p1                   /* workaround for errata #37 */
#endif /* PPC405F || PPC405 */
        dcbst   r0,p1                   /* store line to memory (dcbst; dcbf would also invalidate) */
#if ( (CPU == PPC405F) || (CPU == PPC405) || (CPU == PPC85XX) )
        add     p1,p1,p5                /* address += ppc405CACHE_ALIGN_SIZE */
#else /* PPC405F || PPC405 */
        addi    p1,p1,_CACHE_ALIGN_SIZE /* address += _CACHE_ALIGN_SIZE */
#endif /* PPC405F || PPC405 */
        cmplw   p1,p2                   /* address < end address ? */
        blt     fluDat                  /* if so, repeat */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        mtmsr   p3                      /* restore old msr after errata #37 fixup */
        sync
#endif /* PPC405F || PPC405 */
        b       cacheArchOK             /* return OK */
FUNC_END(cacheArchFlush)

/*******************************************************************************
*
* cacheArchTextUpdate - synchronize the PowerPC instruction and data caches
*
* This routine flushes the PowerPC data cache and then invalidates the
* instruction cache, forcing the instruction cache to fetch code that may
* have been created via the data path.
*
* RETURNS: OK, always.
*
* STATUS cacheArchTextUpdate
*     (
*     void *     address,        /@ virtual address @/
*     size_t     bytes           /@ number of bytes to update @/
*     )
*
* INTERNAL
* This code assumes that cache blocks are 16 (403) or 32 (else) bytes in size.
*
* NOMANUAL
*/

FUNC_BEGIN(cacheArchTextUpdate)
        add     p1,p1,p0                /* p1 = end address (address + bytes) */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        lis     p5, HIADJ(ppc405CACHE_ALIGN_SIZE)
        lwz     p5, LO(ppc405CACHE_ALIGN_SIZE)(p5)
#endif /* PPC405F || PPC405 */
#if (CPU == PPC85XX)
        lis     p5, HIADJ(ppcE500CACHE_ALIGN_SIZE)
        lwz     p5, LO(ppcE500CACHE_ALIGN_SIZE)(p5)
#endif
#if (CPU == PPC403)
        clrrwi  p0,p0,4                 /* round address to 16-byte boundary */
#else /* CPU == PPC403 */
        clrrwi  p0,p0,5                 /* round address to 32-byte boundary */
#endif /* CPU == PPC403 */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        /*
         * PPC405 RevC requires the workaround for errata #37 when the MMU
         * is enabled; we apply it unconditionally on all revs of PPC405.
         */
        mfmsr   p3                      /* read msr */
        INT_MASK(p3, p4)                /* mask EE and CE bits */
        mtmsr   p4                      /* DISABLE INTERRUPT */
#endif /* PPC405F || PPC405 */

        /* loop */
updTop:
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        dcbt    r0,p0                   /* workaround for errata #37 */
#endif /* PPC405F || PPC405 */
        dcbst   r0,p0                   /* update memory */
        sync                            /* wait for update */
        ICBI(p0,p2,p4,p6,p7,glr1)       /* invalidate copy(ies) in icache */
#if ( (CPU == PPC405F) || (CPU == PPC405) || (CPU == PPC85XX) )
        add     p0,p0,p5                /* address += ppc405CACHE_ALIGN_SIZE */
#else /* PPC405F || PPC405 */
        addi    p0,p0,_CACHE_ALIGN_SIZE /* address += _CACHE_ALIGN_SIZE */
#endif /* PPC405F || PPC405 */
        cmplw   p0,p1                   /* address < end address ? */
        blt     updTop                  /* if so, repeat */
        isync                           /* remove copy in own instruction buffer */
#if ( (CPU == PPC405) || (CPU == PPC405F) )
        mtmsr   p3                      /* restore old msr after errata #37 fixup */
#endif /* PPC405F || PPC405 */
        b       cacheArchOK             /* return OK */

/*******************************************************************************
*
* cacheArchError - set errno and return ERROR
*
* To save space, several routines exit through cacheArchError() if an
* invalid cache is specified.
*
* NOMANUAL
*/

cacheArchError:
        mfspr   r0,LR
        stw     r0,4(sp)
        stwu    sp,-16(sp)
        bl      cacheErrnoSet
        lwz     r0,20(sp)
        addi    sp,sp,16
        mtspr   LR,r0
        blr

/*******************************************************************************
*
* cacheArchOK - return OK
*
* To save space, several routines exit normally through cacheArchOK().
*
* NOMANUAL
*/

cacheArchOK:
        sync                            /* SYNC for good measure (multiprocessor?) */
        li      p0,OK                   /* return OK */
        blr
FUNC_END(cacheArchTextUpdate)

/*******************************************************************************
*
* cacheArchPipeFlush - flush the processor pipe
*
* This function forces the processor pipes to be flushed.
*
* RETURNS: OK, always.
*
* NOMANUAL
*/

FUNC_BEGIN(cacheArchPipeFlush)
        eieio
        sync
        li      p0,OK                   /* return OK */
        blr
FUNC_END(cacheArchPipeFlush)
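/*******************************************************************************
*
* Usage sketch (editor's illustration, not part of the original source):
* code written through the data path -- by a loader, a dynamically
* generated stub, or a breakpoint poke -- must be stored to memory and
* purged from the instruction cache before it is executed.  Assuming the
* standard cacheLib wrapper dispatches to cacheArchTextUpdate above:
*
*     #include <vxWorks.h>
*     #include <cacheLib.h>
*
*     void pokeOpcode (UINT32 * addr, UINT32 opcode)
*         {
*         *addr = opcode;                          /@ written via the dCache @/
*         cacheTextUpdate (addr, sizeof (*addr));  /@ dcbst, icbi, then isync @/
*         }
*
* cacheTextUpdate() and UINT32 are standard VxWorks names; the helper name
* pokeOpcode is hypothetical.
*/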