proc-xscale.S
	mov	pc, lr

/*
 * cpu_xscale_icache_invalidate_page(page)
 *
 * invalidate all Icache lines associated with this area of memory
 *
 * page: page to invalidate
 */
	.align	5
ENTRY(cpu_xscale_icache_invalidate_page)
	mov	r1, #PAGESIZE
1:	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #4 * CACHELINESIZE
	bne	1b
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mov	pc, lr

/* ================================ CACHE LOCKING ============================
 *
 * The XScale MicroArchitecture implements support for locking entries into
 * the data and instruction cache.  The following functions implement the
 * core low level instructions needed to accomplish the locking.  The
 * developer's manual states that the code that performs the locking must
 * be in non-cached memory.  To accomplish this, the code in
 * xscale-cache-lock.c copies the following functions from the cache into
 * a non-cached memory region that is allocated through consistent_alloc().
 */
	.align	5
/*
 * xscale_icache_lock
 *
 * r0: starting address to lock
 * r1: end address to lock
 */
ENTRY(xscale_icache_lock)
iLockLoop:
	bic	r0, r0, #CACHELINESIZE - 1
	mcr	p15, 0, r0, c9, c1, 0	@ lock into cache
	cmp	r0, r1			@ are we done?
	add	r0, r0, #CACHELINESIZE	@ advance to next cache line
	bls	iLockLoop
	mov	pc, lr

/*
 * xscale_icache_unlock
 */
ENTRY(xscale_icache_unlock)
	mcr	p15, 0, r0, c9, c1, 1	@ Unlock icache
	mov	pc, lr

/*
 * xscale_dcache_lock
 *
 * r0: starting address to lock
 * r1: end address to lock
 */
ENTRY(xscale_dcache_lock)
	mcr	p15, 0, ip, c7, c10, 4	@ Drain Write (& Fill) Buffer
	mov	r2, #1
	mcr	p15, 0, r2, c9, c2, 0	@ Put dcache in lock mode
	cpwait	ip			@ Wait for completion

	mrs	r2, cpsr
	orr	r3, r2, #F_BIT | I_BIT
dLockLoop:
	msr	cpsr_c, r3
	mcr	p15, 0, r0, c7, c10, 1	@ Write back line if it is dirty
	mcr	p15, 0, r0, c7, c6, 1	@ Flush/invalidate line
	msr	cpsr_c, r2
	ldr	ip, [r0], #CACHELINESIZE @ Preload 32 bytes into cache from
					@ location [r0]. Post-increment
					@ r0 to next cache line
	cmp	r0, r1			@ Are we done?
	bls	dLockLoop

	mcr	p15, 0, ip, c7, c10, 4	@ Drain Write (& Fill) Buffer
	mov	r2, #0
	mcr	p15, 0, r2, c9, c2, 0	@ Get out of lock mode
	cpwait_ret lr, ip

/*
 * xscale_dcache_unlock
 */
ENTRY(xscale_dcache_unlock)
	mcr	p15, 0, ip, c7, c10, 4	@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c9, c2, 1	@ Unlock cache
	mov	pc, lr

/*
 * Needed to determine the length of the code that needs to be copied.
 */
	.align	5
ENTRY(xscale_cache_dummy)
	mov	pc, lr
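/*
 * A minimal sketch of that copy as xscale-cache-lock.c might perform it,
 * assuming the 2.4-era ARM consistent_alloc() interface; lock_code,
 * lock_fn, start and nbytes are illustrative names, not kernel API:
 *
 *	extern char xscale_icache_lock[], xscale_cache_dummy[];
 *
 *	// Size of the locking routines, delimited by xscale_cache_dummy.
 *	size_t len = xscale_cache_dummy - xscale_icache_lock;
 *	dma_addr_t phys;
 *
 *	// Allocate a non-cached region and relocate the routines into it.
 *	void *lock_code = consistent_alloc(GFP_KERNEL, len, &phys);
 *	memcpy(lock_code, xscale_icache_lock, len);
 *
 *	// Run the relocated copy of xscale_icache_lock (at offset 0).
 *	void (*lock_fn)(unsigned long, unsigned long) = lock_code;
 *	lock_fn(start, start + nbytes);
 */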
/* ================================== TLB ================================= */

/*
 * cpu_xscale_tlb_invalidate_all()
 *
 * Invalidate all TLB entries
 */
	.align	5
ENTRY(cpu_xscale_tlb_invalidate_all)
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_tlb_invalidate_range(start, end)
 *
 * invalidate TLB entries covering the specified range
 *
 * start: range start address
 * end:   range end address
 */
	.align	5
ENTRY(cpu_xscale_tlb_invalidate_range)
	bic	r0, r0, #(PAGESIZE - 1) & 0x00ff
	bic	r0, r0, #(PAGESIZE - 1) & 0xff00
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
	add	r0, r0, #PAGESIZE
	cmp	r0, r1
	blo	1b
	cpwait_ret lr, ip

/*
 * cpu_xscale_tlb_invalidate_page(page, flags)
 *
 * invalidate the TLB entries for the specified page.
 *
 * page:  page to invalidate
 * flags: non-zero if we include the I TLB
 */
	.align	5
ENTRY(cpu_xscale_tlb_invalidate_page)
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	teq	r1, #0
	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
	cpwait_ret lr, ip

/* ================================ TLB LOCKING ==============================
 *
 * The XScale MicroArchitecture implements support for locking entries into
 * the Instruction and Data TLBs.  The following functions provide the
 * low level support for these under Linux.  xscale-lock.c implements some
 * higher level management code.  Most of the following is taken straight
 * out of the Developer's Manual.
 */

/*
 * Lock I-TLB entry
 *
 * r0: Virtual address to translate and lock
 */
	.align	5
ENTRY(xscale_itlb_lock)
	mrs	r2, cpsr
	orr	r3, r2, #F_BIT | I_BIT
	msr	cpsr_c, r3			@ Disable interrupts
	mcr	p15, 0, r0, c8, c5, 1		@ Invalidate I-TLB entry
	mcr	p15, 0, r0, c10, c4, 0		@ Translate and lock
	msr	cpsr_c, r2			@ Restore interrupts
	cpwait_ret lr, ip

/*
 * Lock D-TLB entry
 *
 * r0: Virtual address to translate and lock
 */
	.align	5
ENTRY(xscale_dtlb_lock)
	mrs	r2, cpsr
	orr	r3, r2, #F_BIT | I_BIT
	msr	cpsr_c, r3			@ Disable interrupts
	mcr	p15, 0, r0, c8, c6, 1		@ Invalidate D-TLB entry
	mcr	p15, 0, r0, c10, c8, 0		@ Translate and lock
	msr	cpsr_c, r2			@ Restore interrupts
	cpwait_ret lr, ip

/*
 * Unlock all I-TLB entries
 */
	.align	5
ENTRY(xscale_itlb_unlock)
	mcr	p15, 0, ip, c10, c4, 1		@ Unlock I-TLB
	mcr	p15, 0, ip, c8, c5, 0		@ Invalidate I-TLB
	cpwait_ret lr, ip

/*
 * Unlock all D-TLB entries
 */
ENTRY(xscale_dtlb_unlock)
	mcr	p15, 0, ip, c10, c8, 1		@ Unlock D-TLB
	mcr	p15, 0, ip, c8, c6, 0		@ Invalidate D-TLB
	cpwait_ret lr, ip
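/*
 * Seen from C, the locking entry points above take the virtual address in
 * r0, i.e. as their first argument.  A sketch of pinning one
 * latency-critical page in the D-TLB (crit_vaddr is an illustrative name;
 * the real management code lives in xscale-lock.c):
 *
 *	extern void xscale_dtlb_lock(unsigned long vaddr);
 *	extern void xscale_dtlb_unlock(void);
 *
 *	// Translate the page and lock its entry into the D-TLB so that
 *	// accesses to it never take a TLB miss ...
 *	xscale_dtlb_lock(crit_vaddr & PAGE_MASK);
 *
 *	// ... and later drop every locked D-TLB entry in one go.
 *	xscale_dtlb_unlock();
 */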
/* =============================== PageTable ============================== */

/*
 * cpu_xscale_set_pgd(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_set_pgd)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pmd(pmdp, pmd)
 *
 * Set a level 1 translation table entry, and clean it out of
 * any caches such that the MMUs can load it correctly.
 *
 * pmdp: pointer to PMD entry
 * pmd:  PMD value to store
 */
	.align	5
ENTRY(cpu_xscale_set_pmd)
#if PMD_CACHE_WRITE_ALLOCATE && !CACHE_WRITE_THROUGH
	and	r2, r1, #PMD_TYPE_MASK|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
	cmp	r2, #PMD_TYPE_SECT|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
	orreq	r1, r1, #PMD_SECT_TEX(1)
#elif CACHE_WRITE_THROUGH
	and	r2, r1, #PMD_TYPE_MASK|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
	cmp	r2, #PMD_TYPE_SECT|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
	biceq	r1, r1, #PMD_SECT_BUFFERABLE
#endif
	str	r1, [r0]
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
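/*
 * The conditional attribute fixup above is easier to follow in C.  A
 * sketch of the same logic (xscale_adjust_pmd is an illustrative name,
 * not the kernel's C code; the macro names are those used in the
 * assembly):
 *
 *	unsigned long xscale_adjust_pmd(unsigned long pmd)
 *	{
 *		unsigned long cb = pmd & (PMD_TYPE_MASK |
 *				PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE);
 *
 *		// Only cacheable + bufferable sections are adjusted.
 *		if (cb == (PMD_TYPE_SECT | PMD_SECT_CACHEABLE |
 *			   PMD_SECT_BUFFERABLE)) {
 *	#if PMD_CACHE_WRITE_ALLOCATE && !CACHE_WRITE_THROUGH
 *			pmd |= PMD_SECT_TEX(1);		// write-allocate
 *	#elif CACHE_WRITE_THROUGH
 *			pmd &= ~PMD_SECT_BUFFERABLE;	// write-through
 *	#endif
 *		}
 *		return pmd;
 *	}
 */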
/*
 * cpu_xscale_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte)
	str	r1, [r0], #-1024		@ linux version

	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER | L_PTE_EXEC	@ User or Exec?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = C & ~W & ~B
	@    | C & W & B & write_allocate
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE && !CACHE_WRITE_THROUGH
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)

#if CACHE_WRITE_THROUGH
	bic	r2, r2, #L_PTE_BUFFERABLE
#else
	@
	@ Errata 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~((U|E) & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_EXEC | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	teqne	ip, #L_PTE_EXEC | L_PTE_CACHEABLE
	teqne	ip, #L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE
#endif

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ hardware version

	@ We try to map 64K page entries when possible.
	@ We do that for kernel space only since the usage pattern from
	@ the setting of VM area is quite simple.  User space is not worth
	@ the implied complexity because of ever randomly changing PTEs
	@ (page aging, swapout, etc) requiring constant coherency checks.
	@ Since PTEs are usually set in increasing order, we test the
	@ possibility for a large page only when given the last PTE of a
	@ 64K boundary.
	tsteq	r1, #L_PTE_USER
	andeq	r1, r0, #(15 << 2)
	teqeq	r1, #(15 << 2)
	beq	1f

	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

	@ See if we have 16 identical PTEs but with consecutive base addresses
1:	bic	r3, r2, #0x0000f000
	mov	r1, #0x0000f000
2:	eor	r2, r2, r3
	teq	r2, r1
	bne	4f
	subs	r1, r1, #0x00001000
	ldr	r2, [r0, #-4]!
	bne	2b
	eors	r2, r2, r3
	bne	4f

	@ Now create our LARGE PTE from the current EXT one.
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, #PTE_TYPE_LARGE
	and	r2, r3, #0x30			@ EXT_AP --> LARGE_AP0
	orr	r2, r2, r2, lsl #2		@ add LARGE_AP1
	orr	r2, r2, r2, lsl #4		@ add LARGE_AP3 + LARGE_AP2
	and	r1, r3, #0x3c0			@ EXT_TEX
	bic	r3, r3, #0x3c0
	orr	r2, r2, r1, lsl #(12 - 6)	@ --> LARGE_TEX
	orr	r2, r2, r3			@ add remaining bits

	@ then put it in the pagetable
	mov	r3, r2
3:	strd	r2, [r0], #8
	tst	r0, #(15 << 2)
	bne	3b

	@ Then sync the 2 corresponding cache lines
	sub	r0, r0, #(16 << 2)
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
4:	orr	r0, r0, #(15 << 2)
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mov	ip, #0
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

	.ltorg
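/*
 * For clarity, the two boolean formulas used in cpu_xscale_set_pte above,
 * written out as C (pte_x_bit/pte_b_bit are illustrative names, not
 * kernel API; each flag argument is 0 or 1):
 *
 *	// X = C & ~W & ~B  |  C & W & B & write_allocate
 *	static inline int pte_x_bit(int c, int w, int b, int write_allocate)
 *	{
 *		return (c && !w && !b) || (c && w && b && write_allocate);
 *	}
 *
 *	// Errata 40:  B = B & ~((U|E) & C & ~W)
 *	// i.e. clear B for a user-accessible, cacheable, read-only page.
 *	static inline int pte_b_bit(int b, int u, int e, int c, int w)
 *	{
 *		return b && !((u || e) && c && !w);
 *	}
 */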
cpu_manu_name:
	.asciz	"Intel"
cpu_80200_name:
	.asciz	"XScale-80200"
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.align

	.section ".text.init", #alloc, #execinstr

__xscale_setup:
	mov	r0, #F_BIT|I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	mov	r0, #0x1f			@ Domains 0, 1 = client
	mcr	p15, 0, r0, c3, c0, 0		@ load domain access register
	mov	r0, #1				@ Allow user space to access
	mcr	p15, 0, r0, c15, c1, 0		@ ... CP 0 only.
#if CACHE_WRITE_THROUGH
	mov	r0, #0x20
#else
	mov	r0, #0x00
#endif
	mcr	p15, 0, r0, c1, c1, 0		@ set auxiliary control reg
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, #0x0200			@ ......R.........
	bic	r0, r0, #0x0082			@ ........B.....A.
	orr	r0, r0, #0x0005			@ .............C.M
	orr	r0, r0, #0x3900			@ ..VIZ..S........
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic	r0, r0, #0x0004			@ see cpu_xscale_proc_init
#endif
	mov	pc, lr

	.text

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	cpu_xscale_data_abort
	.word	cpu_xscale_check_bugs
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle

	/* cache */
	.word	cpu_xscale_cache_clean_invalidate_all
	.word	cpu_xscale_cache_clean_invalidate_range
	.word	cpu_xscale_flush_ram_page

	/* dcache */
	.word	cpu_xscale_dcache_invalidate_range
	.word	cpu_xscale_dcache_clean_range
	.word	cpu_xscale_dcache_clean_page
	.word	cpu_xscale_dcache_clean_entry

	/* icache */
	.word	cpu_xscale_icache_invalidate_range
	.word	cpu_xscale_icache_invalidate_page

	/* tlb */
	.word	cpu_xscale_tlb_invalidate_all
	.word	cpu_xscale_tlb_invalidate_range
	.word	cpu_xscale_tlb_invalidate_page

	/* pgtable */
	.word	cpu_xscale_set_pgd
	.word	cpu_xscale_set_pmd
	.word	cpu_xscale_set_pte
	.size	xscale_processor_functions, . - xscale_processor_functions

	.type	cpu_80200_info, #object
cpu_80200_info:
	.long	cpu_manu_name
	.long	cpu_80200_name
	.size	cpu_80200_info, . - cpu_80200_info

	.type	cpu_pxa250_info, #object
cpu_pxa250_info:
	.long	cpu_manu_name
	.long	cpu_pxa250_name
	.size	cpu_pxa250_info, . - cpu_pxa250_info

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info", #alloc, #execinstr

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
#if CACHE_WRITE_THROUGH
	.long	0x00000c0a
#else
	.long	0x00000c0e
#endif
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_info
	.long	xscale_processor_functions
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
#if CACHE_WRITE_THROUGH
	.long	0x00000c0a
#else
	.long	0x00000c0e
#endif
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_info
	.long	xscale_processor_functions
	.size	__pxa250_proc_info, . - __pxa250_proc_info
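/*
 * For reference, the control register masks written by __xscale_setup
 * earlier in this file decode as follows (standard CP15 register 1 bit
 * positions; a sketch, the kernel defines its own names elsewhere):
 *
 *	#define CR_M	(1 << 0)	// MMU enable         -> set
 *	#define CR_A	(1 << 1)	// alignment faults   -> cleared
 *	#define CR_C	(1 << 2)	// Dcache enable      -> set
 *	#define CR_B	(1 << 7)	// big-endian         -> cleared
 *	#define CR_S	(1 << 8)	// system protection  -> set
 *	#define CR_R	(1 << 9)	// ROM protection     -> cleared
 *	#define CR_Z	(1 << 11)	// BTB enable         -> set
 *	#define CR_I	(1 << 12)	// Icache enable      -> set
 *	#define CR_V	(1 << 13)	// high vectors       -> set
 *
 *	// so:  bic #0x0200 clears R; bic #0x0082 clears B and A;
 *	//      orr #0x0005 sets C and M; orr #0x3900 sets V, I, Z and S.
 */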