misc_32.s
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
_GLOBAL(mulhdu)
	/* A is in r3 (high word) and r4 (low word), B in r5 and r6;
	 * the high 64 bits of A*B come back in r3 (high) and r4 (low).
	 */
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	LOADADDR(r4,1b)
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	LOADADDR(r4,1b)
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number,
 * doesn't change r3
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	sub	r8,r8,r3
	stw	r8,0(r6)
	blr
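
/*
 * Each __ftr_fixup entry consumed by do_cpu_ftr_fixups below is
 * assumed to be four words long: a CPU feature mask, the expected
 * value after masking, and the begin/end addresses of the code
 * section to be nop'ed out, matching the -16/-12/-8/-4 loads in
 * the fixup loop.
 */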
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3
	lwz	r4,CPU_SPEC_FEATURES(r4)

	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l

	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1

	/* Build a HID1:PS bit from parameter */
	rlwinm	r5,r3,16,15,15

	/* Clear out HID1:PS from value read */
	rlwinm	r4,r4,0,16,14

	/* Could I have used rlwimi here? */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,18
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
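
/*
 * Note: in the hash-MMU SMP paths above and below, mmu_hash_lock is
 * taken with MSR:EE and MSR:DR cleared so that the tlbia/tlbie plus
 * tlbsync sequence cannot be interleaved with another processor's;
 * the value stored into the lock (the CPU number with a distinct bit
 * pattern set by oris in each caller) presumably identifies the
 * holder.
 */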
/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25
	 * is clear.  Since 25 is the V bit in the TLB_TAG, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 22
	 * is clear.  Since 22 is the V bit in the TLB_PAGEID, loading
	 * this value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr
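
/*
 * The PVR test in the fall-back path above relies on the processor
 * version number living in the upper halfword of the PVR; version 1
 * is the 601, whose unified cache is why the invalidate can be
 * skipped there.
 */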