
📄 misc.S

📁 Linux 2.4.20 kernel source; can be patched with RTLinux 3.2 to form a real-time Linux system, then compile the kernel
💻 PowerPC assembly (.S)
📖 Page 1 of 2
/*
 * BK Id: SCCS/s.misc.S 1.36 12/01/01 20:09:06 benh
 */
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include "ppc_asm.h"

	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	slwi	r4,r4,2
	sub	r8,r8,r3
	stwx	r8,r4,r6
	blr

/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3
	lwz	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l
	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Don't change register layout, the setup function may rely
 * on r5 containing a relocated pointer to the current cpu spec.
 */
_GLOBAL(call_setup_cpu)
	addis	r5,r3,cur_cpu_spec@ha
	addi	r5,r5,cur_cpu_spec@l
	slwi	r4,r24,2
	lwzx	r5,r4,r5
	add	r5,r5,r3
	lwz	r6,CPU_SPEC_SETUP(r5)
	add	r6,r6,r3
	mtctr	r6
	mr	r3,r24
	bctr

/* void __save_flags_ptr(unsigned long *flags) */
_GLOBAL(__save_flags_ptr)
	mfmsr	r4
	stw	r4,0(r3)
	blr
	/*
	 * Need these nops here for taking over save/restore to
	 * handle lost intrs
	 * -- Cort
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(__save_flags_ptr_end)

/* void __restore_flags(unsigned long flags) */
_GLOBAL(__restore_flags)
/*
 * Just set/clear the MSR_EE bit through restore/flags but do not
 * change anything else.  This is needed by the RT system and makes
 * sense anyway.
 *    -- Cort
 */
	mfmsr	r4
	/* Copy all except the MSR_EE bit from r4 (current MSR value)
	   to r3.  This is the sort of thing the rlwimi instruction is
	   designed for.  -- paulus. */
	rlwimi	r3,r4,0,17,15
	/* Check if things are setup the way we want _already_. */
	cmpw	0,r3,r4
	beqlr
1:	SYNC
	mtmsr	r3
	SYNC
	blr
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(__restore_flags_end)

_GLOBAL(__cli)
	mfmsr	r0		/* Get current interrupt state */
	rlwinm	r3,r0,16+1,32-1,31	/* Extract old value of 'EE' */
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	blr			/* Done */
	/*
	 * Need these nops here for taking over save/restore to
	 * handle lost intrs
	 * -- Cort
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(__cli_end)

_GLOBAL(__sti)
	mfmsr	r3		/* Get current state */
	ori	r3,r3,MSR_EE	/* Turn on 'EE' bit */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r3		/* Update machine state */
	blr
	/*
	 * Need these nops here for taking over save/restore to
	 * handle lost intrs
	 * -- Cort
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(__sti_end)

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	SYNC
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,10
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
#endif /* CONFIG_SMP */
	sync
	tlbia
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
	mtmsr	r10
	SYNC
#endif
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_SMP)
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	SYNC
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,11
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
#endif /* CONFIG_SMP */
	tlbie	r3
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
	mtmsr	r10
	SYNC
#endif
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	IC_CST, r5
#elif defined(CONFIG_4xx)
	lis	r3, KERNELBASE@h
	iccci	0,r3
#else
	mfspr	r3,PVR
	rlwinm	r3,r3,16,16,31
	cmpi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,HID0
	ori	r3,r3,HID0_ICFI
	mtspr	HID0,r3
#endif /* CONFIG_8xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_icache_range)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr				/* for 601, do nothing */
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_page_to_ram(void *page)
 */
_GLOBAL(__flush_page_to_ram)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr					/* for 601, do nothing */
	rlwinm	r3,r3,0,0,19			/* Get page base address */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the instruction cache.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_icache_page(void *page)
 */
_GLOBAL(__flush_icache_page)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr					/* for 601, do nothing */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
1:	icbi	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr

/*
 * Clear a page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 */
_GLOBAL(clear_page)
	li	r0,4096/L1_CACHE_LINE_SIZE
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
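(The listing continues on page 2, partway through copy_page.)

How the routines above are reached from C: a minimal sketch, assuming the standard Linux 2.4 save_flags()/cli()/restore_flags() macros, which on PPC expand to the __save_flags_ptr/__cli/__restore_flags entry points in this file. The nop padding after each routine is the room the file's own comments reserve "for taking over save/restore", which is exactly what an RTLinux patch does when it interposes its soft-interrupt versions. update_counter() and counter are hypothetical names for illustration.

	#include <asm/system.h>		/* save_flags, cli, restore_flags (2.4 headers) */

	static int counter;		/* hypothetical shared state */

	void update_counter(void)
	{
		unsigned long flags;

		save_flags(flags);	/* snapshot the MSR, including MSR_EE */
		cli();			/* clear MSR_EE: mask external interrupts */
		counter++;		/* critical section */
		restore_flags(flags);	/* put MSR_EE back the way it was */
	}

Likewise, the cache routines are what make runtime code patching safe on PPC, where the I-cache does not snoop the D-cache (do_cpu_ftr_fixups above demonstrates the same dcbst/sync/icbi sequence inline). A sketch with a hypothetical patch_insn() helper, using the flush_icache_range(start, stop) signature documented in the file:

	extern void flush_icache_range(unsigned long start, unsigned long stop);

	/* hypothetical helper: overwrite one instruction in place, then push
	   the store out of the D-cache and invalidate the stale I-cache line */
	void patch_insn(unsigned int *addr, unsigned int insn)
	{
		*addr = insn;
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + sizeof(insn));
	}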
