
📄 misc.s

📁 Linux kernel source code, distributed as a compressed archive: the source code accompanying the book <<Linux内核>> (The Linux Kernel)
💻 S (PowerPC assembly source)
📖 Page 1 of 2
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include "ppc_asm.h"

#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
#define CACHE_LINE_SIZE		16
#define LG_CACHE_LINE_SIZE	4
#define MAX_COPY_PREFETCH	1
#elif !defined(CONFIG_PPC64BRIDGE)
#define CACHE_LINE_SIZE		32
#define LG_CACHE_LINE_SIZE	5
#define MAX_COPY_PREFETCH	4
#else
#define CACHE_LINE_SIZE		128
#define LG_CACHE_LINE_SIZE	7
#define MAX_COPY_PREFETCH	1
#endif /* CONFIG_4xx || CONFIG_8xx */

	.text

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/* void __no_use_save_flags(unsigned long *flags) */
_GLOBAL(__no_use_save_flags)
	mfmsr	r4
	stw	r4,0(r3)
	blr

/* void __no_use_restore_flags(unsigned long flags) */
_GLOBAL(__no_use_restore_flags)
/*
 * Just set/clear the MSR_EE bit through restore/flags but do not
 * change anything else.  This is needed by the RT system and makes
 * sense anyway.
 *    -- Cort
 */
	mfmsr	r4
	/* Copy all except the MSR_EE bit from r4 (current MSR value)
	   to r3.  This is the sort of thing the rlwimi instruction is
	   designed for.  -- paulus. */
	rlwimi	r3,r4,0,17,15
	/* Check if things are setup the way we want _already_. */
	cmpw	0,r3,r4
	beqlr
	/* are we enabling interrupts? */
	rlwinm.	r0,r3,0,16,16
	beq	1f
	/* if so, check if there are any lost interrupts */
	lis	r7,ppc_n_lost_interrupts@ha
	lwz	r7,ppc_n_lost_interrupts@l(r7)
	cmpi	0,r7,0		/* lost interrupts to process first? */
	bne-	do_lost_interrupts
1:	sync
	mtmsr	r3
	isync
	blr

_GLOBAL(__no_use_cli)
	mfmsr	r0		/* Get current interrupt state */
	rlwinm	r3,r0,16+1,32-1,31	/* Extract old value of 'EE' */
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	blr			/* Done */

_GLOBAL(__no_use_sti)
	lis	r4,ppc_n_lost_interrupts@ha
	lwz	r4,ppc_n_lost_interrupts@l(r4)
	mfmsr	r3		/* Get current state */
	ori	r3,r3,MSR_EE	/* Turn on 'EE' bit */
	cmpi	0,r4,0		/* lost interrupts to process first? */
	bne-	do_lost_interrupts
	sync			/* Some chip revs have problems here... */
	mtmsr	r3		/* Update machine state */
	blr

/*
 * We were about to enable interrupts but we have to simulate
 * some interrupts that were lost by enable_irq first.
 */
_GLOBAL(do_lost_interrupts)
	stwu	r1,-16(r1)
	mflr	r0
	stw	r0,20(r1)
	stw	r3,8(r1)
1:	bl	fake_interrupt
	lis	r4,ppc_n_lost_interrupts@ha
	lwz	r4,ppc_n_lost_interrupts@l(r4)
	cmpi	0,r4,0
	bne-	1b
	lwz	r3,8(r1)
	sync
	mtmsr	r3
	lwz	r0,20(r1)
	mtlr	r0
	addi	r1,r1,16
	blr

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	sync			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	blr			/* Done */
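The rlwimi in __no_use_restore_flags above is worth unpacking: with a mask running from bit 17 around to bit 15, it copies every bit of r4 except bit 16, which is MSR_EE (0x8000 under IBM bit numbering, where bit 0 is the most significant). A minimal C sketch of the same merge (merge_flags is an illustrative name, not a kernel function):

	/* Hedged C rendering of the rlwimi merge: keep MSR_EE from the
	 * saved flags word, take every other bit from the live MSR. */
	#define MSR_EE	0x8000UL	/* MSR bit 16 in IBM numbering */

	static unsigned long merge_flags(unsigned long flags, unsigned long cur_msr)
	{
		return (flags & MSR_EE) | (cur_msr & ~MSR_EE);
	}

The cmpw/beqlr that follows then returns early when the merged value already equals the current MSR, so the mtmsr is only executed when something actually changes.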
/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	mfmsr	r10
	sync
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	SYNC
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,10
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
#endif /* CONFIG_SMP */
	sync
	tlbia
	sync
#ifdef CONFIG_SMP
	tlbsync
	sync
	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
	mtmsr	r10
	SYNC
#endif
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_SMP)
	mfmsr	r10
	sync
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	SYNC
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,11
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
#endif /* CONFIG_SMP */
	tlbie	r3
	sync
#ifdef CONFIG_SMP
	tlbsync
	sync
	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
	mtmsr	r10
	SYNC
#endif
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#ifdef CONFIG_8xx
	isync
	lis	r5, IDC_INVALL@h
	mtspr	IC_CST, r5
#else
	mfspr	r3,PVR
	rlwinm	r3,r3,16,16,31
	cmpi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,HID0
	ori	r3,r3,HID0_ICFI
	mtspr	HID0,r3
#endif /* CONFIG_8xx */
	SYNC
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_icache_range)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr				/* for 601, do nothing */
	li	r5,CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,CACHE_LINE_SIZE
	bdnz	2b
	sync
	isync
	blr

/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,CACHE_LINE_SIZE-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_CACHE_LINE_SIZE
	beqlr
	mtctr	r4
1:	dcbst	0,r3
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_page_to_ram(void *page)
 */
_GLOBAL(__flush_page_to_ram)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr				/* for 601, do nothing */
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr
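Both flush_icache_range and flush_dcache_range above open with the same four instructions (andc/subf/add/srwi.): round the start address down to a cache-line boundary, then convert the byte range into a count of cache lines, rounding up. A self-contained C sketch of that arithmetic, assuming the 32-byte line size of the non-4xx/8xx configuration (lines_in_range is an illustrative name):

	#include <stdio.h>

	#define CACHE_LINE_SIZE		32
	#define LG_CACHE_LINE_SIZE	5

	/* How many cache lines the dcbst/icbi loops will touch. */
	static unsigned long lines_in_range(unsigned long start, unsigned long stop)
	{
		unsigned long aligned = start & ~(unsigned long)(CACHE_LINE_SIZE - 1);
		return (stop - aligned + CACHE_LINE_SIZE - 1) >> LG_CACHE_LINE_SIZE;
	}

	int main(void)
	{
		/* A 3-byte range straddling a line boundary needs 2 lines. */
		printf("%lu\n", lines_in_range(0x1003e, 0x10041));	/* prints 2 */
		return 0;
	}

In the assembly, srwi. also sets the condition register, so the beqlr can return immediately when the count is zero, before the ctr loop is even set up.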
/*
 * Flush a particular page from the instruction cache.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_icache_page(void *page)
 */
_GLOBAL(__flush_icache_page)
	mfspr	r5,PVR
	rlwinm	r5,r5,16,16,31
	cmpi	0,r5,1
	beqlr				/* for 601, do nothing */
	li	r4,4096/CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
1:	icbi	0,r3
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr

/*
 * Clear a page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 */
_GLOBAL(clear_page)
	li	r0,4096/CACHE_LINE_SIZE
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4
	li	r5,4

#ifndef CONFIG_8xx
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,CACHE_LINE_SIZE
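clear_page above zeroes a 4 KB page one cache line per iteration; on 8xx, where the line size is 16 bytes, it substitutes four word stores for dcbz. A hedged C sketch of that fallback path (clear_page_sketch is an illustrative name):

	#define PAGE_SIZE	4096
	#define LINE_SIZE	16	/* CACHE_LINE_SIZE on 4xx/8xx */

	/* Four 32-bit stores cover one 16-byte line; repeat across the page. */
	static void clear_page_sketch(unsigned int *p)
	{
		for (int i = 0; i < PAGE_SIZE / LINE_SIZE; i++) {
			p[0] = 0;
			p[1] = 0;
			p[2] = 0;
			p[3] = 0;
			p += LINE_SIZE / sizeof(unsigned int);
		}
	}

On the other processors a single dcbz zero-fills the whole line in the cache without reading it from memory first, which is exactly the saving the comment above clear_page describes. The copy_page listing is cut off at this point; it continues on page 2.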
