
cpu_asm.S

RTEMS (Real-Time Executive for Multiprocessor Systems) is a free open source real-time operating system.
Page 1 of 2
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: 24March, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix intermittent FP error encountered on ST5 mission
 *          implementation on Mongoose V processor.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id: cpu_asm.S,v 1.28.2.2 2004/04/03 16:29:06 joel Exp $
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

/* enable debugging shadow writes to misc ram, this is a vestigial
 * Mongoose-ism debug tool- but may be handy in the future so we
 * left it in...
 */

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */

/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD	dadd
#define STREG	sd
#define LDREG	ld
#define MFCO	dmfc0
#define MTCO	dmtc0
#define ADDU	addu
#define ADDIU	addiu
#define R_SZ	8
#define F_SZ	8
#define SZ_INT	8
#define SZ_INT_POW2 3
/* XXX if we don't always want 64 bit register ops, then another ifdef */
#elif __mips == 1
/* 32 bit register operations */
#define NOP	nop
#define ADD	add
#define STREG	sw
#define LDREG	lw
#define MFCO	mfc0
#define MTCO	mtc0
#define ADDU	add
#define ADDIU	addi
#define R_SZ	4
#define F_SZ	4
#define SZ_INT	4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif

#define ISR_VEC_SIZE	4
#define EXCP_STACK_SIZE (NREGS*R_SZ)

#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32

	ASM_EXTERN(__exceptionStackFrame, SZ_INT)

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

	/*
	** Make sure the FPU is on before we save state.  This code
	** is here because the FPU context switch might occur when an
	** integer task is switching out with a FP task switching in.
	*/
	MFC0	t0,C0_SR
	li	t2,SR_CU1
	move	t1,t0
	or	t0,t2		/* turn on the fpu */
#if __mips == 3
	li	t2,SR_EXL | SR_IE
#elif __mips == 1
	li	t2,SR_IEC
#endif
	not	t2
	and	t0,t2		/* turn off interrupts */
	MTC0	t0,C0_SR

	ld	a1,(a0)
	move	t0,ra
	jal	_CPU_Context_save_fp_from_exception
	NOP

	/*
	** Reassert the task's state because we've not saved it yet.
	*/
	MTC0	t1,C0_SR
	j	t0
	NOP

	.globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

	/*
	** Make sure the FPU is on before we retrieve state.  This code
	** is here because the FPU context switch might occur when an
	** integer task is switching out with a FP task switching in.
	*/
	MFC0	t0,C0_SR
	li	t2,SR_CU1
	move	t1,t0
	or	t0,t2		/* turn on the fpu */
#if __mips == 3
	li	t2,SR_EXL | SR_IE
#elif __mips == 1
	li	t2,SR_IEC
#endif
	not	t2
	and	t0,t2		/* turn off interrupts */
	MTC0	t0,C0_SR

	ld	a1,(a0)
	move	t0,ra
	jal	_CPU_Context_restore_fp_from_exception
	NOP

	/*
	** Reassert the old task's state because we've not restored the
	** new one yet.
	*/
	MTC0	t1,C0_SR
	j	t0
	NOP

	.globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        lwc1 $f0,FP0_OFFSET*4(a1)
        lwc1 $f1,FP1_OFFSET*4(a1)
        lwc1 $f2,FP2_OFFSET*4(a1)
        lwc1 $f3,FP3_OFFSET*4(a1)
        lwc1 $f4,FP4_OFFSET*4(a1)
        lwc1 $f5,FP5_OFFSET*4(a1)
        lwc1 $f6,FP6_OFFSET*4(a1)
        lwc1 $f7,FP7_OFFSET*4(a1)
        lwc1 $f8,FP8_OFFSET*4(a1)
        lwc1 $f9,FP9_OFFSET*4(a1)
        lwc1 $f10,FP10_OFFSET*4(a1)
        lwc1 $f11,FP11_OFFSET*4(a1)
        lwc1 $f12,FP12_OFFSET*4(a1)
        lwc1 $f13,FP13_OFFSET*4(a1)
        lwc1 $f14,FP14_OFFSET*4(a1)
        lwc1 $f15,FP15_OFFSET*4(a1)
        lwc1 $f16,FP16_OFFSET*4(a1)
        lwc1 $f17,FP17_OFFSET*4(a1)
        lwc1 $f18,FP18_OFFSET*4(a1)
        lwc1 $f19,FP19_OFFSET*4(a1)
        lwc1 $f20,FP20_OFFSET*4(a1)
        lwc1 $f21,FP21_OFFSET*4(a1)
        lwc1 $f22,FP22_OFFSET*4(a1)
        lwc1 $f23,FP23_OFFSET*4(a1)
        lwc1 $f24,FP24_OFFSET*4(a1)
        lwc1 $f25,FP25_OFFSET*4(a1)
        lwc1 $f26,FP26_OFFSET*4(a1)
        lwc1 $f27,FP27_OFFSET*4(a1)
        lwc1 $f28,FP28_OFFSET*4(a1)
        lwc1 $f29,FP29_OFFSET*4(a1)
        lwc1 $f30,FP30_OFFSET*4(a1)
        lwc1 $f31,FP31_OFFSET*4(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*4(a1)      /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        MFC0	t0,C0_SR
#if __mips == 3
	li	t1,SR_EXL | SR_IE
#elif __mips == 1
	li	t1,SR_IEC
#endif
	STREG	t0,C0_SR_OFFSET*R_SZ(a0)	/* save the task's SR */
	not	t1
        and	t0,t1				/* mask off interrupts while we context switch */
        MTC0	t0,C0_SR
	NOP

        STREG ra,RA_OFFSET*R_SZ(a0)		/* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

	/*
	** this code grabs the userspace EPC if we're dispatching from
	** an interrupt frame or supplies the address of the dispatch
	** routines if not.  This is entirely for the gdbstub's benefit so
	** it can know where each task is running.
	**
	** Its value is only set when calling threadDispatch from
	** the interrupt handler and is cleared immediately when this
	** routine gets it.
	*/

	la	t0,__exceptionStackFrame	/* see if we're coming in from an exception */
	LDREG	t1, (t0)
	NOP
	beqz	t1,1f

	STREG	zero, (t0)			/* and clear it */
	NOP
	LDREG	t0,R_EPC*R_SZ(t1)		/* get the userspace EPC from the frame */
	b	2f

1:	la    t0,_Thread_Dispatch		/* if ==0, we're switched out */

2:	STREG   t0,C0_EPC_OFFSET*R_SZ(a0)

_CPU_Context_switch_restore:
	LDREG ra,RA_OFFSET*R_SZ(a1)		/* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

//	NOP
//#if __mips == 3
//        andi  t0,SR_EXL
//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
//        li    t0,~SR_EXL
//        MFC0  t1,C0_SR
//        NOP
//        and   t1,t0
//        MTC0  t1,C0_SR
//
//#elif __mips == 1
//
//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
//        MFC0  t0,C0_SR
//        NOP
//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
//        MTC0  t0,C0_SR                      /* set with enabled */
//	  NOP

/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level directly corresponds to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/

	li	t2,SR_CU1
	or	t2,SR_IMASK	/* int enable bits */
#if __mips == 3
	or	t2,SR_EXL + SR_IE
#elif __mips == 1
	/*
	** Save current, previous & old int enables.  This is key because
	** we can dispatch from within the stack frame used by an
	** interrupt service.  The int enables nest, but not beyond
	** previous and old because of the dispatch interlock seen
	** in the interrupt processing code
	*/
	or	t2,SR_IEC + SR_IEP + SR_IEO
#endif
	and	t0,t2		/* keep only the per-task bits */

	MFC0	t1,C0_SR	/* grab the current SR */
	not	t2
	and	t1,t2		/* mask off the old task's bits */
	or	t1,t0		/* or in the new task's bits */
        MTC0	t1,C0_SR	/* and load the new SR */
	NOP

	/* _CPU_Context_1: */
        j	ra
        NOP
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move	a1,a0
        j	_CPU_Context_switch_restore
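
The *_OFFSET constants near the top of the file are register-slot indices into the Context_Control and Context_Control_fp structures declared in cpu.h; the assembly multiplies them by R_SZ or F_SZ to form byte offsets. As a hedged illustration only (the authoritative definitions live in cpu.h; the field names and the register-sized typedef below are assumptions), the layout those offsets imply looks roughly like this:

#include <stdint.h>

#if __mips == 3
typedef uint64_t mips_reg_t;            /* R_SZ == 8: 64-bit register slots */
#else
typedef uint32_t mips_reg_t;            /* R_SZ == 4: 32-bit register slots */
#endif

/* Sketch of the integer context implied by S0_OFFSET..C0_EPC_OFFSET. */
typedef struct {
    mips_reg_t s0, s1, s2, s3, s4, s5, s6, s7;  /* offsets 0..7                      */
    mips_reg_t sp;                              /* SP_OFFSET     8                   */
    mips_reg_t fp;                              /* FP_OFFSET     9                   */
    mips_reg_t ra;                              /* RA_OFFSET    10                   */
    mips_reg_t c0_sr;                           /* C0_SR_OFFSET 11: saved status reg */
    mips_reg_t c0_epc;                          /* C0_EPC_OFFSET 12: resume address  */
} Context_Control_sketch;

/* Sketch of the FP context implied by FP0_OFFSET..FPCS_OFFSET. */
typedef struct {
    mips_reg_t fp_regs[32];                     /* $f0..$f31                          */
    mips_reg_t fpcs;                            /* FPCS_OFFSET 32: FP status/control  */
} Context_Control_fp_sketch;

If the structures in cpu.h ever gain or reorder fields, these offsets must be updated in lockstep, which is exactly what the two NOTE comments in the listing warn about.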
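Both FP routines take a void **fp_context_ptr: the "ld a1,(a0)" near the top of each dereferences that handle before branching to the _from_exception entry point that does the actual swc1/lwc1 work on $f0..$f31 plus the FP status/control register. A hedged usage sketch follows; the prototypes match the comments in the listing, but the helper function and its name are purely illustrative, not RTEMS API:

/* Prototypes as documented in the listing above. */
extern void _CPU_Context_save_fp( void **fp_context_ptr );
extern void _CPU_Context_restore_fp( void **fp_context_ptr );

/* Illustrative only: a dispatcher-style caller passes the address of each
 * task's FP-context pointer; the routines dereference it and save/restore
 * the FP register file at that location. */
void example_fp_switch( void **outgoing_fp, void **incoming_fp )
{
    _CPU_Context_save_fp( outgoing_fp );     /* may be deferred under the lazy-FP scheme */
    _CPU_Context_restore_fp( incoming_fp );
}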
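The tail of _CPU_Context_switch merges the incoming task's per-task SR bits (the FPU enable plus the interrupt mask/enable bits that double as the RTEMS interrupt level) into the live status register while leaving every other SR bit global. A minimal C rendering of that mask-and-merge, assuming the bit masks come from idtcpu.h and using an illustrative helper name:

#include <stdint.h>

/* per_task_mask corresponds to SR_CU1 | SR_IMASK, plus SR_IEC|SR_IEP|SR_IEO
 * on MIPS ISA 1 or SR_EXL|SR_IE on MIPS ISA 3 (values come from idtcpu.h). */
uint32_t splice_task_sr( uint32_t live_sr,
                         uint32_t task_saved_sr,
                         uint32_t per_task_mask )
{
    uint32_t task_bits = task_saved_sr & per_task_mask;  /* "and t0,t2": keep per-task bits      */
    live_sr &= ~per_task_mask;                           /* "not t2; and t1,t2": drop old task's */
    return live_sr | task_bits;                          /* "or t1,t0": install new task's bits  */
}

Because the enables are treated as a bitmask rather than a ranked level, raising or lowering a task's interrupt level in cpu.c amounts to editing these bits in its saved SR.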
