
📄 locore.s

📁 Early FreeBSD implementation
💻 S
📖 Page 1 of 2
 *	Level 6:	Internal SIO used uPD7201A
 *	Level 7:	Non-maskable: Abort Key (Dispatched vector to ROM monitor)
 */
	.globl	_scintr, __siointr, _hardclock
_lev2intr:
	clrw	sp@-
	moveml	#0xC0C0,sp@-
	jbsr	_scintr
	moveml	sp@+,#0x0303
	addql	#2,sp
	jra	rei
_lev3intr:
	clrw	sp@-
	moveml	#0xC0C0,sp@-
	moveml	sp@+,#0x0303
	addql	#2,sp
	jra	rei
_lev5intr:
	clrw	sp@-			| push pad word
	moveml	#0xC0C0,sp@-		| save scratch regs
	movl	#CLOCK_REG,a0		| get clock CR addr
	movb	#CLK_CLR,a0@		| reset system clock
	lea	sp@(16),a1		| get pointer to PS
	movl	a1@,sp@-		| push padded PS
	movl	a1@(4),sp@-		| push PC
	jbsr	_hardclock		| call generic clock int routine
	addql	#8,sp			| pop params
	moveml	sp@+,#0x0303		| restore scratch regs
	addql	#2,sp			| pop pad word
	jra	rei			| all done
_lev6intr:
	clrw	sp@-
	moveml	#0xC0C0,sp@-
	jbsr	__siointr
	moveml	sp@+,#0x0303
	addql	#2,sp
	jra	rei

/*
 * Emulation of VAX REI instruction.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.  A cleanup should only be needed at this
 * point for coprocessor mid-instruction frames (type 9), but we also test
 * for bus error frames (type 10 and 11).
 */
#if 0
	.comm	_ssir,1
rei:
#ifdef DEBUG
	tstl	_panicstr		| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	btst	#PCB_ASTB,_u+PCB_FLAGS+1| AST pending?
	jeq	Lchksir			| no, go check for SIR
	btst	#5,sp@			| yes, are we returning to user mode?
	jne	Lchksir			| no, go check for SIR
	clrw	sp@-			| pad SR to longword
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(60)		|    the users SP
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_ASTFLT,sp@-		| type == async system trap
	jbsr	_trap			| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(60),a0		| restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| and all remaining registers
	addql	#4,sp			| toss SSP
	tstw	sp@+			| do we need to clean up stack?
	jeq	Ldorte			| no, just continue
	btst	#7,sp@(6)		| type 9/10/11 frame?
	jeq	Ldorte			| no, nothing to do
	btst	#5,sp@(6)		| type 9?
	jne	Last1			| no, skip
	movw	sp@,sp@(12)		| yes, push down SR
	movl	sp@(2),sp@(14)		| and PC
	clrw	sp@(18)			| and mark as type 0 frame
	lea	sp@(12),sp		| clean the excess
	jra	Ldorte			| all done
Last1:
	btst	#4,sp@(6)		| type 10?
	jne	Last2			| no, skip
	movw	sp@,sp@(24)		| yes, push down SR
	movl	sp@(2),sp@(26)		| and PC
	clrw	sp@(30)			| and mark as type 0 frame
	lea	sp@(24),sp		| clean the excess
	jra	Ldorte			| all done
Last2:
	movw	sp@,sp@(84)		| type 11, push down SR
	movl	sp@(2),sp@(86)		| and PC
	clrw	sp@(90)			| and mark as type 0 frame
	lea	sp@(84),sp		| clean the excess
	jra	Ldorte			| all done
Lchksir:
	tstb	_ssir			| SIR pending?
	jeq	Ldorte			| no, all done
	movl	d0,sp@-			| need a scratch register
	movw	sp@(4),d0		| get SR
	andw	#PSL_IPL7,d0		| mask all but IPL
	jne	Lnosir			| came from interrupt, no can do
	movl	sp@+,d0			| restore scratch register
Lgotsir:
	movw	#SPL1,sr		| prevent others from servicing int
	tstb	_ssir			| too late?
	jeq	Ldorte			| yes, oh well...
	clrw	sp@-			| pad SR to longword
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(60)		|    the users SP
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_SSIR,sp@-		| type == software interrupt
	jbsr	_trap			| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(60),a0		| restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| and all remaining registers
	addql	#6,sp			| pop SSP and align word
	rte
Lnosir:
	movl	sp@+,d0			| restore scratch register
Ldorte:
#else
rei:					| dummy Entry of rei
#endif
	rte				| real return

/*
 * Primitives
 */

#ifdef GPROF
#ifdef __GNUC__
#define	ENTRY(name) \
	.globl _/**/name; _/**/name: link a6,#0; jbsr mcount; unlk a6
#define ALTENTRY(name, rname) \
	ENTRY(name); jra rname+12
#else
#define	ENTRY(name) \
	.globl _/**/name; _/**/name: jbsr mcount
#define ALTENTRY(name, rname) \
	ENTRY(name); jra rname+6
#endif
#else
#define	ENTRY(name) \
	.globl _/**/name; _/**/name:
#define ALTENTRY(name, rname) \
	.globl _/**/name; _/**/name:
#endif

/*
 * non-local gotos
 */
ALTENTRY(savectx, _setjmp)
ENTRY(setjmp)
	movl	sp@(4),a0	| savearea pointer
	moveml	#0xFCFC,a0@	| save d2-d7/a2-a7
	movl	sp@,a0@(48)	| and return address
	moveq	#0,d0		| return 0
	rts
ENTRY(qsetjmp)
	movl	sp@(4),a0	| savearea pointer
	lea	a0@(40),a0	| skip regs we do not save
	movl	a6,a0@+		| save FP
	movl	sp,a0@+		| save SP
	movl	sp@,a0@		| and return address
	moveq	#0,d0		| return 0
	rts
ENTRY(longjmp)
	movl	sp@(4),a0
	moveml	a0@+,#0xFCFC
	movl	a0@,sp@
	moveq	#1,d0
	rts

	.globl	_getsfc, _getdfc
_getsfc:
	movc	sfc,d0
	rts
_getdfc:
	movc	dfc,d0
	rts

/*
 * Set processor priority level calls.  Most could (should) be replaced
 * by inline asm expansions.  However, SPL0 and SPLX require special
 * handling.  If we are returning to the base processor priority (SPL0)
 * we need to check for our emulated software interrupts.
 */
ENTRY(spl0)
	moveq	#0,d0
	movw	sr,d0			| get old SR for return
	movw	#PSL_LOWIPL,sr		| restore new SR
|	jra	Lsplsir
	rts
ENTRY(splx)
	moveq	#0,d0
	movw	sr,d0			| get current SR for return
	movw	sp@(6),d1		| get new value
	movw	d1,sr			| restore new SR
|	andw	#PSL_IPL7,d1		| mask all but PSL_IPL
|	jne	Lspldone		| non-zero, all done
|Lsplsir:
|	tstb	_ssir			| software interrupt pending?
|	jeq	Lspldone		| no, all done
|	subql	#4,sp			| make room for RTE frame
|	movl	sp@(4),sp@(2)		| position return address
|	clrw	sp@(6)			| set frame type 0
|	movw	#PSL_LOWIPL,sp@		| and new SR
|	jra	Lgotsir			| go handle it
|Lspldone:
	rts
ENTRY(spl1)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL1,sr
	rts
ALTENTRY(splscsi, _spl2)
ENTRY(spl2)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL2,sr
	rts
ENTRY(spl3)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL3,sr
	rts
ENTRY(spl4)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL4,sr
	rts
ENTRY(spl5)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL5,sr
	rts
ENTRY(spl6)
	moveq	#0,d0
	movw	sr,d0
	movw	#SPL6,sr
	rts
ALTENTRY(splhigh, _spl7)
ENTRY(spl7)
	moveq	#0,d0
	movw	sr,d0
	movw	#PSL_HIGHIPL,sr
	rts
ENTRY(_insque)
	movw	sr,d0
	movw	#PSL_HIGHIPL,sr		| atomic
	movl	sp@(8),a0		| where to insert (after)
	movl	sp@(4),a1		| element to insert (e)
	movl	a0@,a1@			| e->next = after->next
	movl	a0,a1@(4)		| e->prev = after
	movl	a1,a0@			| after->next = e
	movl	a1@,a0
	movl	a1,a0@(4)		| e->next->prev = e
	movw	d0,sr
	rts
ENTRY(_remque)
	movw	sr,d0
	movw	#PSL_HIGHIPL,sr		| atomic
	movl	sp@(4),a0		| element to remove (e)
	movl	a0@,a1
	movl	a0@(4),a0
	movl	a0,a1@(4)		| e->next->prev = e->prev
	movl	a1,a0@			| e->prev->next = e->next
	movw	d0,sr
	rts
ALTENTRY(blkclr, _bzero)
ENTRY(bzero)
	movl	sp@(4),a0
	movl	sp@(8),d0
	jeq	Lbzero1
	movl	a0,d1
	btst	#0,d1
	jeq	Lbzero2
	clrb	a0@+
	subql	#1,d0
	jeq	Lbzero1
Lbzero2:
	movl	d0,d1
	andl	#31,d0
	lsrl	#5,d1
	jeq	Lbzero3
Lbzero4:
	clrl	a0@+; clrl	a0@+; clrl	a0@+; clrl	a0@+;
	clrl	a0@+; clrl	a0@+; clrl	a0@+; clrl	a0@+;
	subql	#1,d1
	jne	Lbzero4
	tstl	d0
	jeq	Lbzero1
Lbzero3:
	clrb	a0@+
	subql	#1,d0
	jne	Lbzero3
Lbzero1:
	rts

/*
 * strlen(str)
 */
ENTRY(strlen)
	moveq	#-1,d0
	movl	sp@(4),a0	| string
Lslloop:
	addql	#1,d0		| increment count
	tstb	a0@+		| null?
	jne	Lslloop		| no, keep going
	rts

/*
 * bcmp(s1, s2, len)
 *
 * WARNING!  This guy only works with counts up to 64K
 */
ENTRY(bcmp)
	movl	sp@(4),a0		| string 1
	movl	sp@(8),a1		| string 2
	moveq	#0,d0
	movw	sp@(14),d0		| length
	jeq	Lcmpdone		| if zero, nothing to do
	subqw	#1,d0			| set up for DBcc loop
Lcmploop:
	cmpmb	a0@+,a1@+		| equal?
	dbne	d0,Lcmploop		| yes, keep going
	addqw	#1,d0			| +1 gives zero on match
Lcmpdone:
	rts

/*
 * {ov}bcopy(from, to, len)
 *
 * Works for counts up to 128K.
 */
ALTENTRY(ovbcopy, _bcopy)
ENTRY(bcopy)
	movl	sp@(12),d0		| get count
	jeq	Lcpyexit		| if zero, return
	movl	sp@(4),a0		| src address
	movl	sp@(8),a1		| dest address
	cmpl	a1,a0			| src before dest?
	jlt	Lcpyback		| yes, copy backwards (avoids overlap)
	movl	a0,d1
	btst	#0,d1			| src address odd?
	jeq	Lcfeven			| no, go check dest
	movb	a0@+,a1@+		| yes, copy a byte
	subql	#1,d0			| update count
	jeq	Lcpyexit		| exit if done
Lcfeven:
	movl	a1,d1
	btst	#0,d1			| dest address odd?
	jne	Lcfbyte			| yes, must copy by bytes
	movl	d0,d1			| no, get count
	lsrl	#2,d1			| convert to longwords
	jeq	Lcfbyte			| no longwords, copy bytes
	subql	#1,d1			| set up for dbf
Lcflloop:
	movl	a0@+,a1@+		| copy longwords
	dbf	d1,Lcflloop		| til done
	andl	#3,d0			| get remaining count
	jeq	Lcpyexit		| done if none
Lcfbyte:
	subql	#1,d0			| set up for dbf
Lcfbloop:
	movb	a0@+,a1@+		| copy bytes
	dbf	d0,Lcfbloop		| til done
Lcpyexit:
	rts
Lcpyback:
	addl	d0,a0			| add count to src
	addl	d0,a1			| add count to dest
	movl	a0,d1
	btst	#0,d1			| src address odd?
	jeq	Lcbeven			| no, go check dest
	movb	a0@-,a1@-		| yes, copy a byte
	subql	#1,d0			| update count
	jeq	Lcpyexit		| exit if done
Lcbeven:
	movl	a1,d1
	btst	#0,d1			| dest address odd?
	jne	Lcbbyte			| yes, must copy by bytes
	movl	d0,d1			| no, get count
	lsrl	#2,d1			| convert to longwords
	jeq	Lcbbyte			| no longwords, copy bytes
	subql	#1,d1			| set up for dbf
Lcblloop:
	movl	a0@-,a1@-		| copy longwords
	dbf	d1,Lcblloop		| til done
	andl	#3,d0			| get remaining count
	jeq	Lcpyexit		| done if none
Lcbbyte:
	subql	#1,d0			| set up for dbf
Lcbbloop:
	movb	a0@-,a1@-		| copy bytes
	dbf	d0,Lcbbloop		| til done
	rts

	.data
/*
 * Memory Information Field for secondary booter memory allocator
 */
	.globl  _prgcore
	.globl	_dipsw1,_dipsw2
_prgcore:
	.long	0
	.long	0
	.long	0
_gotoROM:
	.long	0
_dipsw1:
	.long	0
_dipsw2:
	.long	0
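A note on the queue primitives: the _insque/_remque routines in the listing above perform the classic doubly linked list insert and unlink, with the SR raised to PSL_HIGHIPL so the pointer updates are atomic with respect to interrupts. The following is a rough C rendering of just those pointer updates; the struct and field names are illustrative only and do not come from this source file, and the interrupt masking is indicated only by comments.

	/* Illustrative layout: next pointer at offset 0, prev pointer at offset 4,
	 * matching the a0@/a0@(4) accesses in the assembly above. */
	struct qelem {
		struct qelem *q_next;
		struct qelem *q_prev;
	};

	/* insert e after the element "after"; the real code blocks interrupts around this */
	void
	insque_sketch(struct qelem *e, struct qelem *after)
	{
		e->q_next = after->q_next;	/* e->next = after->next */
		e->q_prev = after;		/* e->prev = after */
		after->q_next = e;		/* after->next = e */
		e->q_next->q_prev = e;		/* e->next->prev = e */
	}

	/* unlink e from whatever queue it is on */
	void
	remque_sketch(struct qelem *e)
	{
		e->q_next->q_prev = e->q_prev;	/* e->next->prev = e->prev */
		e->q_prev->q_next = e->q_next;	/* e->prev->next = e->next */
	}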

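Similarly, the bcopy routine above (which the ovbcopy alternate entry maps onto) chooses its copy direction from the relative position of the two buffers so that overlapping regions come out right. A minimal C sketch of that decision follows; the function name is hypothetical, and a plain byte loop stands in for the real routine's alignment and longword-at-a-time copies.

	#include <stddef.h>

	/* Copy backwards when the source lies below the destination (possible
	 * overlap), otherwise forwards, mirroring the Lcpyback test above. */
	void
	bcopy_sketch(const char *from, char *to, size_t len)
	{
		if (from < to) {		/* possible overlap: copy from the end */
			while (len-- > 0)
				to[len] = from[len];
		} else {			/* safe to walk forwards */
			while (len-- > 0)
				*to++ = *from++;
		}
	}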