
head.s

Describes the Linux initialization process.
/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	DBAT3U,r8
	mtspr	DBAT3L,r8

	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r11,16,16,31	/* half of pv const in low half*/
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b
	isync				/* No speculative loading until now */
	blr

apus_interrupt_entry:
	/* This is horrible, but there's no way around it. Enable the
	 * data cache so the IRQ hardware register can be accessed
	 * without cache intervention. Then disable interrupts and get
	 * the current emulated m68k IPL value.
	 */
	mfmsr	r20
	xori	r20,r20,MSR_DR
	sync
	mtmsr	r20
	sync

	lis	r4,APUS_IPL_EMU@h
	li	r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
	stb	r20,APUS_IPL_EMU@l(r4)
	eieio
	lbz	r3,APUS_IPL_EMU@l(r4)

	li	r2,IPLEMU_IPLMASK
	rlwinm.	r20,r3,32-3,29,31
	bne	2f
	mr	r20,r2		/* lvl7! Need to reset state machine. */
	b	3f
2:	cmp	0,r20,r2
	beq	1f
3:	eieio
	stb	r2,APUS_IPL_EMU@l(r4)
	ori	r20,r20,IPLEMU_SETRESET
	eieio
	stb	r20,APUS_IPL_EMU@l(r4)
1:	eieio
	li	r20,IPLEMU_DISABLEINT
	stb	r20,APUS_IPL_EMU@l(r4)

	/* At this point we could do some magic to avoid the overhead
	 * of calling the C interrupt handler in case of a spurious
	 * interrupt. Could not get a simple hack to work though.
	 */
	mfmsr	r20
	xori	r20,r20,MSR_DR
	sync
	mtmsr	r20
	sync

	stw	r3,(_CCR+4)(r21);

	addi	r3,r1,STACK_FRAME_OVERHEAD;
	li	r20,MSR_KERNEL;
	bl	transfer_to_handler;
	.long	do_IRQ;
	.long	ret_from_except

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff00000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */

#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	mfspr	r4,HID0
	ori	r4,r4,HID0_ICFI
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3
	mtspr	HID0,r4
	sync
	bl	prom_init
	b	__secondary_start
#endif /* CONFIG_GEMINI */

	.globl	__secondary_start_psurge
__secondary_start_psurge:
	li	r24,1			/* cpu # */
	/* we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	sync
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	sync
	MTMSRD(r0)
	isync
#else
	bl	enable_caches
#endif

	/* get current */
	lis	r2,current_set@h
	ori	r2,r2,current_set@l
	tophys(r2,r2)
	slwi	r24,r24,2		/* get current_set[cpu#] */
	lwzx	r2,r2,r24

	/* stack */
	addi	r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRG3,r4
	li	r3,0
	mtspr	SPRG2,r3	/* 0 => r1 has kernel sp */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

/*
 * Enable caches and 604-specific features if necessary.
 */
enable_caches:
	mfspr	r9,PVR
	rlwinm	r9,r9,16,16,31
	cmpi	0,r9,1
	beq	6f			/* not needed for 601 */
	mfspr	r11,HID0
	andi.	r0,r11,HID0_DCE
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	3f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
3:	sync
	mtspr	HID0,r8			/* enable and invalidate caches */
	sync
	mtspr	HID0,r11		/* enable caches */
	sync
	isync
	cmpi	0,r9,4			/* check for 604 */
	cmpi	1,r9,9			/* or 604e */
	cmpi	2,r9,10			/* or mach5 / 604r */
	cmpi	3,r9,8			/* check for 750 (G3) */
	cmpi	4,r9,12			/* or 7400 (G4) */
	cror	2,2,6
	cror	2,2,10
	bne	4f
	ori	r11,r11,HID0_SIED|HID0_BHTE /* for 604[e|r], enable */
	bne	2,5f
	ori	r11,r11,HID0_BTCD	/* superscalar exec & br history tbl */
	b	5f
4:	cror	14,14,18
	bne	3,6f
	/* for G3/G4:
	 * enable Store Gathering (SGE), Address Broadcast (ABE),
	 * Branch History Table (BHTE), Branch Target ICache (BTIC)
	 */
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	ICTC,r3			/* Instruction Cache Throttling off */
5:	isync
	mtspr	HID0,r11
	sync
	isync
6:	blr

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SDR1,r6
#ifdef CONFIG_PPC64BRIDGE
	/* clear the ASR so we only use the pseudo-segment registers. */
	li	r6,0
	mtasr	r6
#endif /* CONFIG_PPC64BRIDGE */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,1		/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
#ifndef CONFIG_POWER4
/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not.
 */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
#endif /* CONFIG_POWER4 */
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
#ifndef CONFIG_PPC64BRIDGE
	bl	enable_caches
#endif

	/* ptr to current */
	lis	r2,init_task_union@h
	ori	r2,r2,init_task_union@l

	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRG3,r4
	li	r3,0
	mtspr	SPRG2,r3	/* 0 => r1 has kernel sp */

	/* Clear out the BSS */
	lis	r11,_end@ha
	addi	r11,r11,_end@l
	lis	r8,__bss_start@ha
	addi	r8,r8,__bss_start@l
	subf	r11,r8,r11
	addi	r11,r11,3
	rlwinm.	r11,r11,30,2,31
	beq	2f
	addi	r8,r8,-4
	mtctr	r11
	li	r0,0
3:	stwu	r0,4(r8)
	bdnz	3b
2:
	/* stack */
	addi	r1,r2,TASK_UNION_SIZE
	li	r0,0
	stwu	r0,-STACK_FRAME_OVERHEAD(r1)
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	identify_machine
	bl	MMU_init

#ifdef CONFIG_APUS
	/* Copy exception code to exception vector base on APUS. */
	lis	r4,KERNELBASE@h
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r3,0xfff0		/* Copy to 0xfff00000 */
#else
	lis	r3,0			/* Copy to 0x00000000 */
#endif
	li	r5,0x4000		/* # bytes of memory to copy */
	li	r6,0
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
#endif  /* CONFIG_APUS */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SRR0,r4
	mtspr	SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
#ifdef CONFIG_SMP
	tlbsync			/* ... on all CPUs */
	sync
#endif
	bl	load_up_mmu

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	SYNC
	RFI

/*
 * Set up the segment registers for a new context.
 */
_GLOBAL(set_context)
	rlwinm	r3,r3,4,8,27	/* VSID = context << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,12		/* TASK_SIZE / SEGMENT_SIZE */
	mtctr	r0
	li	r4,0
3:
#ifdef CONFIG_PPC64BRIDGE
	slbie	r4
#endif /* CONFIG_PPC64BRIDGE */
	mtsrin	r3,r4
	addi	r3,r3,1		/* next VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	SYNC
	blr

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
#if !defined(CONFIG_GEMINI)
	li	r20,0
	mfspr	r9,PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	DBAT0U,r20
	mtspr	DBAT0L,r20
	mtspr	DBAT1U,r20
	mtspr	DBAT1L,r20
	mtspr	DBAT2U,r20
	mtspr	DBAT2L,r20
	mtspr	DBAT3U,r20
	mtspr	DBAT3L,r20
1:
	mtspr	IBAT0U,r20
	mtspr	IBAT0L,r20
	mtspr	IBAT1U,r20
	mtspr	IBAT1L,r20
	mtspr	IBAT2U,r20
	mtspr	IBAT2L,r20
	mtspr	IBAT3U,r20
	mtspr	IBAT3L,r20
#endif /* !defined(CONFIG_GEMINI) */
	blr

#ifndef CONFIG_GEMINI
flush_tlbs:
	lis	r20, 0x40
1:	addic.	r20, r20, -0x1000
	tlbie	r20
	blt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SRR0,r4
	mtspr	SRR1,r3
	sync
	RFI
#endif

#ifndef CONFIG_POWER4
/*
 * Use the first pair of BAT registers to map the 1st 16MB
 * of RAM to KERNELBASE.  From this point on we can't safely
 * call OF any more.
 */
initial_bats:
	lis	r11,KERNELBASE@h
#ifndef CONFIG_PPC64BRIDGE
	mfspr	r9,PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	IBAT0L,r8		/* lower BAT register */
	mtspr	IBAT1U,r9
	mtspr	IBAT1L,r10
	isync
	blr
#endif /* CONFIG_PPC64BRIDGE */

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
#ifdef CONFIG_APUS
	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
#else
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
#endif /* CONFIG_APUS */
#ifdef CONFIG_PPC64BRIDGE
	/* clear out the high 32 bits in the BAT */
	clrldi	r11,r11,32
	clrldi	r8,r8,32
#endif /* CONFIG_PPC64BRIDGE */
	mtspr	DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	DBAT0U,r11		/* bit in upper BAT register */
	mtspr	IBAT0L,r8
	mtspr	IBAT0U,r11
	isync
	blr

#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mfspr	r9,PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpi	0,r9,1
	beq	1f
	mtspr	DBAT3L,r8
	mtspr	DBAT3U,r11
	blr
1:	mtspr	IBAT3L,r8
	mtspr	IBAT3U,r11
	blr
#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
#endif /* CONFIG_POWER4 */

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	HID0, r11
	isync
	li	r5, MSR_
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SRR0,r6
	mtspr	SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif

#ifdef CONFIG_MOL
/*
 * Mac-on-linux hook_table. Don't put this in the data section -
 * the base address must be within the first 32KB of RAM.
 */
	.globl	mol_interface
mol_interface:
	.long	MOL_INTERFACE_VERSION
	.fill	24,4,0		/* space for 24 hooks */
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512
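A note on the phys-to-virt patch loop near the top of this listing: each entry in __ptov_table points at an instruction whose low halfword must receive the upper 16 bits of the physical-to-virtual offset held in r11; that is what "rlwimi r15,r11,16,16,31" does before the dcbst/sync/icbi sequence pushes the modified word out of the data cache and invalidates the stale instruction-cache line. The C sketch below only illustrates that one bit manipulation; patch_low_half and the example values are hypothetical, not part of the kernel.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Hypothetical helper mirroring "rlwimi r15,r11,16,16,31": rotate the
 * phys-to-virt constant left by 16 bits and insert it into the low
 * halfword of the instruction word, leaving the opcode and register
 * fields in the high halfword untouched. */
static uint32_t patch_low_half(uint32_t insn, uint32_t pv_const)
{
	return (insn & 0xffff0000u) | (pv_const >> 16);
}

int main(void)
{
	/* Example values only: an "addis r9,r9,0" placeholder encoding
	 * and an illustrative phys-to-virt constant of 0xc0000000. */
	uint32_t insn = 0x3d290000u;
	uint32_t patched = patch_low_half(insn, 0xc0000000u);

	printf("patched instruction: 0x%08" PRIx32 "\n", patched); /* 0x3d29c000 */
	return 0;
}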
