📄 head.s
字号:
/*
 * linux/boot/head.S
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
/*
 * head.S contains the 32-bit startup code.
 *
 * 1-Jan-96 Modified by Chris Brady for use as a boot/loader for MemTest-86.
 *          Setup the memory management for flat non-paged linear addressing.
 * 17 May 2004 : Added X86_PWRCAP for AMD64 (Memtest86+ - Samuel D.)
 *
 * Syntax: GNU as, AT&T operand order, 32-bit protected mode (.code32).
 * The code is position independent: %ebx holds the GOT base throughout
 * and all symbol references go through @GOTOFF.
 */
.text
#define __ASSEMBLY__
#include "defs.h"
#include "config.h"
#include "test.h"

/*
 * References to members of the boot_cpu_data structure (byte offsets
 * into the cpu_id structure defined elsewhere in the project).
 */
#define CPU_PARAMS	cpu_id
#define X86		0
#define X86_MODEL	1
#define X86_MASK	2
#define X86_CPUID	4
#define X86_CAPABILITY	8
#define X86_VENDOR_ID	12
#define X86_CACHE	24
#define X86_PWRCAP	40
#define X86_EXT		44

	.code32
	.globl startup_32
startup_32:
	cld
	cli

	/* Ensure I have a stack pointer (may arrive here with %esp == 0) */
	testl	%esp, %esp
	jnz	0f
	movl	$(LOW_TEST_ADR + _GLOBAL_OFFSET_TABLE_), %esp
	leal	stack_top@GOTOFF(%esp), %esp
0:
	/* Load the GOT pointer: call/pop yields the current EIP, then add
	 * the link-time distance from this point to the GOT. */
	call	0f
0:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-0b], %ebx

	/* Pick the appropriate stack address */
	leal	stack_top@GOTOFF(%ebx), %esp

	/* Reload all of the segment registers: patch the runtime GDT base
	 * into gdt_descr (base field is at offset 2), load it, then do a
	 * far return to reload %cs with KERNEL_CS. */
	leal	gdt@GOTOFF(%ebx), %eax
	movl	%eax, 2 + gdt_descr@GOTOFF(%ebx)
	lgdt	gdt_descr@GOTOFF(%ebx)
	leal	flush@GOTOFF(%ebx), %eax
	pushl	$KERNEL_CS
	pushl	%eax
	lret
flush:	movl	$KERNEL_DS, %eax	/* reload the data segment registers */
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss

/*
 * Zero BSS (only on the first pass: the zerobss flag is cleared after)
 * NOTE(review): assumes _bss.._end is a multiple of 4 bytes and non-empty;
 * the loop tests %ecx only after subtracting — confirm against the linker
 * script.
 */
	cmpl	$1, zerobss@GOTOFF(%ebx)
	jnz	zerobss_done
	xorl	%eax, %eax
	leal	_bss@GOTOFF(%ebx), %edi
	leal	_end@GOTOFF(%ebx), %ecx
	subl	%edi, %ecx		/* %ecx = BSS size in bytes */
1:	movl	%eax, (%edi)
	addl	$4, %edi
	subl	$4, %ecx
	jnz	1b
	movl	$0, zerobss@GOTOFF(%ebx)	/* don't zero again on re-entry */
zerobss_done:

/*
 * Clear the video display: fill VGA text memory (0xb8000..0xc0000)
 * with 0x0720 = space character, attribute 0x07 (grey on black).
 */
	cmpl	$1, clear_display@GOTOFF(%ebx)
	jnz	clear_display_done
	movw	$0x0720, %ax
	movl	$0xb8000, %edi
	movl	$0xc0000, %ecx		/* end address, not a count */
1:	movw	%ax, (%edi)
	addl	$2, %edi
	cmpl	%ecx, %edi
	jnz	1b
	movl	$0, clear_display@GOTOFF(%ebx)	/* only clear once */
clear_display_done:

/*
 * Setup an exception handler: build 20 IDT gates (vectors 0-19) in place.
 * Each 8-byte gate is assembled in %eax (low dword: selector:offset15..0)
 * and %edx (high dword: offset31..16 : type), then stored at %edi.
 * The 20 stanzas below are identical except for the vecN handler address.
 */
	leal	idt@GOTOFF(%ebx), %edi
	leal	vec0@GOTOFF(%ebx), %edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx, %ax		/* selector = 0x0010 = cs */
	movw	$0x8E00, %dx		/* interrupt gate - dpl=0, present */
	movl	%eax, (%edi)
	movl	%edx, 4(%edi)
	addl	$8, %edi
	leal	vec1@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec2@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec3@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec4@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec5@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec6@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec7@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec8@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec9@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec10@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec11@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec12@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec13@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec14@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec15@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec16@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec17@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec18@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi
	leal	vec19@GOTOFF(%ebx),%edx
	movl	$(KERNEL_CS << 16),%eax
	movw	%dx,%ax			/* selector = 0x0010 = cs */
	movw	$0x8E00,%dx		/* interrupt gate - dpl=0, present */
	movl	%eax,(%edi)
	movl	%edx,4(%edi)
	addl	$8,%edi

	/* Now that it is initialized load the interrupt descriptor table:
	 * patch the runtime IDT base into idt_descr (offset 2), then lidt. */
	leal	idt@GOTOFF(%ebx), %eax
	movl	%eax, 2 + idt_descr@GOTOFF(%ebx)
	lidt	idt_descr@GOTOFF(%ebx)

/* Find out the CPU type.  %esi = cpu_id record; %edi saves the GOT
 * pointer because the Cyrix probe below clobbers %bx. */
	leal	cpu_id@GOTOFF(%ebx), %esi
	movl	%ebx, %edi		/* save GOT pointer across the probes */
	movl	$-1, X86_CPUID(%esi)	# -1 for no CPUID initially

/* check if it is 486 or 386: a 486 can toggle the AC bit (0x40000)
 * in EFLAGS, a 386 cannot. */
	movl	$3, X86(%esi)		# at least 386
	pushfl				# push EFLAGS
	popl	%eax			# get EFLAGS
	movl	%eax, %ecx		# save original EFLAGS
	xorl	$0x40000, %eax		# flip AC bit in EFLAGS
	pushl	%eax			# copy to EFLAGS
	popfl				# set EFLAGS
	pushfl				# get new EFLAGS
	popl	%eax			# put it in eax
	xorl	%ecx, %eax		# change in flags
	andl	$0x40000, %eax		# check if AC bit changed
	je	id_done
	movl	$4, X86(%esi)		# at least 486
	movl	%ecx, %eax
	xorl	$0x200000, %eax		# check ID flag
	pushl	%eax
	popfl				# if we are on a straight 486DX, SX, or
	pushfl				# 487SX we can't change it
	popl	%eax
	xorl	%ecx, %eax
	pushl	%ecx			# restore original EFLAGS
	popfl
	andl	$0x200000, %eax
	jne	have_cpuid
	/* Test for Cyrix CPU types: Cyrix parts leave flags unchanged
	 * across DIV, so a flag delta after 5/2 identifies them. */
	xorw	%ax, %ax		# clear ax
	sahf				# clear flags
	movw	$5, %ax
	movw	$2, %bx
	div	%bl			# do operation that does not change flags
	lahf				# get flags
	cmp	$2, %ah			# check for change in flags
	jne	id_done			# if not Cyrix
	movl	$2, X86(%esi)		# Use two to identify as Cyrix
	jmp	id_done
have_cpuid:
	/* get vendor info */
	xorl	%eax, %eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl	%eax, X86_CPUID(%esi)	# save CPUID level
	movl	%ebx, X86_VENDOR_ID(%esi)	# first 4 chars
	movl	%edx, X86_VENDOR_ID+4(%esi)	# next 4 chars
	movl	%ecx, X86_VENDOR_ID+8(%esi)	# last 4 chars
	orl	%eax, %eax		# do we have processor info as well?
	je	id_done
	movl	$1, %eax		# Use the CPUID instruction to get CPU type
	cpuid
	#
	# CDH start
	# Check FPU, initialize if present
	#
	testl	$1, %edx		# FPU available?
	jz	no_fpu
	finit
no_fpu:
	#
	# CDH end
	#
	movl	%eax, X86_EXT(%esi)	#save complete extended CPUID to X86_EXT
	movb	%al, %cl		# save reg for future use
	andb	$0x0f, %ah		# mask processor family
	movb	%ah, X86(%esi)
	andb	$0xf0, %al		# mask model
	shrb	$4, %al
	movb	%al, X86_MODEL(%esi)
	andb	$0x0f, %cl		# mask mask revision
	movb	%cl, X86_MASK(%esi)
	movl	%edx, X86_CAPABILITY(%esi)
	movl	$0, X86_CACHE(%esi)
	movl	$0, X86_CACHE+4(%esi)
	movl	$0, X86_CACHE+8(%esi)
	movl	$0, X86_CACHE+12(%esi)

	/* Vendor dispatch: compare the last 4 chars of the vendor string. */
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x6c65746e,%eax	# Is this an Intel CPU? "GenuineIntel"
	jne	not_intel
	movb	%bl, X86_PWRCAP(%esi)	# Store BrandID in AMD PWRCAP if the CPU is from Intel
	movl	$2, %eax		# Use the CPUID instruction to get cache info
	cpuid
	movl	%eax, X86_CACHE(%esi)
	movl	%ebx, X86_CACHE+4(%esi)
	movl	%ecx, X86_CACHE+8(%esi)
	movl	%edx, X86_CACHE+12(%esi)
	jmp	id_done
not_intel:
	movl	X86_VENDOR_ID+8(%esi),%eax
	cmpl	$0x444d4163, %eax	# Is this an AMD CPU? "AuthenticAMD"
	jne	not_amd
	movl	$0x80000005, %eax	# Use the CPUID instruction to get cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006,%eax	# Use the CPUID instruction to get cache info
	cpuid
	movl	%ecx,X86_CACHE+8(%esi)
	movl	$0x80000007,%eax	# Use the CPUID instruction to get AMD Powercap
	cpuid
	movl	%edx,X86_PWRCAP(%esi)
not_amd:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x3638784D, %eax	# Is this a Transmeta CPU? "GenuineTMx86"
	jne	not_transmeta
	movl	$0x80000000, %eax	# Use the CPUID instruction to check for cache info
	cpuid
	cmp	$6, %al			# Is cache info available?
	jb	id_done
	movl	$0x80000005, %eax	# Use the CPUID instruction to get L1 cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006, %eax	# Use the CPUID instruction to get L2 cache info
	cpuid
	movl	%ecx, X86_CACHE+8(%esi)
not_transmeta:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x64616574, %eax	# Is this a Via/Cyrix CPU? "CyrixInstead"
	jne	not_cyrix
	movl	X86_CPUID(%esi), %eax	# get CPUID level
	cmpl	$2, %eax		# Is there cache information available ?
	jne	id_done
	movl	$2, %eax		# Use the CPUID instruction to get cache info
	cpuid
	movl	%edx, X86_CACHE(%esi)
not_cyrix:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x736C7561, %eax	# Is this a Via/Centaur CPU "CentaurHauls"
	jne	not_centaur
	movl	$0x80000000, %eax	# Use the CPUID instruction to check for cache info
	cpuid
	cmp	$6, %al			# Is cache info available?
	jb	id_done
	movl	$0x80000005, %eax	# Use the CPUID instruction to get L1 cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006, %eax	# Use the CPUID instruction to get L2 cache info
	cpuid
	movl	%ecx, X86_CACHE+8(%esi)
not_centaur:
id_done:
	movl	%edi, %ebx		/* Restore GOT pointer */

	/* Run the dynamic loader startup, then enter the test proper. */
	leal	_dl_start@GOTOFF(%ebx), %eax
	call	*%eax
	call	do_test

	/* In case we return simulate an exception: push an EFLAGS/CS/EIP
	 * frame plus a fake error code and vector 257, as int_hand expects. */
	pushfl
	pushl	%cs
	call	0f
0:	pushl	$0			/* error code */
	pushl	$257			/* vector */
	jmp	int_hand

/* Exception entry stubs: each pushes a dummy error code and its vector
 * number, then jumps to the common handler int_hand (defined past the
 * end of this chunk). */
vec0:
	pushl	$0			/* error code */
	pushl	$0			/* vector */
	jmp	int_hand
vec1:
	pushl	$0			/* error code */
	pushl	$1			/* vector */
	jmp	int_hand
vec2:
	pushl	$0			/* error code */
	pushl	$2			/* vector */
	jmp	int_hand
vec3:
	pushl	$0			/* error code */
	pushl	$3			/* vector */
	jmp	int_hand
vec4:
	pushl	$0			/* error code */
	pushl	$4			/* vector */
	jmp	int_hand
vec5:
	pushl	$0			/* error code */
	pushl	$5			/* vector */
	jmp	int_hand
vec6:
	pushl	$0			/* error code */
	pushl	$6			/* vector */
	jmp	int_hand
vec7:
	pushl	$0			/* error code */
	/* NOTE(review): source chunk is truncated here — vec7's vector push,
	 * vec8..vec19, int_hand, gdt/idt descriptors and the stack lie past
	 * the end of the visible text. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -