/*
 * processor_32.h — excerpt from the Linux kernel source tree
 * (i386 arch header, 787 lines total; this chunk is page 1 of 2).
 */
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\}#define start_thread(regs, new_eip, new_esp) do {		\	__asm__("movl %0,%%gs": :"r" (0));			\	regs->xfs = 0;						\	set_fs(USER_DS);					\	regs->xds = __USER_DS;					\	regs->xes = __USER_DS;					\	regs->xss = __USER_DS;					\	regs->xcs = __USER_CS;					\	regs->eip = new_eip;					\	regs->esp = new_esp;					\} while (0)/* Forward declaration, a strange C thing */struct task_struct;struct mm_struct;/* Free all resources held by a thread. */extern void release_thread(struct task_struct *);/* Prepare to copy thread state - unlazy all lazy status */extern void prepare_to_copy(struct task_struct *tsk);/* * create a kernel thread without removing it from tasklists */extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);extern unsigned long thread_saved_pc(struct task_struct *tsk);void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);unsigned long get_wchan(struct task_struct *p);#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))#define KSTK_TOP(info)                                                 \({                                                                     \       unsigned long *__ptr = (unsigned long *)(info);                 \       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \})/* * The below -8 is to reserve 8 bytes on top of the ring0 stack. * This is necessary to guarantee that the entire "struct pt_regs" * is accessable even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). * Therefore beware: accessing the xss/esp fields of the * "struct pt_regs" is possible, but they may contain the * completely wrong values. 
*/#define task_pt_regs(task)                                             \({                                                                     \       struct pt_regs *__regs__;                                       \       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \       __regs__ - 1;                                                   \})#define KSTK_EIP(task) (task_pt_regs(task)->eip)#define KSTK_ESP(task) (task_pt_regs(task)->esp)struct microcode_header {	unsigned int hdrver;	unsigned int rev;	unsigned int date;	unsigned int sig;	unsigned int cksum;	unsigned int ldrver;	unsigned int pf;	unsigned int datasize;	unsigned int totalsize;	unsigned int reserved[3];};struct microcode {	struct microcode_header hdr;	unsigned int bits[0];};typedef struct microcode microcode_t;typedef struct microcode_header microcode_header_t;/* microcode format is extended from prescott processors */struct extended_signature {	unsigned int sig;	unsigned int pf;	unsigned int cksum;};struct extended_sigtable {	unsigned int count;	unsigned int cksum;	unsigned int reserved[3];	struct extended_signature sigs[0];};/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */static inline void rep_nop(void){	__asm__ __volatile__("rep;nop": : :"memory");}#define cpu_relax()	rep_nop()static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread){	tss->x86_tss.esp0 = thread->esp0;	/* This can only happen when SEP is enabled, no need to test "SEP"arately */	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {		tss->x86_tss.ss1 = thread->sysenter_cs;		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);	}}static inline unsigned long native_get_debugreg(int regno){	unsigned long val = 0; 	/* Damn you, gcc! 
*/	switch (regno) {	case 0:		asm("movl %%db0, %0" :"=r" (val)); break;	case 1:		asm("movl %%db1, %0" :"=r" (val)); break;	case 2:		asm("movl %%db2, %0" :"=r" (val)); break;	case 3:		asm("movl %%db3, %0" :"=r" (val)); break;	case 6:		asm("movl %%db6, %0" :"=r" (val)); break;	case 7:		asm("movl %%db7, %0" :"=r" (val)); break;	default:		BUG();	}	return val;}static inline void native_set_debugreg(int regno, unsigned long value){	switch (regno) {	case 0:		asm("movl %0,%%db0"	: /* no output */ :"r" (value));		break;	case 1:		asm("movl %0,%%db1"	: /* no output */ :"r" (value));		break;	case 2:		asm("movl %0,%%db2"	: /* no output */ :"r" (value));		break;	case 3:		asm("movl %0,%%db3"	: /* no output */ :"r" (value));		break;	case 6:		asm("movl %0,%%db6"	: /* no output */ :"r" (value));		break;	case 7:		asm("movl %0,%%db7"	: /* no output */ :"r" (value));		break;	default:		BUG();	}}/* * Set IOPL bits in EFLAGS from given mask */static inline void native_set_iopl_mask(unsigned mask){	unsigned int reg;	__asm__ __volatile__ ("pushfl;"			      "popl %0;"			      "andl %1, %0;"			      "orl %2, %0;"			      "pushl %0;"			      "popfl"				: "=&r" (reg)				: "i" (~X86_EFLAGS_IOPL), "r" (mask));}#ifdef CONFIG_PARAVIRT#include <asm/paravirt.h>#else#define paravirt_enabled() 0#define __cpuid native_cpuidstatic inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread){	native_load_esp0(tss, thread);}/* * These special macros can be used to get or set a debugging register */#define get_debugreg(var, register)				\	(var) = native_get_debugreg(register)#define set_debugreg(value, register)				\	native_set_debugreg(register, value)#define set_iopl_mask native_set_iopl_mask#endif /* CONFIG_PARAVIRT *//* * Generic CPUID function * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. 
*/static inline void cpuid(unsigned int op,			 unsigned int *eax, unsigned int *ebx,			 unsigned int *ecx, unsigned int *edx){	*eax = op;	*ecx = 0;	__cpuid(eax, ebx, ecx, edx);}/* Some CPUID calls want 'count' to be placed in ecx */static inline void cpuid_count(unsigned int op, int count,			       unsigned int *eax, unsigned int *ebx,			       unsigned int *ecx, unsigned int *edx){	*eax = op;	*ecx = count;	__cpuid(eax, ebx, ecx, edx);}/* * CPUID functions returning a single datum */static inline unsigned int cpuid_eax(unsigned int op){	unsigned int eax, ebx, ecx, edx;	cpuid(op, &eax, &ebx, &ecx, &edx);	return eax;}static inline unsigned int cpuid_ebx(unsigned int op){	unsigned int eax, ebx, ecx, edx;	cpuid(op, &eax, &ebx, &ecx, &edx);	return ebx;}static inline unsigned int cpuid_ecx(unsigned int op){	unsigned int eax, ebx, ecx, edx;	cpuid(op, &eax, &ebx, &ecx, &edx);	return ecx;}static inline unsigned int cpuid_edx(unsigned int op){	unsigned int eax, ebx, ecx, edx;	cpuid(op, &eax, &ebx, &ecx, &edx);	return edx;}/* generic versions from gas */#define GENERIC_NOP1	".byte 0x90\n"#define GENERIC_NOP2    	".byte 0x89,0xf6\n"#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7/* Opteron nops */#define K8_NOP1 GENERIC_NOP1#define K8_NOP2	".byte 0x66,0x90\n" #define K8_NOP3	".byte 0x66,0x66,0x90\n" #define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5	K8_NOP3 K8_NOP2 #define K8_NOP6	K8_NOP3 K8_NOP3#define K8_NOP7	K8_NOP4 K8_NOP3#define K8_NOP8	K8_NOP4 K8_NOP4/* K7 nops *//* uses eax dependencies (arbitary choice) */#define K7_NOP1  GENERIC_NOP1#define K7_NOP2	".byte 0x8b,0xc0\n" #define K7_NOP3	".byte 0x8d,0x04,0x20\n"#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"#define K7_NOP5	K7_NOP4 
ASM_NOP1#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"#define K7_NOP8        K7_NOP7 ASM_NOP1/* P6 nops *//* uses eax dependencies (Intel-recommended choice) */#define P6_NOP1	GENERIC_NOP1#define P6_NOP2	".byte 0x66,0x90\n"#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"#ifdef CONFIG_MK8#define ASM_NOP1 K8_NOP1#define ASM_NOP2 K8_NOP2#define ASM_NOP3 K8_NOP3#define ASM_NOP4 K8_NOP4#define ASM_NOP5 K8_NOP5#define ASM_NOP6 K8_NOP6#define ASM_NOP7 K8_NOP7#define ASM_NOP8 K8_NOP8#elif defined(CONFIG_MK7)#define ASM_NOP1 K7_NOP1#define ASM_NOP2 K7_NOP2#define ASM_NOP3 K7_NOP3#define ASM_NOP4 K7_NOP4#define ASM_NOP5 K7_NOP5#define ASM_NOP6 K7_NOP6#define ASM_NOP7 K7_NOP7#define ASM_NOP8 K7_NOP8#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \      defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)#define ASM_NOP1 P6_NOP1#define ASM_NOP2 P6_NOP2#define ASM_NOP3 P6_NOP3#define ASM_NOP4 P6_NOP4#define ASM_NOP5 P6_NOP5#define ASM_NOP6 P6_NOP6#define ASM_NOP7 P6_NOP7#define ASM_NOP8 P6_NOP8#else#define ASM_NOP1 GENERIC_NOP1#define ASM_NOP2 GENERIC_NOP2#define ASM_NOP3 GENERIC_NOP3#define ASM_NOP4 GENERIC_NOP4#define ASM_NOP5 GENERIC_NOP5#define ASM_NOP6 GENERIC_NOP6#define ASM_NOP7 GENERIC_NOP7#define ASM_NOP8 GENERIC_NOP8#endif#define ASM_NOP_MAX 8/* Prefetch instructions for Pentium III and AMD Athlon *//* It's not worth to care about 3dnow! prefetches for the K6   because they are microcoded there and very slow.   However we don't do prefetches for pre XP Athlons currently   That should be fixed. 
*/#define ARCH_HAS_PREFETCHstatic inline void prefetch(const void *x){	alternative_input(ASM_NOP4,			  "prefetchnta (%1)",			  X86_FEATURE_XMM,			  "r" (x));}#define ARCH_HAS_PREFETCH#define ARCH_HAS_PREFETCHW#define ARCH_HAS_SPINLOCK_PREFETCH/* 3dnow! prefetch to get an exclusive cache line. Useful for    spinlocks to avoid one state transition in the cache coherency protocol. */static inline void prefetchw(const void *x){	alternative_input(ASM_NOP4,			  "prefetchw (%1)",			  X86_FEATURE_3DNOW,			  "r" (x));}#define spin_lock_prefetch(x)	prefetchw(x)extern void select_idle_routine(const struct cpuinfo_x86 *c);#define cache_line_size() (boot_cpu_data.x86_cache_alignment)extern unsigned long boot_option_idle_override;extern void enable_sep_cpu(void);extern int sysenter_setup(void);/* Defined in head.S */extern struct Xgt_desc_struct early_gdt_descr;extern void cpu_set_gdt(int);extern void switch_to_new_gdt(void);extern void cpu_init(void);extern void init_gdt(int cpu);extern int force_mwait;#endif /* __ASM_I386_PROCESSOR_H */
