
common.c

Linux kernel source code
C
Page 1 of 2
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	/* SEP disabled? */
	if (disable_x86_sep)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}
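	/*
	 * Illustrative example (hypothetical values, not taken from this
	 * file): if the boot CPU accumulated x86_capability[0] == 0xbfebfbff
	 * and a secondary CPU reports 0xbfe9fbff, the AND above leaves
	 * 0xbfebfbff & 0xbfe9fbff == 0xbfe9fbff, so only the features
	 * present on every CPU remain advertised through boot_cpu_data.
	 */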
	/* Init Machine Check Exception if available. */
	mcheck_init(c);
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	sysenter_setup();
	enable_sep_cpu();
	mtrr_bp_init();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	enable_sep_cpu();
	mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 	eax, ebx, ecx, edx;
	int 	index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1 ) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the "
					"siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->xfs = __KERNEL_PERCPU;
	return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct Xgt_desc_struct gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
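#if 0	/* Illustrative sketch, not part of the original common.c.  It mirrors
	 * the topology arithmetic detect_ht() performs above, with a local
	 * stand-in for the kernel's get_count_order() and made-up sample
	 * values; build it as an ordinary user-space program to see how an
	 * initial APIC ID splits into package and core IDs. */
#include <stdio.h>

/* ceil(log2(count)): local stand-in for the kernel's get_count_order() */
static int count_order(unsigned int count)
{
	int order = 0;
	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int apic_id   = 6;	/* CPUID.1:EBX[31:24], sample value */
	unsigned int siblings  = 4;	/* CPUID.1:EBX[23:16], logical CPUs per package */
	unsigned int max_cores = 2;	/* cores per package, sample value */

	/* Package ID: drop the bits that number logical CPUs within a package. */
	int index_msb = count_order(siblings);
	unsigned int phys_proc_id = apic_id >> index_msb;

	/* Core ID: drop the thread bits, then mask to the core-number width. */
	unsigned int threads_per_core = siblings / max_cores;
	int core_bits = count_order(max_cores);
	unsigned int cpu_core_id = (apic_id >> count_order(threads_per_core)) &
				   ((1u << core_bits) - 1);

	/* Prints "package 1, core 1" for the sample values above. */
	printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
	return 0;
}
#endif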
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_esp0(t, thread);
	set_tss_desc(cpu,t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif
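The cpu_test_and_set(cpu, cpu_initialized) check at the top of cpu_init() is what makes the function safe to run exactly once per CPU: the first caller atomically sets its bit in the cpu_initialized mask, while any repeat caller finds the bit already set, prints a warning, and spins. Below is a minimal user-space analogue of that guard written with C11 atomics; the mark_initialized() helper and the 64-CPU bitmask are inventions of this sketch, not the kernel's API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cpu_initialized_mask;	/* up to 64 "CPUs" in this sketch */

/* Returns 1 if this CPU was already marked, 0 if this call just marked it. */
static int mark_initialized(int cpu)
{
	unsigned long bit = 1UL << cpu;
	unsigned long old = atomic_fetch_or(&cpu_initialized_mask, bit);
	return (old & bit) != 0;
}

int main(void)
{
	printf("cpu 2 first call:  already=%d\n", mark_initialized(2));	/* 0 */
	printf("cpu 2 second call: already=%d\n", mark_initialized(2));	/* 1 */
	return 0;
}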
