⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 common.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * NOTE(review): this chunk opens mid-definition -- the function header is
 * above the visible region.  Judging by the body (it fills in a struct
 * cpuinfo_x86 from scratch, and the SMP comment below states that on the
 * first call c == &boot_cpu_data), this is presumably the main CPU
 * identification routine -- confirm against the full file.
 */
{
	int i;

	/* Start from a clean slate: mark everything unknown/unset. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	/* Baseline CPUID-based detection, common to all vendors. */
	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/* Optional per-vendor identify hook. */
	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);
		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? (XMM depends on FXSR, so it goes too.) */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */
	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	/* sysenter_setup() only once, on the boot CPU; SEP enabled on every CPU. */
	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	/* MTRR bring-up differs between the boot processor and secondary CPUs. */
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
/*
 * Derive Hyper-Threading topology from CPUID leaf 1: this CPU's APIC id,
 * the per-package sibling count, and the physical package and core ids.
 * Returns early when HT is absent or the CMP_LEGACY quirk flag is set.
 */
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 	eax, ebx, ecx, edx;
	int 	index_msb, core_bits;
	int 	cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	/* EBX bits 31..24 carry the initial APIC id. */
	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	/* EBX bits 23..16: logical processor count per physical package. */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1 ) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		/* Threads per core = total siblings / cores per package. */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings) ;

		core_bits = get_count_order(c->x86_max_cores);

		/* Core id: low core_bits of the shifted APIC id. */
		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}
#endif

/*
 * Print a one-line description of the CPU: vendor (unless the model
 * string already begins with it), the model name or a "<family>86"
 * fallback, and the stepping when known.
 */
void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	/* Only print the vendor if the model id doesn't already start with it. */
	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

/* Bitmask of CPUs that have completed cpu_init(). */
cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

/*
 * Call each vendor's early init hook, then run early detection.
 * Under CONFIG_DEBUG_PAGEALLOC, PSE is forcibly disabled because large
 * pages are incompatible with on-the-fly unmapping (see comment below).
 */
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.
We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __devinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

	/* Guard against double init: park this CPU, interrupts enabled. */
	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);

	/* Set up GDT entry for 16bit stack: splice the stack's base address
	 * into the descriptor's base fields and set the segment limit. */
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);

	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
	load_gdt(&cpu_gdt_descr[cpu]);
	load_idt(&idt_descr);

	/*
	 * Delete NT (clear the Nested Task bit in EFLAGS)
	 */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	/* cpu_init() must not run with a user mm active. */
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu,t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
/* Undo cpu_init() bookkeeping for a CPU going offline. */
void __devinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -