⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 speedstep-centrino.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
	/*
	 * (tail of get_cur_freq() — the signature and local declarations
	 * precede this chunk)
	 *
	 * Migrate onto the target CPU so the MSR reads below are executed
	 * on the right core.
	 */
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	/* Migration failed — we are not on the requested CPU, give up. */
	if (smp_processor_id() != cpu)
		return 0;

	rdmsr(MSR_IA32_PERF_STATUS, l, h);
	clock_freq = extract_clock(l, cpu, 0);

	if (unlikely(clock_freq == 0)) {
		/*
		 * On some CPUs, we can see transient MSR values (which are
		 * not present in _PSS), while CPU is doing some automatic
		 * P-state transition (like TM2). Get the last freq set
		 * in PERF_CTL.
		 */
		rdmsr(MSR_IA32_PERF_CTL, l, h);
		clock_freq = extract_clock(l, cpu, 1);
	}

	/* Restore the caller's original CPU affinity before returning. */
	set_cpus_allowed(current, saved_mask);
	return clock_freq;
}

/*
 * centrino_cpu_init - cpufreq per-policy init callback
 * @policy: policy being initialized
 *
 * Verifies the CPU supports Enhanced SpeedStep, matches it against the
 * built-in cpu_ids table, builds the operating-point table, enables EST
 * in IA32_MISC_ENABLE if needed, and registers the frequency table with
 * the cpufreq core.  Returns 0 on success, -ENODEV otherwise.
 */
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
	unsigned freq;
	unsigned l, h;
	int ret;
	int i;

	/* Only Intel makes Enhanced Speedstep-capable CPUs */
	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

	/* TSC ticks at constant rate regardless of P-state on these CPUs,
	   so loops_per_jiffy need not be rescaled on transitions. */
	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;

	/* NOTE(review): only CPU 0 is accepted here even though the
	   centrino_cpu/centrino_model arrays are indexed per-CPU —
	   presumably the built-in tables only cover UP systems; confirm. */
	if (policy->cpu != 0)
		return -ENODEV;

	/* Match this CPU's signature against the table of known models. */
	for (i = 0; i < N_IDS; i++)
		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
			break;

	if (i != N_IDS)
		centrino_cpu[policy->cpu] = &cpu_ids[i];

	if (!centrino_cpu[policy->cpu]) {
		dprintk("found unsupported CPU with "
		"Enhanced SpeedStep: send /proc/cpuinfo to "
		MAINTAINER "\n");
		return -ENODEV;
	}

	/* Build the per-CPU operating-point (frequency/voltage) table. */
	if (centrino_cpu_init_table(policy)) {
		return -ENODEV;
	}

	/* Check to see if Enhanced SpeedStep is enabled, and try to
	   enable it if not. */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	if (!(l & (1<<16))) {
		/* Bit 16 gates Enhanced SpeedStep (see the messages below). */
		l |= (1<<16);
		dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
		wrmsr(MSR_IA32_MISC_ENABLE, l, h);

		/* check to see if it stuck */
		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
		if (!(l & (1<<16))) {
			printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
			return -ENODEV;
		}
	}

	freq = get_cur_freq(policy->cpu);

	policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
	policy->cur = freq;

	dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);

	ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
	if (ret)
		return (ret);

	/* Expose the frequency table via sysfs. */
	cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);

	return 0;
}

/*
 * centrino_cpu_exit - cpufreq per-policy teardown callback
 * @policy: policy being removed
 *
 * Unregisters the sysfs frequency table and clears the per-CPU model
 * pointer.  Returns -ENODEV if init never succeeded for this CPU.
 */
static int centrino_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;

	if (!centrino_model[cpu])
		return -ENODEV;

	cpufreq_frequency_table_put_attr(cpu);

	centrino_model[cpu] = NULL;

	return 0;
}

/**
 * centrino_verify - verifies a new CPUFreq policy
 * @policy: new policy
 *
 * Limit must be within this model's frequency range at least one
 * border included.
 */
static int centrino_verify (struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
}

/**
 * centrino_setpolicy - set a new CPUFreq policy
 * @policy: new policy
 * @target_freq: the target frequency
 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
 *
 * Sets a new CPUFreq policy.
 */
static int centrino_target (struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int	newstate = 0;
	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
	struct cpufreq_freqs	freqs;
	cpumask_t		online_policy_cpus;
	cpumask_t		saved_mask;
	cpumask_t		set_mask;
	cpumask_t		covered_cpus;	/* CPUs whose PERF_CTL we already wrote */
	int			retval = 0;
	unsigned int		j, k, first_cpu, tmp;

	if (unlikely(centrino_model[cpu] == NULL))
		return -ENODEV;

	/* Map the requested frequency onto an index in the op_points table. */
	if (unlikely(cpufreq_frequency_table_target(policy,
			centrino_model[cpu]->op_points,
			target_freq,
			relation,
			&newstate))) {
		return -EINVAL;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* cpufreq holds the hotplug lock, so we are safe from here on */
	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
	online_policy_cpus = policy->cpus;
#endif

	saved_mask = current->cpus_allowed;
	first_cpu = 1;
	cpus_clear(covered_cpus);
	for_each_cpu_mask(j, online_policy_cpus) {
		/*
		 * Support for SMP systems.
		 * Make sure we are running on CPU that wants to change freq
		 */
		cpus_clear(set_mask);
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			cpus_or(set_mask, set_mask, online_policy_cpus);
		else
			cpu_set(j, set_mask);

		set_cpus_allowed(current, set_mask);
		preempt_disable();
		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
			/* Migration failed — abort (cleanly if nothing was
			   written yet, best-effort undo otherwise). */
			dprintk("couldn't limit to CPUs in this domain\n");
			retval = -EAGAIN;
			if (first_cpu) {
				/* We haven't started the transition yet. */
				goto migrate_end;
			}
			preempt_enable();
			break;
		}

		msr = centrino_model[cpu]->op_points[newstate].index;

		if (first_cpu) {
			/* First pass: read the current PERF_CTL value,
			   short-circuit no-op transitions, and send the
			   PRECHANGE notifications for every policy CPU. */
			rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
			if (msr == (oldmsr & 0xffff)) {
				dprintk("no change needed - msr was and needs "
					"to be %x\n", oldmsr);
				retval = 0;
				goto migrate_end;
			}

			freqs.old = extract_clock(oldmsr, cpu, 0);
			freqs.new = extract_clock(msr, cpu, 0);

			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
				target_freq, freqs.old, freqs.new, msr);

			for_each_cpu_mask(k, online_policy_cpus) {
				freqs.cpu = k;
				cpufreq_notify_transition(&freqs,
					CPUFREQ_PRECHANGE);
			}

			first_cpu = 0;
			/* all but 16 LSB are reserved, treat them with care */
			oldmsr &= ~0xffff;
			msr &= 0xffff;
			oldmsr |= msr;
		}

		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
			/* Hardware coordinates siblings: one write suffices. */
			preempt_enable();
			break;
		}

		cpu_set(j, covered_cpus);
		preempt_enable();
	}

	for_each_cpu_mask(k, online_policy_cpus) {
		freqs.cpu = k;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	if (unlikely(retval)) {
		/*
		 * We have failed halfway through the frequency change.
		 * We have sent callbacks to policy->cpus and
		 * MSRs have already been written on covered_cpus.
		 * Best effort undo..
		 */

		if (!cpus_empty(covered_cpus)) {
			for_each_cpu_mask(j, covered_cpus) {
				set_cpus_allowed(current, cpumask_of_cpu(j));
				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
			}
		}

		/* Re-notify with old/new swapped so listeners see the revert. */
		tmp = freqs.new;
		freqs.new = freqs.old;
		freqs.old = tmp;
		for_each_cpu_mask(j, online_policy_cpus) {
			freqs.cpu = j;
			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
		}
	}

	/*
	 * NOTE(review): 'retval' may still hold -EAGAIN here from a failed
	 * migration, yet the function returns 0 after the best-effort undo —
	 * presumably intentional, but verify against the cpufreq core's
	 * expectations for ->target().
	 */
	set_cpus_allowed(current, saved_mask);
	return 0;

migrate_end:
	/* Early exit before any MSR write: undo preempt_disable() and
	   restore the caller's affinity. */
	preempt_enable();
	set_cpus_allowed(current, saved_mask);
	return 0;
}

/* sysfs attributes exported by this driver (available frequencies). */
static struct freq_attr* centrino_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver centrino_driver = {
	.name		= "centrino", /* should be speedstep-centrino,
					 but there's a 16 char limit */
	.init		= centrino_cpu_init,
	.exit		= centrino_cpu_exit,
	.verify		= centrino_verify,
	.target		= centrino_target,
	.get		= get_cur_freq,
	.attr           = centrino_attr,
	.owner		= THIS_MODULE,
};

/**
 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
 *
 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
 * unsupported devices, -ENOENT if there's no voltage table for this
 * particular CPU model, -EINVAL on problems during initialization,
 * and zero on success.
 *
 * This is quite picky.  Not only does the CPU have to advertise the
 * "est" flag in the cpuid capability flags, we look for a specific
 * CPU model and stepping, and we need to have the exact model name in
 * our voltage tables.  That is, be paranoid about not releasing
 * someone's valuable magic smoke.
*/static int __init centrino_init(void){	struct cpuinfo_x86 *cpu = &cpu_data(0);	if (!cpu_has(cpu, X86_FEATURE_EST))		return -ENODEV;	return cpufreq_register_driver(&centrino_driver);}static void __exit centrino_exit(void){	cpufreq_unregister_driver(&centrino_driver);}MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");MODULE_LICENSE ("GPL");late_initcall(centrino_init);module_exit(centrino_exit);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -