/*
 * processor.c — excerpt from Linux Kernel 2.6.9 (OMAP1710 port),
 * drivers/acpi ACPI processor driver.
 * This is page 1 of 5 of a 2,497-line file; the excerpt begins and
 * ends mid-function.
 */
		/*
		 * NOTE(review): this excerpt begins mid-function — the lines below
		 * are the tail of acpi_processor_get_performance_states(); the loop
		 * over the _PSS package entries opens above this excerpt.
		 */
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		/* Decode one _PSS package entry into this performance-state slot. */
		status = acpi_extract_package(&(pss->package.elements[i]),
			&format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
			i,
			(u32) px->core_frequency,
			(u32) px->power,
			(u32) px->transition_latency,
			(u32) px->bus_master_latency,
			(u32) px->control,
			(u32) px->status));

		/* A zero core frequency marks a bogus _PSS entry: reject the table. */
		if (!px->core_frequency) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data: freq is zero\n"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}
	}

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(result);
}

/*
 * Gather all ACPI performance data for a processor: probe for _PCT, then
 * read the performance control registers (_PCT), the performance states
 * (_PSS) and the platform limit (_PPC).
 * Returns 0 on success or a negative errno (-EINVAL, -ENODEV, or whatever
 * the helper routines return).
 */
static int
acpi_processor_get_performance_info (
	struct acpi_processor	*pr)
{
	int			result = 0;
	acpi_status		status = AE_OK;
	acpi_handle		handle = NULL;

	ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

	if (!pr || !pr->performance || !pr->handle)
		return_VALUE(-EINVAL);

	acpi_processor_set_pdc(pr);

	/* No _PCT object means no ACPI performance control at all. */
	status = acpi_get_handle(pr->handle, "_PCT", &handle);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"ACPI-based processor performance control unavailable\n"));
		return_VALUE(-ENODEV);
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		return_VALUE(result);

	result = acpi_processor_get_performance_states(pr);
	if (result)
		return_VALUE(result);

	result = acpi_processor_get_platform_limit(pr);
	if (result)
		return_VALUE(result);

	return_VALUE(0);
}


#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
	.open 		= acpi_processor_perf_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * seq_file show routine: dump the performance-state count, the active
 * P-state, and one line per state (frequency/power/latency).  The active
 * state is marked with '*'.  Always returns 0 (prints "<not supported>"
 * when no performance data is attached).
 */
static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor	*pr = (struct acpi_processor *)seq->private;
	int			i = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

	if (!pr)
		goto end;

	if (!pr->performance) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	seq_printf(seq, "state count:             %d\n"
			"active state:            P%d\n",
			pr->performance->state_count,
			pr->performance->state);

	seq_puts(seq, "states:\n");
	for (i = 0; i < pr->performance->state_count; i++)
		seq_printf(seq, "   %cP%d:                  %d MHz, %d mW, %d uS\n",
			(i == pr->performance->state?'*':' '), i,
			(u32) pr->performance->states[i].core_frequency,
			(u32) pr->performance->states[i].power,
			(u32) pr->performance->states[i].transition_latency);

end:
	return 0;
}

/* open routine: bind the seq_file to the proc entry's private data. */
static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_perf_seq_show,
						PDE(inode)->data);
}

/*
 * Write handler for the proc 'performance' file: parse a P-state index
 * from user space and pin the cpufreq policy (min == max) to that state's
 * frequency.  Returns @count on success or a negative errno.
 */
static ssize_t
acpi_processor_write_performance (
        struct file		*file,
        const char		__user *buffer,
        size_t			count,
        loff_t			*data)
{
	int			result = 0;
	struct seq_file		*m = (struct seq_file *) file->private_data;
	struct acpi_processor	*pr = (struct acpi_processor *) m->private;
	struct acpi_processor_performance *perf;
	char			state_string[12] = {'\0'};
	unsigned int            new_state = 0;
	struct cpufreq_policy   policy;

	ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

	/* Reject writes that would not fit (leave room for the NUL). */
	if (!pr || (count > sizeof(state_string) - 1))
		return_VALUE(-EINVAL);

	perf = pr->performance;
	if (!perf)
		return_VALUE(-EINVAL);

	if (copy_from_user(state_string, buffer, count))
		return_VALUE(-EFAULT);

	state_string[count] = '\0';
	new_state = simple_strtoul(state_string, NULL, 0);

	if (new_state >= perf->state_count)
		return_VALUE(-EINVAL);

	cpufreq_get_policy(&policy, pr->id);

	policy.cpu = pr->id;
	/* core_frequency is in MHz; cpufreq wants kHz. */
	policy.min = perf->states[new_state].core_frequency * 1000;
	policy.max = perf->states[new_state].core_frequency * 1000;

	result = cpufreq_set_policy(&policy);
	if (result)
		return_VALUE(result);

	return_VALUE(count);
}

/*
 * Create the R/W 'performance' file under the processor's proc directory.
 * Failure to create the entry is logged but not propagated.
 */
static void
acpi_cpufreq_add_file (
	struct acpi_processor *pr)
{
	struct proc_dir_entry	*entry = NULL;
	struct acpi_device	*device = NULL;

	ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile");

	if (acpi_bus_get_device(pr->handle, &device))
		return_VOID;

	/* add file 'performance' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
		  S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
			"Unable to create '%s' fs entry\n",
			ACPI_PROCESSOR_FILE_PERFORMANCE));
	else {
		entry->proc_fops = &acpi_processor_perf_fops;
		/*
		 * NOTE(review): this writes through the pointer just stored
		 * above and so mutates the shared static
		 * acpi_processor_perf_fops for every entry.  Setting .write
		 * in the struct initializer would be cleaner — verify no
		 * other user of the fops depends on .write being unset.
		 */
		entry->proc_fops->write = acpi_processor_write_performance;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return_VOID;
}

/* Remove the proc 'performance' file created by acpi_cpufreq_add_file(). */
static void
acpi_cpufreq_remove_file (
	struct acpi_processor *pr)
{
	struct acpi_device	*device = NULL;

	/*
	 * NOTE(review): trace tag says "addfile" — copy/paste from
	 * acpi_cpufreq_add_file().  Harmless, but misleading in traces.
	 */
	ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile");

	if (acpi_bus_get_device(pr->handle, &device))
		return_VOID;

	/* remove file 'performance' */
	remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
		  acpi_device_dir(device));

	return_VOID;
}

#else
/* No-op stubs when the deprecated proc interface is not configured. */
static void acpi_cpufreq_add_file (struct acpi_processor *pr) { return; }
static void acpi_cpufreq_remove_file (struct acpi_processor *pr) { return; }
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

/*
 * Attach a cpufreq driver's performance structure to a processor and
 * populate it from ACPI (_PCT/_PSS/_PPC), then expose the proc file.
 * Serialized by performance_sem.  Returns 0, or -EINVAL (ppc layer not
 * initialized), -ENODEV (no such processor), -EBUSY (already registered),
 * -EIO (ACPI data unreadable).
 */
int
acpi_processor_register_performance (
	struct acpi_processor_performance * performance,
	unsigned int cpu)
{
	struct acpi_processor *pr;

	ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

	if (!acpi_processor_ppc_is_init)
		return_VALUE(-EINVAL);

	down(&performance_sem);

	pr = processors[cpu];
	if (!pr) {
		up(&performance_sem);
		return_VALUE(-ENODEV);
	}

	/* Only one cpufreq driver may own the performance data at a time. */
	if (pr->performance) {
		up(&performance_sem);
		return_VALUE(-EBUSY);
	}

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		/* Roll back the attach on failure. */
		pr->performance = NULL;
		up(&performance_sem);
		return_VALUE(-EIO);
	}

	acpi_cpufreq_add_file(pr);

	up(&performance_sem);
	return_VALUE(0);
}
EXPORT_SYMBOL(acpi_processor_register_performance);

/*
 * Undo acpi_processor_register_performance(): free the state table,
 * detach the performance structure and remove the proc file.
 * Serialized by performance_sem.
 */
void
acpi_processor_unregister_performance (
	struct acpi_processor_performance * performance,
	unsigned int cpu)
{
	struct acpi_processor *pr;

	ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

	if (!acpi_processor_ppc_is_init)
		return_VOID;

	down(&performance_sem);

	pr = processors[cpu];
	if (!pr) {
		up(&performance_sem);
		return_VOID;
	}

	kfree(pr->performance->states);
	pr->performance = NULL;

	acpi_cpufreq_remove_file(pr);

	up(&performance_sem);

	return_VOID;
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);


/* for the rest of it, check arch/i386/kernel/cpu/cpufreq/acpi.c */

#else  /* !CONFIG_CPU_FREQ */

static void acpi_processor_ppc_init(void) { return; }
static void acpi_processor_ppc_exit(void) { return; }

/*
 * Without CPUfreq support we cannot act on a platform-limit (_PPC)
 * notification; warn once so the user knows the event was dropped.
 */
static int acpi_processor_ppc_has_changed(struct acpi_processor *pr) {
	static unsigned int printout = 1;
	if (printout) {
		printk(KERN_WARNING "Warning: Processor Platform Limit event detected, but not handled.\n");
		printk(KERN_WARNING "Consider compiling CPUfreq support into your kernel.\n");
		printout = 0;
	}
	return 0;
}

#endif /* CONFIG_CPU_FREQ */

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */

/*
 * Read the current throttling (T-)state from the P_CNT I/O port and cache
 * it in pr->throttling.state.  Runs with local IRQs disabled around the
 * port read.  Returns 0, -EINVAL on NULL @pr, or -ENODEV when throttling
 * is not supported on this processor.
 */
static int
acpi_processor_get_throttling (
	struct acpi_processor	*pr)
{
	int			state = 0;
	u32			value = 0;
	u32			duty_mask = 0;
	u32			duty_value = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_get_throttling");

	if (!pr)
		return_VALUE(-EINVAL);

	if (!pr->flags.throttling)
		return_VALUE(-ENODEV);

	pr->throttling.state = 0;

	local_irq_disable();

	/* Mask of the duty-cycle field within the control register. */
	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		/* duty_value == 0 encodes "full duty" — i.e. state 0. */
		if (duty_value)
			state = pr->throttling.state_count-duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Throttling state is T%d (%d%% throttling applied)\n",
		state, pr->throttling.states[state].performance));

	return_VALUE(0);
}

/*
 * Program a new throttling (T-)state via the P_CNT I/O port.  The write
 * sequence (disable, update duty value, re-enable) runs with local IRQs
 * disabled.  State 0 leaves throttling disabled entirely.  Returns 0,
 * -EINVAL for a NULL @pr or out-of-range @state, -ENODEV when throttling
 * is unsupported.
 */
static int
acpi_processor_set_throttling (
	struct acpi_processor	*pr,
	int			state)
{
	u32                     value = 0;
	u32                     duty_mask = 0;
	u32                     duty_value = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_set_throttling");

	if (!pr)
		return_VALUE(-EINVAL);

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return_VALUE(-EINVAL);

	if (!pr->flags.throttling)
		return_VALUE(-ENODEV);

	/* Already there — avoid touching the hardware. */
	if (state == pr->throttling.state)
		return_VALUE(0);

	local_irq_disable();

	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		/*
		 * NOTE(review): the mask is shifted by acpi_fadt.duty_offset
		 * while duty_value above uses pr->throttling.duty_offset —
		 * presumably these are always equal; verify against where
		 * pr->throttling.duty_offset is initialized.
		 */
		duty_mask <<= acpi_fadt.duty_offset;
		duty_mask = ~duty_mask;
	}

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that we must
	 * turn it off before you can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Throttling state set to T%d (%d%%)\n", state,
		(pr->throttling.states[state].performance?pr->throttling.states[state].performance/10:0)));

	return_VALUE(0);
}

/*
 * Validate the processor's throttling capabilities and build the T-state
 * table.  (Function continues past the end of this excerpt.)
 */
static int
acpi_processor_get_throttling_info (
	struct acpi_processor	*pr)
{
	int			result = 0;
	int			step = 0;
	int			i = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info");

	/*
	 * NOTE(review): this debug print dereferences 'pr' BEFORE the
	 * !pr check below — a NULL caller would fault here first.  The
	 * check should precede the print.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
		pr->throttling.address,
		pr->throttling.duty_offset,
		pr->throttling.duty_width));

	if (!pr)
		return_VALUE(-EINVAL);

	/* TBD: Support ACPI 2.0 objects */

	if (!pr->throttling.address) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
		return_VALUE(0);
	}
	else if (!pr->throttling.duty_width) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
		return_VALUE(0);
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset
		+ pr->throttling.duty_width) > 4) {
		ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n"));
		return_VALUE(0);
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Throttling not supported on PIIX4 A- or B-step\n"));
		return_VALUE(0);
	}

	pr->throttling.state_count = 1 << acpi_fadt.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power/
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
	 */

/* (end of excerpt — the file continues; code-viewer UI text removed) */