/*
 * processor.c
 * From "Linux Kernel 2.6.9 for OMAP1710" -- C source, 2,497 lines total,
 * page 1/5 of this excerpt.
 */
end: /* * New Cx State? * ------------- * If we're going to start using a new Cx state we must clean up * from the previous and prepare to use the new. */ if (next_state != pr->power.state) acpi_processor_power_activate(pr, next_state); return;}static intacpi_processor_set_power_policy ( struct acpi_processor *pr){ ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); /* * This function sets the default Cx state policy (OS idle handler). * Our scheme is to promote quickly to C2 but more conservatively * to C3. We're favoring C2 for its characteristics of low latency * (quick response), good power savings, and ability to allow bus * mastering activity. Note that the Cx state policy is completely * customizable and can be altered dynamically. */ if (!pr) return_VALUE(-EINVAL); /* * C0/C1 * ----- */ pr->power.state = ACPI_STATE_C1; pr->power.default_state = ACPI_STATE_C1; /* * C1/C2 * ----- * Set the default C1 promotion and C2 demotion policies, where we * promote from C1 to C2 after several (10) successive C1 transitions, * as we cannot (currently) measure the time spent in C1. Demote from * C2 to C1 anytime we experience a 'short' (time spent in C2 is less * than the C2 transtion latency). Note the simplifying assumption * that the 'cost' of a transition is amortized when we sleep for at * least as long as the transition's latency (thus the total transition * time is two times the latency). * * TBD: Measure C1 sleep times by instrumenting the core IRQ handler. * TBD: Demote to default C-State after long periods of activity. * TBD: Investigate policy's use of CPU utilization -vs- sleep duration. 
*/ if (pr->power.states[ACPI_STATE_C2].valid) { pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10; pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks = pr->power.states[ACPI_STATE_C2].latency_ticks; pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2; pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1; pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks = pr->power.states[ACPI_STATE_C2].latency_ticks; pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1; } /* * C2/C3 * ----- * Set default C2 promotion and C3 demotion policies, where we promote * from C2 to C3 after several (4) cycles of no bus mastering activity * while maintaining sleep time criteria. Demote immediately on a * short or whenever bus mastering activity occurs. */ if ((pr->power.states[ACPI_STATE_C2].valid) && (pr->power.states[ACPI_STATE_C3].valid)) { pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4; pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks = pr->power.states[ACPI_STATE_C3].latency_ticks; pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F; pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3; pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1; pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks = pr->power.states[ACPI_STATE_C3].latency_ticks; pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F; pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2; } return_VALUE(0);}intacpi_processor_get_power_info ( struct acpi_processor *pr){ int result = 0; ACPI_FUNCTION_TRACE("acpi_processor_get_power_info"); if (!pr) return_VALUE(-EINVAL); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "lvl2[0x%08x] lvl3[0x%08x]\n", pr->power.states[ACPI_STATE_C2].address, pr->power.states[ACPI_STATE_C3].address)); /* TBD: Support ACPI 2.0 objects */ /* * C0 * -- * This state exists only as filler in our array. 
*/ pr->power.states[ACPI_STATE_C0].valid = 1; /* * C1 * -- * ACPI requires C1 support for all processors. * * TBD: What about PROC_C1? */ pr->power.states[ACPI_STATE_C1].valid = 1; /* * C2 * -- * We're (currently) only supporting C2 on UP systems. * * TBD: Support for C2 on MP (P_LVL2_UP). */ if (pr->power.states[ACPI_STATE_C2].address) { pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; /* * C2 latency must be less than or equal to 100 microseconds. */ if (acpi_fadt.plvl2_lat > ACPI_PROCESSOR_MAX_C2_LATENCY) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C2 latency too large [%d]\n", acpi_fadt.plvl2_lat)); /* * Only support C2 on UP systems (see TBD above). */ else if (errata.smp) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C2 not supported in SMP mode\n")); /* * Otherwise we've met all of our C2 requirements. * Normalize the C2 latency to expidite policy. */ else { pr->power.states[ACPI_STATE_C2].valid = 1; pr->power.states[ACPI_STATE_C2].latency_ticks = US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat); } } /* * C3 * -- * TBD: Investigate use of WBINVD on UP/SMP system in absence of * bm_control. */ if (pr->power.states[ACPI_STATE_C3].address) { pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; /* * C3 latency must be less than or equal to 1000 microseconds. */ if (acpi_fadt.plvl3_lat > ACPI_PROCESSOR_MAX_C3_LATENCY) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 latency too large [%d]\n", acpi_fadt.plvl3_lat)); /* * Only support C3 when bus mastering arbitration control * is present (able to disable bus mastering to maintain * cache coherency while in C3). */ else if (!pr->flags.bm_control) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 support requires bus mastering control\n")); /* * Only support C3 on UP systems, as bm_control is only viable * on a UP system and flushing caches (e.g. WBINVD) is simply * too costly (at this time). 
*/ else if (errata.smp) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 not supported in SMP mode\n")); /* * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) * DMA transfers are used by any ISA device to avoid livelock. * Note that we could disable Type-F DMA (as recommended by * the erratum), but this is known to disrupt certain ISA * devices thus we take the conservative approach. */ else if (errata.piix4.fdma) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 not supported on PIIX4 with Type-F DMA\n")); } /* * Otherwise we've met all of our C3 requirements. * Normalize the C2 latency to expidite policy. Enable * checking of bus mastering status (bm_check) so we can * use this in our C3 policy. */ else { pr->power.states[ACPI_STATE_C3].valid = 1; pr->power.states[ACPI_STATE_C3].latency_ticks = US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat); pr->flags.bm_check = 1; } } /* * Set Default Policy * ------------------ * Now that we know which state are supported, set the default * policy. Note that this policy can be changed dynamically * (e.g. encourage deeper sleeps to conserve battery life when * not on AC). */ result = acpi_processor_set_power_policy(pr); if (result) return_VALUE(result); /* * If this processor supports C2 or C3 we denote it as being 'power * manageable'. Note that there's really no policy involved for * when only C1 is supported. */ if (pr->power.states[ACPI_STATE_C2].valid || pr->power.states[ACPI_STATE_C3].valid) pr->flags.power = 1; return_VALUE(0);}/* -------------------------------------------------------------------------- Performance Management -------------------------------------------------------------------------- */#ifdef CONFIG_CPU_FREQstatic DECLARE_MUTEX(performance_sem);/* * _PPC support is implemented as a CPUfreq policy notifier: * This means each time a CPUfreq driver registered also with * the ACPI core is asked to change the speed policy, the maximum * value is adjusted so that it is within the platform limit. 
* * Also, when a new platform limit value is detected, the CPUfreq * policy is adjusted accordingly. */static int acpi_processor_ppc_is_init = 0;static int acpi_processor_ppc_notifier(struct notifier_block *nb, unsigned long event, void *data){ struct cpufreq_policy *policy = data; struct acpi_processor *pr; unsigned int ppc = 0; down(&performance_sem); if (event != CPUFREQ_INCOMPATIBLE) goto out; pr = processors[policy->cpu]; if (!pr || !pr->performance) goto out; ppc = (unsigned int) pr->performance_platform_limit; if (!ppc) goto out; if (ppc > pr->performance->state_count) goto out; cpufreq_verify_within_limits(policy, 0, pr->performance->states[ppc].core_frequency * 1000); out: up(&performance_sem); return 0;}static struct notifier_block acpi_ppc_notifier_block = { .notifier_call = acpi_processor_ppc_notifier,};static intacpi_processor_get_platform_limit ( struct acpi_processor* pr){ acpi_status status = 0; unsigned long ppc = 0; ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit"); if (!pr) return_VALUE(-EINVAL); /* * _PPC indicates the maximum state currently supported by the platform * (e.g. 0 = states 0..n; 1 = states 1..n; etc. 
*/ status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); if(ACPI_FAILURE(status) && status != AE_NOT_FOUND) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n")); return_VALUE(-ENODEV); } pr->performance_platform_limit = (int) ppc; return_VALUE(0);}static int acpi_processor_ppc_has_changed( struct acpi_processor *pr){ int ret = acpi_processor_get_platform_limit(pr); if (ret < 0) return (ret); else return cpufreq_update_policy(pr->id);}static void acpi_processor_ppc_init(void) { if (!cpufreq_register_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) acpi_processor_ppc_is_init = 1; else printk(KERN_DEBUG "Warning: Processor Platform Limit not supported.\n");}static void acpi_processor_ppc_exit(void) { if (acpi_processor_ppc_is_init) cpufreq_unregister_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER); acpi_processor_ppc_is_init = 0;}/* * when registering a cpufreq driver with this ACPI processor driver, the * _PCT and _PSS structures are read out and written into struct * acpi_processor_performance. */static int acpi_processor_set_pdc (struct acpi_processor *pr){ acpi_status status = AE_OK; u32 arg0_buf[3]; union acpi_object arg0 = {ACPI_TYPE_BUFFER}; struct acpi_object_list no_object = {1, &arg0}; struct acpi_object_list *pdc; ACPI_FUNCTION_TRACE("acpi_processor_set_pdc"); arg0.buffer.length = 12; arg0.buffer.pointer = (u8 *) arg0_buf; arg0_buf[0] = ACPI_PDC_REVISION_ID; arg0_buf[1] = 0; arg0_buf[2] = 0; pdc = (pr->performance->pdc) ? pr->performance->pdc : &no_object; status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL); if ((ACPI_FAILURE(status)) && (pr->performance->pdc)) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Error evaluating _PDC, using legacy perf. 
control...\n")); return_VALUE(status);}static int acpi_processor_get_performance_control ( struct acpi_processor *pr){ int result = 0; acpi_status status = 0; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *pct = NULL; union acpi_object obj = {0}; ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control"); status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); if(ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n")); return_VALUE(-ENODEV); } pct = (union acpi_object *) buffer.pointer; if (!pct || (pct->type != ACPI_TYPE_PACKAGE) || (pct->package.count != 2)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n")); result = -EFAULT; goto end; } /* * control_register */ obj = pct->package.elements[0]; if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_pct_register)) || (obj.buffer.pointer == NULL)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data (control_register)\n")); result = -EFAULT; goto end; } memcpy(&pr->performance->control_register, obj.buffer.pointer, sizeof(struct acpi_pct_register)); /* * status_register */ obj = pct->package.elements[1]; if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_pct_register)) || (obj.buffer.pointer == NULL)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data (status_register)\n")); result = -EFAULT; goto end; } memcpy(&pr->performance->status_register, obj.buffer.pointer, sizeof(struct acpi_pct_register));end: acpi_os_free(buffer.pointer); return_VALUE(result);}static int acpi_processor_get_performance_states ( struct acpi_processor *pr){ int result = 0; acpi_status status = AE_OK; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_buffer format = {sizeof("NNNNNN"), "NNNNNN"}; struct acpi_buffer state = {0, NULL}; union acpi_object *pss = NULL; int i = 0; ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states"); status = acpi_evaluate_object(pr->handle, "_PSS", NULL, 
&buffer); if(ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n")); return_VALUE(-ENODEV); } pss = (union acpi_object *) buffer.pointer; if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n")); result = -EFAULT; goto end; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", pss->package.count)); pr->performance->state_count = pss->package.count; pr->performance->states = kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, GFP_KERNEL); if (!pr->performance->states) { result = -ENOMEM; goto end; } for (i = 0; i < pr->performance->state_count; i++) { struct acpi_processor_px *px = &(pr->performance->states[i]); state.length = sizeof(struct acpi_processor_px);
/* (page-viewer UI chrome removed: keyboard-shortcut help text from the code
 * viewer, not part of the original kernel source) */