processor_idle.c
From the Linux kernel source code · C code · 1,790 lines total · page 1 of 4
C
1,790 lines
/*
 * NOTE(review): continued from the previous chunk — these braces close the
 * bus-master activity bookkeeping at the top of acpi_processor_idle().
 */
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			/* A task became runnable while we were clearing
			 * TS_POLLING — abort deep idle and go run it. */
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		/* Sentinel: actual C1 residency is unknown (see TBD above). */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	/* Book-keeping: per-state usage count and cumulative residency
	 * (C1 residency is unknown, so it is not accumulated). */
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
*/
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					/* Promote only if recent bus-master
					 * activity is below this state's
					 * bm threshold. */
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

/*
 * Install the default promotion/demotion policy: link each valid C-state to
 * its neighbours and set the tick/count/bus-master thresholds that
 * acpi_processor_idle() consults.  Returns 0 on success, -EINVAL for a NULL
 * processor, -ENODEV when no valid C-state exists.
 */
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3. We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state: first valid C-state becomes the initial one */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion: each valid state demotes to the previous valid one */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion: walk backwards; promote quickly into C2 (count 4 when
	 * already >= C2) but more conservatively out of C1 (count 10) */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */

/*
 * Populate C2/C3 entry addresses and latencies from the processor's P_BLK
 * and the FADT.  Returns 0 on success, -EINVAL for a NULL processor,
 * -ENODEV when no P_BLK is available.
 */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
*/
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

/*
 * Fallback when neither _CST nor the FADT supplied C-state info: ensure a
 * valid C1 state exists (all processors must support C1) and mark the C0
 * array filler valid.  Always returns 0.
 */
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

/*
 * Evaluate the _CST method and validate its package: element 0 holds the
 * C-state count, elements 1..count each describe one C-state (a 4-element
 * package whose first element is a power-register buffer).  The loop body
 * continues past the end of this chunk.
 */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		/* only system-I/O and fixed-hardware registers are usable */
		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?