processor_idle.c
From the Linux kernel source tree · C code · 1,790 lines total · page 1 of 4
C
1,790 lines
/*
 * NOTE(review): this excerpt opens mid-way through
 * acpi_processor_get_power_info_cst() -- the head of that function (the
 * _CST package walk that sets up 'element', 'reg', 'cx', 'obj', 'buffer',
 * 'status' and 'current_count') lies before this chunk, so the statements
 * below are the tail of its per-C-state loop body.
 */
		cx.address = reg->address;
		cx.index = current_count + 1;

		/* Default: enter this C-state via a SystemIO register read. */
		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				/* Native MWAIT (FFH) entry is available. */
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		/* _CST package element 3: worst-case latency (microseconds). */
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		/* _CST package element 4: average power consumption. */
		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		/* Entry looks sane -- commit it to the per-processor table. */
		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

/*
 * acpi_processor_power_verify_c2 - sanity-check a C2 state entry.
 * @cx: the C-state entry to validate.
 *
 * Marks @cx valid only when it has an entry address and its latency does
 * not exceed ACPI_PROCESSOR_MAX_C2_LATENCY; also precomputes latency_ticks
 * for later use by the idle-policy code.
 */
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

/*
 * acpi_processor_power_verify_c3 - sanity-check a C3 state entry.
 * @pr: the processor owning the state.
 * @cx: the C-state entry to validate.
 *
 * Rejects C3 when the entry has no address, its latency exceeds
 * ACPI_PROCESSOR_MAX_C3_LATENCY, or the PIIX4 Type-F DMA erratum applies.
 * Otherwise resolves the bus-master requirements (bm_check / bm_control /
 * FADT WBINVD) before declaring the state usable.
 */
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	/*
	 * Computed once and reused for every CPU -- see the "same across
	 * all CPUs" assumption noted below.
	 */
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

/*
 * acpi_processor_power_verify - validate every discovered C-state.
 * @pr: the processor whose power.states table is checked.
 *
 * C1 is always accepted; C2/C3 are delegated to their verify helpers, and
 * valid deeper states are registered with the local-timer broadcast logic.
 * Returns the number of usable states.
 */
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

/*
 * acpi_processor_get_power_info - (re)build the C-state table for @pr.
 *
 * Tries _CST first and falls back to the FADT C2/C3 data when _CST is
 * absent, fills in defaults, verifies each state, and sets pr->flags.power
 * when at least one C2-or-deeper state is usable.  Returns 0 on success or
 * a negative errno.
 */
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/*
 * acpi_processor_power_seq_show - /proc seq_file dump of the C-state table.
 *
 * Prints the active state, global max_cstate, bus-master activity mask and
 * latency constraint, then one line per discovered C-state.  Always
 * returns 0 (seq_file convention).
 */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state: C%zd\n"
		   "max_cstate: C%d\n"
		   "bus master activity: %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		/* '*' marks the state the CPU is currently using. */
		seq_printf(seq, " %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

/* proc open hook: bind the seq_file show routine to this processor. */
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

/* File operations for the /proc power interface above. */
static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE
/*
 * acpi_processor_cst_has_changed - re-evaluate _CST after an ACPI notify.
 * @pr: the processor whose C-state list changed.
 *
 * Temporarily restores the default idle loop, rebuilds the power info, and
 * re-installs acpi_processor_idle when usable states remain.  Returns 0 on
 * success or a negative errno.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	/* Relies on interrupts forcing exit from idle. */
	synchronize_sched();

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};
#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		/* BM_STS is write-1-to-clear. */
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?