📄 cpufreq.c
字号:
    /*
     * (continuation of acpi_cpufreq_target — the function header is above this
     * chunk)  Maps target_freq to a P-state, programs it via MSR or IO port,
     * verifies the transition, and commits the new state.
     */
    result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state);
    if (unlikely(result))
        return -ENODEV;

    online_policy_cpus = policy->cpus;

    next_perf_state = data->freq_table[next_state].index;
    if (perf->state == next_perf_state) {
        /*
         * Already at the requested P-state.  After resume we still fall
         * through and rewrite the hardware register, since firmware may
         * have changed the actual frequency behind our back.
         */
        if (unlikely(policy->resume)) {
            printk(KERN_INFO "Called after resume, resetting to P%d\n", next_perf_state);
            policy->resume = 0;
        } else {
            printk(KERN_INFO "Already at target state (P%d)\n", next_perf_state);
            return 0;
        }
    }

    /* Build the write command for whichever control interface this CPU has. */
    switch (data->cpu_feature) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
        cmd.val = (u32) perf->states[next_perf_state].control;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        cmd.val = (u32) perf->states[next_perf_state].control;
        break;
    default:
        return -ENODEV;
    }

    cpus_clear(cmd.mask);

    /*
     * Unless hardware coordinates the siblings itself (SHARED_TYPE_ANY),
     * the write must be performed on every CPU in the policy.
     */
    if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
        cmd.mask = online_policy_cpus;
    else
        cpu_set(policy->cpu, cmd.mask);

    /* core_frequency is in MHz, cpufreq works in kHz. */
    freqs.old = perf->states[perf->state].core_frequency * 1000;
    freqs.new = data->freq_table[next_state].frequency;

    drv_write(&cmd);

    /* Verify the transition actually took effect before committing state. */
    if (!check_freqs(cmd.mask, freqs.new, data))
        return -EAGAIN;

    px_statistic_update(cmd.mask, perf->state, next_perf_state);

    perf->state = next_perf_state;
    policy->cur = freqs.new;

    return result;
}

/*
 * Estimate the current frequency for CPUs whose P-state cannot be read
 * back (IO-port controlled).  Picks the P-state whose frequency is
 * closest to the measured cpu_khz, records it in perf->state, and
 * returns that frequency in kHz.
 */
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
    struct processor_performance *perf = data->acpi_data;

    if (cpu_khz) {
        /* search the closest match to cpu_khz */
        unsigned int i;
        unsigned long freq;
        unsigned long freqn = perf->states[0].core_frequency * 1000;

        /* States are assumed sorted from fastest (P0) downwards. */
        for (i=0; i<(perf->state_count-1); i++) {
            freq = freqn;
            freqn = perf->states[i+1].core_frequency * 1000;
            /* cpu_khz closer to freq than to freqn => pick state i. */
            if ((2 * cpu_khz) > (freqn + freq)) {
                perf->state = i;
                return freq;
            }
        }
        /* Slower than every listed state: report the lowest one. */
        perf->state = perf->state_count-1;
        return freqn;
    } else {
        /* assume CPU is at P0... */
        perf->state = 0;
        return perf->states[0].core_frequency * 1000;
    }
}

/*
 * Per-CPU cpufreq initialisation: allocate driver data, validate the
 * ACPI P-state information, build the frequency table, and determine
 * the current frequency.  Returns 0 on success or a -errno value;
 * on failure all allocations are released and drv_data[cpu] is reset.
 */
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    unsigned int valid_states = 0;
    unsigned int cpu = policy->cpu;
    struct acpi_cpufreq_data *data;
    unsigned int result = 0;
    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
    struct processor_performance *perf;

    data = xmalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;
    memset(data, 0, sizeof(struct acpi_cpufreq_data));

    drv_data[cpu] = data;

    data->acpi_data = &processor_pminfo[cpu].perf;

    perf = data->acpi_data;
    policy->shared_type = perf->shared_type;

    /*
     * Currently the latest linux (kernel version 2.6.26) still has
     * issues when handling the _PSD HW_ALL coordination situation.
     * In the Xen hypervisor, we handle _PSD HW_ALL coordination the
     * same way as _PSD SW_ALL coordination, for the sake of safety.
     */
    policy->cpus = perf->shared_cpu_map;

    /* capability check */
    if (perf->state_count <= 1) {
        printk("No P-States\n");
        result = -ENODEV;
        goto err_unreg;
    }

    /* Control and status must live in the same address space. */
    if (perf->control_register.space_id != perf->status_register.space_id) {
        result = -ENODEV;
        goto err_unreg;
    }

    switch (perf->control_register.space_id) {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        printk("xen_pminfo: @acpi_cpufreq_cpu_init," "SYSTEM IO addr space\n");
        data->cpu_feature = SYSTEM_IO_CAPABLE;
        break;
    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        printk("xen_pminfo: @acpi_cpufreq_cpu_init," "HARDWARE addr space\n");
        /* FIXED_HARDWARE control requires Enhanced SpeedStep support. */
        if (!check_est_cpu(cpu)) {
            result = -ENODEV;
            goto err_unreg;
        }
        data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
        break;
    default:
        result = -ENODEV;
        goto err_unreg;
    }

    /* One extra entry for the CPUFREQ_TABLE_END terminator. */
    data->freq_table = xmalloc_array(struct cpufreq_frequency_table, (perf->state_count+1));
    if (!data->freq_table) {
        result = -ENOMEM;
        goto err_unreg;
    }

    /* detect transition latency: take the worst case over all states (ns->us scaling assumed — TODO confirm units) */
    policy->cpuinfo.transition_latency = 0;
    for (i=0; i<perf->state_count; i++) {
        if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
            policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
    }

    data->max_freq = perf->states[0].core_frequency * 1000;
    /* table init: skip states that do not decrease in frequency (duplicates) */
    for (i=0; i<perf->state_count; i++) {
        if (i>0 && perf->states[i].core_frequency >= data->freq_table[valid_states-1].frequency / 1000)
            continue;
        data->freq_table[valid_states].index = i;
        data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000;
        valid_states++;
    }
    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
    perf->state = 0;

    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
    if (result)
        goto err_freqfree;

    /* Determine the current frequency. */
    switch (perf->control_register.space_id) {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        /* Current speed is unknown and not detectable by IO port */
        policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
        break;
    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
        policy->cur = get_cur_freq_on_cpu(cpu);
        break;
    default:
        break;
    }

    /* Check for APERF/MPERF support in hardware */
    if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
        unsigned int ecx;
        ecx = cpuid_ecx(6);
        if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
            acpi_cpufreq_driver.getavg = get_measured_perf;
    }

    /*
     * the first call to ->target() should result in us actually
     * writing something to the appropriate registers.
     */
    policy->resume = 1;

    return result;

err_freqfree:
    xfree(data->freq_table);
err_unreg:
    xfree(data);
    drv_data[cpu] = NULL;

    return result;
}

static struct cpufreq_driver acpi_cpufreq_driver = {
    .target = acpi_cpufreq_target,
    .init = acpi_cpufreq_cpu_init,
};

/* Tear down the PSD-domain bookkeeping built by cpufreq_dom_init(). */
void cpufreq_dom_exit(void)
{
    cpufreq_dom_max = 0;
    cpus_clear(cpufreq_dom_mask);
    if (cpufreq_dom_pt)
        xfree(cpufreq_dom_pt);
}

/*
 * Build the _PSD coordination-domain map: record which domain IDs exist
 * (cpufreq_dom_mask), allocate one cpumask per domain (cpufreq_dom_pt),
 * populate each with its member CPUs, and point every CPU's
 * shared_cpu_map at its domain's mask.  Returns 0 or -ENOMEM.
 */
int cpufreq_dom_init(void)
{
    unsigned int i;

    cpufreq_dom_max = 0;
    cpus_clear(cpufreq_dom_mask);

    for_each_online_cpu(i) {
        cpu_set(processor_pminfo[i].perf.domain_info.domain, cpufreq_dom_mask);
        if (cpufreq_dom_max < processor_pminfo[i].perf.domain_info.domain)
            cpufreq_dom_max = processor_pminfo[i].perf.domain_info.domain;
    }
    /* domain IDs are 0-based, so the array needs max+1 slots */
    cpufreq_dom_max++;

    cpufreq_dom_pt = xmalloc_array(cpumask_t, cpufreq_dom_max);
    if (!cpufreq_dom_pt)
        return -ENOMEM;
    memset(cpufreq_dom_pt, 0, cpufreq_dom_max * sizeof(cpumask_t));

    for_each_online_cpu(i)
        cpu_set(i, cpufreq_dom_pt[processor_pminfo[i].perf.domain_info.domain]);

    for_each_online_cpu(i)
        processor_pminfo[i].perf.shared_cpu_map = cpufreq_dom_pt[processor_pminfo[i].perf.domain_info.domain];

    return 0;
}

/*
 * Initialise per-CPU statistics and cpufreq state for every online CPU.
 * Stops at the first failure and returns its error code.  NOTE(review):
 * CPUs already initialised in earlier iterations are not rolled back
 * here — presumably the caller's error path handles cleanup; confirm.
 */
static int cpufreq_cpu_init(void)
{
    int i, ret = 0;

    for_each_online_cpu(i) {
        xen_px_policy[i].cpu = i;

        ret = px_statistic_init(i);
        if (ret)
            return ret;

        ret = acpi_cpufreq_cpu_init(&xen_px_policy[i]);
        if (ret)
            return ret;
    }
    return ret;
}

/*
 * Deliver a governor event to one representative (first) CPU of each
 * coordination domain.  Returns 0, or the first non-zero governor result.
 */
int cpufreq_dom_dbs(unsigned int event)
{
    int cpu, dom, ret = 0;

    for (dom=0; dom<cpufreq_dom_max; dom++) {
        if (!cpu_isset(dom, cpufreq_dom_mask))
            continue;
        cpu = first_cpu(cpufreq_dom_pt[dom]);
        ret = cpufreq_governor_dbs(&xen_px_policy[cpu], event);
        if (ret)
            return ret;
    }
    return ret;
}

/*
 * Driver entry point: set up PSD domains, register this driver as the
 * active cpufreq driver, initialise each CPU, and start the governor.
 * On any failure the domain bookkeeping is torn down and the error
 * returned.
 */
int acpi_cpufreq_init(void)
{
    int ret = 0;

    /* setup cpumask of psd dom and shared cpu map of cpu */
    ret = cpufreq_dom_init();
    if (ret)
        goto err;

    /* setup cpufreq driver */
    cpufreq_driver = &acpi_cpufreq_driver;

    /* setup cpufreq infrastructure */
    ret = cpufreq_cpu_init();
    if (ret)
        goto err;

    /* setup cpufreq dbs according to dom coordination */
    ret = cpufreq_dom_dbs(CPUFREQ_GOV_START);
    if (ret)
        goto err;

    return ret;

err:
    cpufreq_dom_exit();
    return ret;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -