📄 cpufreq_ondemand.c
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int idle_ticks, total_ticks;
	unsigned int load = 0;
	cputime64_t cur_jiffies;
	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
	this_dbs_info->prev_cpu_wall = get_jiffies_64();

	if (!total_ticks)
		return;

	/*
	 * Every sampling_rate, we check whether current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency.
	 */

	/* Get Idle Time */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		cputime64_t total_idle_ticks;
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	if (likely(total_ticks > idle_ticks))
		load = (100 * (total_ticks - idle_ticks)) / total_ticks;

	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
							 CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
						CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (load < (dbs_tuners_ins.up_threshold - 10)) {
		unsigned int freq_next, freq_cur;

		freq_cur = __cpufreq_driver_getavg(policy);
		if (!freq_cur)
			freq_cur = policy->cur;

		freq_next = (freq_cur * load) /
				(dbs_tuners_ins.up_threshold - 10);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
						CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
							 CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
						CPUFREQ_RELATION_L);
		}
	}
}
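/*
 * Illustration: the decision logic in dbs_check_cpu() is simple
 * proportional control. The sketch below is a standalone userspace
 * program (not part of this file) that mirrors its arithmetic; the
 * tick counts, the 80% threshold, and the kHz values are made-up
 * example numbers, not kernel defaults for any particular version.
 */
#include <stdio.h>

#define UP_THRESHOLD	80	/* example up_threshold, in percent */

/* Mirrors the governor's math: load over the sample window, then a
 * target frequency chosen so the projected load sits 10 points under
 * the up threshold. */
static unsigned int next_freq(unsigned int total_ticks,
			      unsigned int idle_ticks,
			      unsigned int freq_cur,
			      unsigned int freq_max)
{
	unsigned int load = 0;

	if (total_ticks > idle_ticks)
		load = (100 * (total_ticks - idle_ticks)) / total_ticks;

	if (load > UP_THRESHOLD)	/* ramp straight to the maximum */
		return freq_max;

	if (load < UP_THRESHOLD - 10)	/* scale down proportionally */
		return (freq_cur * load) / (UP_THRESHOLD - 10);

	return freq_cur;		/* inside the hysteresis band: hold */
}

int main(void)
{
	/* 100 ticks sampled, 65 idle -> load = 35%; at 800000 kHz the
	 * proportional target is 800000 * 35 / 70 = 400000 kHz. */
	printf("%u kHz\n", next_freq(100, 65, 800000, 1000000));
	return 0;
}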
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
					dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
			      delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_wall = get_jiffies_64();
		}
		this_dbs_info->cpu = cpu;
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}
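/*
 * Illustration: sysfs_create_group() above attaches the "ondemand"
 * attribute group to the policy's kobject, so the tunables declared
 * at the top of this listing typically appear under
 * /sys/devices/system/cpu/cpu0/cpufreq/ondemand/. The standalone
 * userspace sketch below (not part of this file) writes one of them;
 * the helper name is hypothetical and the exact path is an assumption,
 * since the layout varies across kernel versions.
 */
#include <stdio.h>

/* Hypothetical helper: write one ondemand tunable via sysfs. */
static int set_ondemand_tunable(const char *name, unsigned int val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%u\n", val);
	return fclose(f);
}

int main(void)
{
	/* Example: require 90% load before ramping to policy->max. */
	if (set_ondemand_tunable("up_threshold", 90) != 0)
		perror("up_threshold");
	return 0;
}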
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_ondemand);

static int __init cpufreq_gov_dbs_init(void)
{
	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
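/*
 * Illustration: both do_dbs_timer() and dbs_timer_init() compute
 * "delay -= jiffies % delay", which rounds the next wakeup onto the
 * next multiple of the sampling period so that all CPUs sample on
 * nearly the same jiffy. The standalone userspace sketch below (not
 * part of this file) replays that arithmetic with made-up numbers.
 */
#include <stdio.h>

static unsigned long aligned_delay(unsigned long jiffies_now,
				   unsigned long period)
{
	unsigned long delay = period;

	/* distance from "now" to the next period boundary */
	delay -= jiffies_now % delay;
	return delay;
}

int main(void)
{
	/* Period of 50 jiffies, counter at 1234: the next boundary is
	 * 1250, so the computed delay is 16 jiffies. */
	printf("%lu jiffies\n", aligned_delay(1234, 50));
	return 0;
}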