/*
 * cpufreq.c
 * From the Linux kernel source tree ("linux 内核源代码") — C code,
 * 1,883 lines total; this paste is page 1 of 4 of a paginated viewer,
 * so the chunk below is incomplete at both ends.
 */
\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf (buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	/* remember the user's request so later policy updates		\
	 * re-apply it (user_policy mirrors the accepted value)	*/	\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

/* Instantiate the sysfs "store" handlers for the writable limits:
 * store_scaling_min_freq / store_scaling_max_freq. */
store_one(scaling_min_freq,min);
store_one(scaling_max_freq,max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 *
 * Queries the hardware via __cpufreq_get(); a return of 0 means the
 * frequency could not be determined.
 * NOTE(review): the "<unknown>" case omits the trailing newline that the
 * numeric case prints -- confirm this is intentional.
 */
static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
	char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * For setpolicy-style drivers the policy itself is "powersave" or
 * "performance"; otherwise the attached governor's name is printed.
 * Returns -EINVAL if neither a fixed policy nor a governor is set.
 */
static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
{
	if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor (or fixed-policy) name from @buf, resolves it via
 * cpufreq_parse_governor(), and applies it with __cpufreq_set_policy().
 * On success the accepted policy/governor are mirrored into user_policy.
 * NOTE(review): @ret is declared unsigned int yet carries negative error
 * codes and is returned as ssize_t -- works via sign conversion, but an
 * int would be the correct type; confirm before changing.
 */
static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
				       const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	/* "%15s" below bounds the scan to 15 chars + NUL for this buffer */
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf (buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver 
(struct cpufreq_policy * policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * Setpolicy-style drivers (no ->target) only support the two fixed
 * policies; target-style drivers list every registered governor from
 * cpufreq_governor_list.
 */
static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
				char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop while there is still room in the PAGE_SIZE sysfs
		 * buffer for one more name, a space and the final "\n" */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
				    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 *
 * Prints the CPU numbers in policy->cpus as a space-separated list,
 * bounded so the output (plus separators and newline) fits in PAGE_SIZE.
 */
static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)	/* separator before every entry but the first */
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/* Helpers declaring freq_attr sysfs attributes with the usual mode bits:
 * 0444 world-readable, 0400 root-only readable, 0644 root-writable. */
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

/* cpuinfo_cur_freq is 0400: reading it may touch hardware */
define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

/* Attributes created for every policy kobject; cpuinfo_cur_freq and
 * scaling_cur_freq are added conditionally in cpufreq_add_dev(). */
static struct attribute * default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};

#define 
to_policy(k) container_of(k,struct cpufreq_policy,kobj)
#define to_attr(a) container_of(a,struct freq_attr,attr)

/*
 * Generic sysfs show dispatcher for the per-policy kobject: takes a
 * reference on the policy and the per-CPU policy rwsem (read side),
 * then forwards to the attribute's own ->show handler.
 */
static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	/* re-lookup via cpufreq_cpu_get() so we hold a refcount while
	 * the handler runs; fails if the policy is going away */
	policy = cpufreq_cpu_get(policy->cpu);

	if (!policy)
		return -EINVAL;
	/* NOTE(review): if the rwsem lock fails here, the reference taken
	 * above is not dropped -- looks like a leak; confirm upstream. */
	if (lock_policy_rwsem_read(policy->cpu) < 0)
		return -EINVAL;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}

/*
 * Generic sysfs store dispatcher: same refcount/locking pattern as
 * show(), but takes the policy rwsem in write mode.
 */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);

	if (!policy)
		return -EINVAL;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		return -EINVAL;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}

/*
 * kobject release callback: the last reference is gone, so wake up
 * whoever is blocked in wait_for_completion(&policy->kobj_unregister)
 * (see the teardown paths in cpufreq_add_dev()).
 */
static void cpufreq_sysfs_release(struct kobject * kobj)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device. 
*/
/*
 * Flow: allocate and init a policy, let the driver fill it in, hook the
 * CPU up to an already-managed sibling policy if one exists, otherwise
 * register the sysfs kobject, publish the policy in cpufreq_cpu_data[],
 * symlink affected CPUs, and finally apply the default policy.  Errors
 * unwind through the goto ladder at the bottom, most-complete state first.
 */
static int cpufreq_add_dev (struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	/* offline CPUs get no cpufreq interface */
	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	/* pin the driver module for the duration of the add */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	lock_policy_rwsem_write(cpu);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		unlock_policy_rwsem_write(cpu);
		goto err_out;
	}
	/* start with user limits equal to the hardware limits */
	policy->user_policy.min = policy->cpuinfo.min_freq;
	policy->user_policy.max = policy->cpuinfo.max_freq;

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	/* re-install the governor this CPU used before it was offlined */
	if (cpufreq_cpu_governor[cpu]){
		policy->governor = cpufreq_cpu_governor[cpu];
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu_mask(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs.  They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu: drop and re-take the rwsem
			 * so this CPU now locks the managing CPU's sem */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0)
				goto err_out_driver_exit;

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			managed_policy->cpus = policy->cpus;
			cpufreq_cpu_data[cpu] = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret) {
				unlock_policy_rwsem_write(cpu);
				goto err_out_driver_exit;
			}

			/* success: our temporary policy is no longer needed,
			 * so reuse the cleanup path with ret = 0 */
			cpufreq_debug_enable_ratelimit();
			ret = 0;
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	/* snapshot the driver-initialized policy; it is applied as the
	 * default at the end via __cpufreq_set_policy() */
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	kobject_set_name(&policy->kobj, "cpufreq");

	ret = kobject_register(&policy->kobj);
	if (ret) {
		unlock_policy_rwsem_write(cpu);
		goto err_out_driver_exit;
	}
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit;
		}
		drv_attr++;
	}
	/* cpuinfo_cur_freq / scaling_cur_freq only make sense when the
	 * driver can report resp. set frequencies */
	if (cpufreq_driver->get){
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit;
		}
	}
	if (cpufreq_driver->target){
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit;
		}
	}

	/* publish the policy for every affected CPU */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus) {
		cpufreq_cpu_data[j] = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu_mask(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		/* NOTE(review): takes an extra policy refcount per linked
		 * sibling; presumably dropped on link removal -- confirm */
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_unregister;
		}
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	unlock_policy_rwsem_write(cpu);

	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


	/* error unwinding, most-complete state first */
err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	/* block until cpufreq_sysfs_release() signals the last ref dropped */
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
/*
 * (Code-viewer UI residue captured by the paste, translated, not part of
 * the source file: "Keyboard shortcuts — copy code Ctrl+C, search code
 * Ctrl+F, fullscreen F11, increase font Ctrl+=, decrease font Ctrl+-,
 * show shortcuts ?".)
 */