intel_cacheinfo.c
	/* Excerpt begins inside init_intel_cacheinfo(): record and report
	 * the detected cache sizes, then return the L2 size. */
	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	/* The first printk deliberately omits the '\n'; the L1 D line (or a
	 * bare newline) completes it. */
	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x, y)	(&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		/* CPUs whose APIC IDs agree above the low index_msb bits
		 * share this cache leaf. */
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpu_set(cpu,
						sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
		return -ENOMEM;

	/* Pin ourselves to the target CPU so cpuid4_cache_lookup() executes
	 * CPUID there. */
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval) {
		kfree(cpuid4_info[cpu]);
		cpuid4_info[cpu] = NULL;
	}

	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject *cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x, y)	(&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n",					\
		       (unsigned long)this_leaf->object + val);		\
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];

	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

/* Generic sysfs dispatcher: route reads to the per-attribute show helper. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

/* All attributes are 0444, so writes are silently discarded. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	/* Register cpuX/cache, then one indexY kobject per cache leaf. */
	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			/* Unwind everything registered so far. */
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	if (!retval)
		cpu_set(cpu, cache_dev_map);

	return retval;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (cpuid4_info[cpu] == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif
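
For reference, here is a minimal userspace sketch of how the interface registered above can be consumed. cache_add_dev() creates /sys/devices/system/cpu/cpuX/cache/indexY/ with the read-only files declared via define_one_ro(); the program below is an illustrative addition (not part of the kernel source) that probes cpu0 and the first few leaves, assuming a kernel that exposes this sysfs layout. The attribute names are taken directly from the define_one_ro() declarations in the listing.

	#include <stdio.h>

	int main(void)
	{
		/* Attribute files created by define_one_ro() in the listing above. */
		const char *attrs[] = { "type", "level", "size",
					"ways_of_associativity", "shared_cpu_map" };
		char path[128], buf[64];
		unsigned int idx, i;

		/* Probe the first few indexY directories from cache_add_dev(). */
		for (idx = 0; idx < 4; idx++) {
			for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
				FILE *f;

				snprintf(path, sizeof(path),
					 "/sys/devices/system/cpu/cpu0/cache/index%u/%s",
					 idx, attrs[i]);
				f = fopen(path, "r");
				if (!f)
					continue;	/* leaf or attribute absent */
				if (fgets(buf, sizeof(buf), f))
					printf("index%u %s: %s", idx, attrs[i], buf);
				fclose(f);
			}
		}
		return 0;
	}

On a typical machine this prints something like "index0 type: Data" and "index0 size: 32K"; missing leaves are simply skipped, mirroring the fact that num_cache_leaves bounds the indexY kobjects the kernel registers.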