⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 intel_cacheinfo.c

📁 底层驱动开发
💻 C
📖 第 1 页 / 共 2 页
字号:
	/*
	 * (continuation of cache_shared_cpu_map_setup() — the function
	 * header lies above this chunk)  Record which CPUs share this
	 * cache leaf, based on the thread-sharing count CPUID reported.
	 */
	unsigned long num_threads_sharing;
#ifdef CONFIG_X86_HT
	struct cpuinfo_x86 *c = cpu_data + cpu;
#endif
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	/* CPUID(4) EAX encodes "threads sharing this cache" minus one */
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
	if (num_threads_sharing == 1)
		/* cache is private to this CPU */
		cpu_set(cpu, this_leaf->shared_cpu_map);
#ifdef CONFIG_X86_HT
	else if (num_threads_sharing == smp_num_siblings)
		/* shared among the HT siblings of this CPU */
		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
	else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
		/* shared among all threads on this physical package */
		this_leaf->shared_cpu_map = cpu_core_map[cpu];
	else
		printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
				"any known set of CPUs\n");
#endif
}
#else	/* matches an #ifdef opened above this chunk — presumably CONFIG_SMP */
/* UP stub: no sharing information to record */
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
#endif

/*
 * Free the per-CPU array of cache-leaf descriptors and clear the slot
 * so a later probe can reallocate it.
 */
static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

/*
 * Allocate cpuid4_info[cpu] and fill one _cpuid4_info per cache leaf.
 * The current task is temporarily pinned to @cpu so the CPUID
 * instructions issued by cpuid4_cache_lookup() execute on the CPU
 * being probed; the old affinity mask is restored afterwards.
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * filled array is freed again.
 */
static int __devinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kmalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;
	memset(cpuid4_info[cpu], 0,
	    sizeof(struct _cpuid4_info) * num_cache_leaves);

	/* pin ourselves to the target CPU for the duration of the probe */
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

/*
 * One of these backs each cpuX/cache/indexY sysfs directory; (cpu,
 * index) locate the matching _cpuid4_info.  (struct continues on the
 * next chunk.)
 */
struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
/* (continuation of struct _index_kobject from the previous chunk) */
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))

/*
 * Generate a sysfs show routine for one _cpuid4_info field, printing
 * the field plus a constant bias @val (1 for the fields below that
 * CPUID leaf 4 encodes as "value minus one", 0 otherwise).
 */
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

/* Cache size in kilobytes, e.g. "512K" */
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

/* Bitmask of CPUs sharing this cache, formatted by cpumask_scnprintf() */
static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

/* Human-readable cache type from the CPUID(4) EAX type field */
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch(this_leaf->eax.split.type) {
	    case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
		break;
	    case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
		break;
	    case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		break;
	    default:
		return sprintf(buf, "Unknown\n");
		break;
	}
}

/* sysfs attribute wrapper carrying the per-leaf show/store callbacks */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

/* Declare a read-only (0444) attribute wired to show_<name> above */
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

/* Default attribute set for every indexY directory (continues below) */
static struct attribute * default_attrs[] = {
	&type.attr,
/* (continuation of default_attrs[] from the previous chunk) */
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

/*
 * Generic sysfs show dispatcher: recover the _index_kobject and
 * _cache_attr from the kobject/attribute pointers and forward to the
 * attribute's show routine with this leaf's _cpuid4_info.
 */
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
	       	0;
	return ret;
}

/* All attributes are read-only; writes are silently discarded */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

/* kobj_type for the indexY directories (carries the attribute files) */
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* kobj_type for the bare cpuX/cache directory (no attributes of its own) */
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

/* Undo cpuid4_cache_sysfs_init(): release kobject memory and leaf data */
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

/*
 * Probe the cache leaves of @cpu and allocate the kobjects that will
 * populate sysfs.  Returns 0 on success, -ENOENT if there is nothing
 * to export, -ENOMEM on allocation failure (everything already
 * allocated is released via cpuid4_cache_sysfs_exit()).
 */
static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	if (num_cache_leaves == 0)
		return -ENOENT;

	/*
	 * NOTE(review): detect_cache_attributes()'s return value is
	 * discarded; failure is inferred from the NULL pointer below,
	 * which folds -ENOMEM into -ENOENT — confirm intended.
	 */
	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;
	memset(cache_kobject[cpu], 0, sizeof(struct kobject));

	index_kobject[cpu] = kmalloc(
	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;
	memset(index_kobject[cpu], 0,
	    sizeof(struct _index_kobject) * num_cache_leaves);

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
static int __devinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned 
/* (continuation of cache_add_dev() — the declaration is split here) */
long i, j;
	struct _index_kobject *this_object;
	int retval = 0;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	/* register the cpuX/cache directory itself */
	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	/*
	 * NOTE(review): this return value is overwritten by the loop
	 * below without being checked — confirm against later upstream
	 * versions, which bail out here on failure.
	 */
	retval = kobject_register(cache_kobject[cpu]);

	/* one indexY directory per cache leaf */
	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			/* roll back everything registered so far */
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

/* Tear down the cpuX/cache sysfs tree when the CPU device goes away */
static int __devexit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
	return 0;
}

static struct sysdev_driver cache_sysdev_driver = {
	.add = cache_add_dev,
	.remove = __devexit_p(cache_remove_dev),
};

/* Register/Unregister the cpu_cache driver */
static int __devinit cache_register_driver(void)
{
	/* nothing to export if CPUID leaf 4 found no cache levels */
	if (num_cache_leaves == 0)
		return 0;

	return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver);
}

device_initcall(cache_register_driver);

#endif

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -