
main.c

Linux kernel source (C)
			     mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0))
			usage_table[i] = 1;
		else {
			usage_table[i] = usage_table[replace] + !!increment;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				usage_table[replace] = 0;
			}
		}
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING
			"mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG
			"mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 *	mtrr_add - Add a memory type region
 *	@base: Physical base address of region
 *	@size: Physical size of region
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non-Intel processors. This function allows drivers to request that
 *	an MTRR be added. The details and hardware specifics of each
 *	processor's implementation are hidden from the caller, but
 *	nevertheless the caller should expect to need to provide a power
 *	of two size on an equivalent power of two boundary.
 *
 *	If the region cannot be added either because all regions are in use
 *	or the CPU cannot support it a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */

int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 char increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}

/**
 *	mtrr_del_page - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}

/**
 *	mtrr_del - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code.
 */

int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

/* HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/* The suspend/resume methods are only for CPUs without MTRR. CPUs using
 * the generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value *mtrr_state;

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;
	int size = num_var_ranges * sizeof(struct mtrr_value);

	mtrr_state = kzalloc(size, GFP_ATOMIC);
	if (!mtrr_state)
		return -ENOMEM;
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	kfree(mtrr_state);
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 *
 */
void __init mtrr_bp_init(void)
{
	init_ifs();

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;

		/* This is an AMD specific MSR, but we assume(hope?)
		   that Intel will implement it too when they extend the
		   address bus of the Xeon. */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			u32 phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;
			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family have Intel style MTRRs, but
			   don't support PAE */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel())
			get_mtrr_state();
	}
}

void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
	 * but this routine will be called in cpu boot time, holding the lock
	 * breaks it. This routine is called in two cases: 1. very early time
	 * of software resume, when there absolutely are no mtrr entry changes;
	 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
	 * prevent mtrr entry changes
	 */
	local_irq_save(flags);
	mtrr_if->set_all();
	local_irq_restore(flags);
}

/**
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
}

static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;
	if (use_intel())
		mtrr_state_warn();
	else {
		/* These CPUs have no MTRR and seem not to support SMP. They
		 * have specific drivers; we use a tricky method to support
		 * suspend/resume for them.
		 * TBD: is there any system with such CPU which supports
		 * suspend/resume?  if no, we should remove the code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
			&mtrr_sysdev_driver);
	}
	return 0;
}
subsys_initcall(mtrr_init_finialize);
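For context, the kernel-doc for mtrr_add()/mtrr_del() above spells out the driver-facing contract: a power-of-two size on a power-of-two boundary, a return value treated as an opaque cookie, and advisory (non-fatal) failure. Below is a minimal, hypothetical driver fragment showing that pattern; the FB_BASE/FB_SIZE values and the fb_probe()/fb_remove() names are illustrative assumptions, not part of the file above.

/* Hypothetical usage sketch (not part of main.c): request
 * write-combining over a framebuffer aperture via the API above.
 * Addresses and function names are made up for illustration. */
#include <linux/kernel.h>
#include <asm/mtrr.h>

#define FB_BASE	0xd0000000UL	/* illustrative: power-of-two aligned */
#define FB_SIZE	0x01000000UL	/* illustrative: 16 MiB, a power of two */

static int fb_mtrr = -1;	/* cookie returned by mtrr_add() */

static int fb_probe(void)
{
	/* increment != 0: have the MTRR core usage-count the region */
	fb_mtrr = mtrr_add(FB_BASE, FB_SIZE, MTRR_TYPE_WRCOMB, 1);
	if (fb_mtrr < 0)
		/* Advisory failure: the device still works, just slower */
		printk(KERN_INFO "fb: write-combining unavailable\n");
	return 0;
}

static void fb_remove(void)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, FB_BASE, FB_SIZE);	/* drops the usage count */
}

Note how failure is treated as non-fatal: the driver falls back to uncached access rather than aborting the probe. mtrr_add() also logs its own diagnostics on failure, which is exactly what the BUGS note in the kernel-doc complains about.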
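As a worked example of the size_or_mask/size_and_mask setup in mtrr_bp_init(): for a CPU reporting 36 physical address bits via CPUID 0x80000008 (phys_addr = 36) and PAGE_SHIFT = 12, the two expressions from the listing evaluate as checked by the small user-space sketch below (the printf scaffolding is added for illustration; the mask expressions are copied from the code above).

#include <stdio.h>

int main(void)
{
	/* Same expressions as mtrr_bp_init(), with phys_addr = 36 and
	 * PAGE_SHIFT = 12 substituted in. The masks operate on
	 * page-frame numbers, so PFN bit 24 is physical address bit 36. */
	unsigned long long phys_addr = 36, page_shift = 12;
	unsigned long long size_or_mask =
		~((1ULL << (phys_addr - page_shift)) - 1);
	unsigned long long size_and_mask = ~size_or_mask & 0xfffff00000ULL;

	/* Prints 0xffffffffff000000 and 0xf00000: the or-mask forces all
	 * PFN bits at or above bit 24 to 1, while the and-mask keeps PFN
	 * bits 20..23, matching the 0x00f00000 default set earlier in
	 * mtrr_bp_init() for 36-bit parts. */
	printf("size_or_mask  = %#llx\n", size_or_mask);
	printf("size_and_mask = %#llx\n", size_and_mask);
	return 0;
}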
