
📄 main.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <asm/mtrr.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"

u32 num_var_ranges = 0;

unsigned int *usage_table;
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

#ifndef CONFIG_X86_64
extern int arr3_protected;
#else
#define arr3_protected 0
#endif

void set_mtrr_ops(struct mtrr_ops * ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets < rev 6 have problems with write-combining.
		   Don't allow it and leave room for other chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/* Intel 450NX errata # 23. Non-ascending cacheline evictions to
		   write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/*  This function returns the number of variable MTRRs  */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
	    == NULL) {
		printk(KERN_ERR "mtrr: could not allocate\n");
		return;
	}
	for (i = 0; i < max; i++)
		usage_table[i] = 1;
}

struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while(!atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();

	atomic_dec(&data->count);
	while(atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
 * differently, so we call mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate to
 * be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
 * Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	/* make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate,0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while(atomic_read(&data.count))
		cpu_relax();

	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate,1);

	/* do our MTRR business */

	/* HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate across all the APs.
	 * If we're doing that @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg,base,size,type);

	/* wait for the others */
	while(atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate,0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while(atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
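/*
 * Illustrative trace of the rendezvous above (editorial addition, not part
 * of the original file). Assume num_booting_cpus() == 4, so data.count
 * starts at 3:
 *
 *   - Each of the 3 other CPUs enters ipi_handler(), disables interrupts,
 *     and decrements count (3 -> 0), then spins on the closed gate.
 *   - The master sees count == 0, resets count to 3, and opens the gate.
 *     All 4 CPUs now program their MTRRs via mtrr_if->set() (or
 *     mtrr_if->set_all() during boot replication); the 3 others decrement
 *     count again (3 -> 0) and spin on the open gate.
 *   - The master sees count == 0, resets count to 3, and closes the gate.
 *     The 3 others fall through, decrement count a final time, and restore
 *     interrupts.
 *   - The master waits for count == 0 once more, guaranteeing it is the
 *     last CPU to touch the on-stack 'data' before set_mtrr() returns.
 */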
/**
 *	mtrr_add_page - Add a memory type region
 *	@base: Physical base address of region in pages (in units of 4 kB!)
 *	@size: Physical size of region in pages (4 kB)
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non-Intel processors. This function allows drivers to request that
 *	an MTRR be added. The details and hardware specifics of each
 *	processor's implementation are hidden from the caller, but
 *	nevertheless the caller should expect to need to provide a power of
 *	two size on an equivalent power of two boundary.
 *
 *	If the region cannot be added, either because all regions are in use
 *	or the CPU cannot support it, a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
{
	int i, replace, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base,size,type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		printk(KERN_WARNING "mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	/*  Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				}
				else if (types_compatible(type, ltype))
					continue;
			}
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			     base, size, mtrr_attrib_to_str(ltype),
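The listing breaks off mid-function here; mtrr_add_page() continues on page 2. For context, here is a minimal sketch of how a driver consumes this API, assuming only the mtrr_add()/mtrr_del() wrappers exported through <asm/mtrr.h> in kernels of this vintage; the fb_base/fb_size values and function names are hypothetical:

#include <asm/mtrr.h>

/* Hypothetical framebuffer driver: mark its aperture write-combining.
 * Per the mtrr_add_page() comment above, size must be a power of two on
 * an equally aligned base, and the return value is only a cookie. */
static int fb_mtrr = -1;

static void fb_enable_wrcomb(unsigned long fb_base, unsigned long fb_size)
{
	/* increment == 1 asks the core to usage-count this region */
	fb_mtrr = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
	if (fb_mtrr < 0)
		printk(KERN_INFO "fb: no write-combining (%d)\n", fb_mtrr);
}

static void fb_disable_wrcomb(unsigned long fb_base, unsigned long fb_size)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, fb_base, fb_size);	/* pass the cookie back */
}

Failure is deliberately non-fatal here: the driver simply runs without write-combining, which matches the BUGS note above about drivers that tolerate failure but want the log message suppressed.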
