
📄 mtrr.c

📁 Linux kernel source code, distributed as a compressed archive; this is the source code accompanying the book <<Linux内核>> (The Linux Kernel)
💻 C
📖 Page 1 of 5
	break;
      case MTRRIOC_SET_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add_page (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del_page (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_PAGE_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
	gentry.type = type;
	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
    }
    return 0;
}   /*  End Function mtrr_ioctl  */

static int mtrr_close (struct inode *ino, struct file *file)
{
    int i, max;
    unsigned int *fcount = file->private_data;

    if (fcount == NULL) return 0;
    lock_kernel();
    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	while (fcount[i] > 0)
	{
	    if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
	    --fcount[i];
	}
    }
    unlock_kernel();
    kfree (fcount);
    file->private_data = NULL;
    return 0;
}   /*  End Function mtrr_close  */

static struct file_operations mtrr_fops =
{
    owner:	THIS_MODULE,
    read:	mtrr_read,
    write:	mtrr_write,
    ioctl:	mtrr_ioctl,
    release:	mtrr_close,
};

#  ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_root_mtrr;
#  endif  /*  CONFIG_PROC_FS  */

static devfs_handle_t devfs_handle;

static void compute_ascii (void)
{
    char factor;
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
	(*get_mtrr) (i, &base, &size, &type);
	if (size == 0) usage_table[i] = 0;
	else
	{
	    if (size < (0x100000 >> PAGE_SHIFT))
	    {
		/* less than 1MB */
		factor = 'K';
		size <<= PAGE_SHIFT - 10;
	    }
	    else
	    {
		factor = 'M';
		size >>= 20 - PAGE_SHIFT;
	    }
	    sprintf
		(ascii_buffer + ascii_buf_bytes,
		 "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
		 i, base, base >> (20 - PAGE_SHIFT), size, factor,
		 attrib_to_str (type), usage_table[i]);
	    ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
	}
    }
    devfs_set_file_size (devfs_handle, ascii_buf_bytes);
#  ifdef CONFIG_PROC_FS
    proc_root_mtrr->size = ascii_buf_bytes;
#  endif  /*  CONFIG_PROC_FS  */
}   /*  End Function compute_ascii  */

#endif  /*  USERSPACE_INTERFACE  */

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

#ifdef CONFIG_SMP

typedef struct
{
    unsigned long base;
    unsigned long size;
    mtrr_type type;
} arr_state_t;

arr_state_t arr_state[8] __initdata =
{
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
};

unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
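/* [Editor's note] arr_state and ccr_state above are snapshots of the boot
 * CPU's Cyrix ARR and CCR registers: they are filled in by cyrix_arr_init()
 * further down (under CONFIG_SMP) and replayed verbatim on each secondary
 * CPU by cyrix_arr_init_secondary() below, so that every CPU ends up with
 * an identical memory-type configuration. */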
static void __init cyrix_arr_init_secondary(void)
{
    struct set_mtrr_context ctxt;
    int i;

    set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */

    /* the CCRs are not contiguous */
    for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
    for(   ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]);
    for(i=0; i<8; i++)
      cyrix_set_arr_up(i,
        arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
}   /*  End Function cyrix_arr_init_secondary  */
#endif

/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
 * with the SMM (System Management Mode) mode. So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init cyrix_arr_init(void)
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
    int i;
#endif

    set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */

    /* Save all CCRs locally */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
    {
	ccrc[3] = 1;
	arr3_protected = 1;
    }
    else
    {
	/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
	 * access to SMM memory through ARR3 (bit 7).
	 */
	if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
	if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
	if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
	arr3_protected = 0;
	if (ccr[6] & 0x02) {
	    ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3 */
	    setCx86 (CX86_CCR6, ccr[6]);
	}
	/* Disable ARR3. This is safe now that we disabled SMM. */
	/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
    }
    /* If we changed CCR1 in memory, change it in the processor, too. */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /* Enable ARR usage by the processor */
    if (!(ccr[5] & 0x20))
    {
	ccr[5] |= 0x20; ccrc[5] = 1;
	setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef CONFIG_SMP
    for(i=0; i<7; i++) ccr_state[i] = ccr[i];
    for(i=0; i<8; i++)
      cyrix_get_arr(i,
        &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
}   /*  End Function cyrix_arr_init  */
static void __init centaur_mcr_init(void)
{
    unsigned i;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /* Unfortunately, MCR's are read-only, so there is no way to
     * find out what the bios might have done.
     */
    /* Clear all MCR's.
     * This way we are sure that the centaur_mcr array contains the actual
     * values. The disadvantage is that any BIOS tweaks are thus undone.
     */
    for (i = 0; i < 8; ++i)
    {
	centaur_mcr[i].high = 0;
	centaur_mcr[i].low = 0;
	wrmsr (0x110 + i, 0, 0);
    }
    /*  Throw the main write-combining switch...  */
    wrmsr (0x120, 0x01f0001f, 0);
    set_mtrr_done (&ctxt);
}   /*  End Function centaur_mcr_init  */

static int __init mtrr_setup(void)
{
    if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Intel (P6) standard MTRRs */
	mtrr_if = MTRR_IF_INTEL;
	get_mtrr = intel_get_mtrr;
	set_mtrr_up = intel_set_mtrr_up;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* The original Athlon docs said that
		   total addressable memory is 44 bits wide.
		   It was not really clear whether its MTRRs
		   follow this or not. (Read: 44 or 36 bits).
		   However, "x86-64_overview.pdf" explicitly
		   states that "previous implementations support
		   36 bit MTRRs" and also provides a way to
		   query the width (in bits) of the physical
		   addressable memory on the Hammer family.
		 */
		if (boot_cpu_data.x86 == 7 && (cpuid_eax(0x80000000) >= 0x80000008)) {
			u32	phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfff00000;
			break;
		}
	default:
		/* Intel, etc. */
		size_or_mask  = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;
	}
    } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Pre-Athlon (K6) AMD CPU MTRRs */
	mtrr_if = MTRR_IF_AMD_K6;
	get_mtrr = amd_get_mtrr;
	set_mtrr_up = amd_set_mtrr_up;
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
	/* Cyrix ARRs */
	mtrr_if = MTRR_IF_CYRIX_ARR;
	get_mtrr = cyrix_get_arr;
	set_mtrr_up = cyrix_set_arr_up;
	get_free_region = cyrix_get_free_region;
	cyrix_arr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
	/* Centaur MCRs */
	mtrr_if = MTRR_IF_CENTAUR_MCR;
	get_mtrr = centaur_get_mcr;
	set_mtrr_up = centaur_set_mcr_up;
	centaur_mcr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else {
	/* No supported MTRR interface */
	mtrr_if = MTRR_IF_NONE;
    }

    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
	    "mtrr: detected mtrr type: %s\n",
	    MTRR_VERSION, mtrr_if_name[mtrr_if]);
    return (mtrr_if != MTRR_IF_NONE);
}   /*  End Function mtrr_setup  */
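/* [Editor's note] Worked example for the Hammer-family mask computation in
 * mtrr_setup() above, assuming PAGE_SHIFT == 12 (4 KB pages) and a CPUID
 * 0x80000008 report of a 40-bit physical address width:
 *     size_or_mask  = ~((1 << (40 - 12)) - 1) = ~0x0fffffff = 0xf0000000
 *     size_and_mask = ~size_or_mask & 0xfff00000             = 0x0ff00000
 * The default of 0xff000000 is the same formula evaluated for a 36-bit
 * physical address space. Both masks are expressed in 4 KB page units. */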
#ifdef CONFIG_SMP

static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};

void __init mtrr_init_boot_cpu(void)
{
    if ( !mtrr_setup () )
	return;

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Only for Intel MTRRs */
	get_mtrr_state (&smp_mtrr_state);
    }
}   /*  End Function mtrr_init_boot_cpu  */

static void __init intel_mtrr_init_secondary_cpu(void)
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /*  Note that this is not ideal, since the cache is only flushed/disabled
	for this CPU while the MTRRs are changed, but changing this requires
	more invasive changes to the way the kernel boots  */
    set_mtrr_prepare (&ctxt);
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);

    /*  Use the atomic bitops to update the global mask  */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
	if (mask & 0x01) set_bit (count, &smp_changes_mask);
	mask >>= 1;
    }
}   /*  End Function intel_mtrr_init_secondary_cpu  */

void __init mtrr_init_secondary_cpu(void)
{
    switch ( mtrr_if ) {
    case MTRR_IF_INTEL:
	/* Intel (P6) standard MTRRs */
	intel_mtrr_init_secondary_cpu();
	break;
    case MTRR_IF_CYRIX_ARR:
	/* This is _completely theoretical_!
	 * I assume here that one day Cyrix will support Intel APIC.
	 * In reality on non-Intel CPUs we won't even get to this routine.
	 * Hopefully no one will plug two Cyrix processors in a dual P5 board.
	 *  :-)
	 */
	cyrix_arr_init_secondary ();
	break;
    default:
	/* I see no MTRRs I can support in SMP mode... */
	printk ("mtrr: SMP support incomplete for this vendor\n");
    }
}   /*  End Function mtrr_init_secondary_cpu  */
#endif  /*  CONFIG_SMP  */

int __init mtrr_init(void)
{
#ifdef CONFIG_SMP
    /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */
    if ( mtrr_if == MTRR_IF_INTEL ) {
	finalize_mtrr_state (&smp_mtrr_state);
	mtrr_state_warn (smp_changes_mask);
    }
#else
    if ( !mtrr_setup() )
	return 0;		/* MTRRs not supported? */
#endif

#ifdef CONFIG_PROC_FS
    proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
    proc_root_mtrr->owner = THIS_MODULE;
    proc_root_mtrr->proc_fops = &mtrr_fops;
#endif
#ifdef CONFIG_DEVFS_FS
    devfs_handle = devfs_register (NULL, "cpu/mtrr", DEVFS_FL_DEFAULT, 0, 0,
				   S_IFREG | S_IRUGO | S_IWUSR,
				   &mtrr_fops, NULL);
#endif
    init_table ();
    return 0;
}   /*  End Function mtrr_init  */
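Usage note: the MTRRIOC_* ioctls handled by mtrr_ioctl() above are driven from userspace through /proc/mtrr. Below is a minimal sketch of such a client, modelled on the usage described in Documentation/mtrr.txt of the same kernel generation; the base address and size are placeholder values, and struct mtrr_sentry, MTRRIOC_ADD_ENTRY and MTRR_TYPE_WRCOMB are assumed to come from <asm/mtrr.h>.

/* Sketch: mark a hypothetical 4 MB framebuffer write-combining via the
 * MTRR ioctl interface. Must run as superuser; mtrr_ioctl() rejects
 * other callers with -EPERM (see the suser() checks above). */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>   /* struct mtrr_sentry, MTRRIOC_*, MTRR_TYPE_* */

int main (void)
{
    struct mtrr_sentry sentry;
    int fd;

    sentry.base = 0xf8000000;        /* hypothetical framebuffer base */
    sentry.size = 0x400000;          /* 4 MB */
    sentry.type = MTRR_TYPE_WRCOMB;  /* write-combining */

    if ( (fd = open ("/proc/mtrr", O_WRONLY)) == -1 )
    {
	perror ("open /proc/mtrr");
	exit (1);
    }
    if ( ioctl (fd, MTRRIOC_ADD_ENTRY, &sentry) == -1 )
    {
	perror ("ioctl MTRRIOC_ADD_ENTRY");
	exit (2);
    }
    /* ... use the write-combining region here ... */
    close (fd);
    return 0;
}

Note that mtrr_close() in the listing removes any regions a process added through its file descriptor when that descriptor is released, so the descriptor should be kept open for as long as the mapping is needed.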
