/*  mtrr.c  */
static int __init set_fixed_ranges_testing (mtrr_type *frs)
{
    unsigned long *p = (unsigned long *) frs;
    int changed = FALSE;
    int i;
    unsigned long lo, hi;

    rdmsr (MTRRfix64K_00000_MSR, lo, hi);
    if (p[0] != lo || p[1] != hi)
    {
        wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
        changed = TRUE;
    }

    for (i = 0; i < 2; i++)
    {
        rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
        if (p[2 + i*2] != lo || p[3 + i*2] != hi)
        {
            wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
            changed = TRUE;
        }
    }

    for (i = 0; i < 8; i++)
    {
        rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
        if (p[6 + i*2] != lo || p[7 + i*2] != hi)
        {
            wrmsr (MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
            changed = TRUE;
        }
    }
    return changed;
}   /*  End Function set_fixed_ranges_testing  */

struct mtrr_state
{
    unsigned int num_var_ranges;
    struct mtrr_var_range *var_ranges;
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;
    mtrr_type def_type;
};

/*  Grab all of the MTRR state for this CPU into *state  */
static void __init get_mtrr_state (struct mtrr_state *state)
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges ();
    vrs = state->var_ranges
          = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL) nvrs = state->num_var_ranges = 0;
    for (i = 0; i < nvrs; i++) get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /*  End Function get_mtrr_state  */

/*  Free resources associated with a struct mtrr_state  */
static void __init finalize_mtrr_state (struct mtrr_state *state)
{
    if (state->var_ranges) kfree (state->var_ranges);
}   /*  End Function finalize_mtrr_state  */

static unsigned long __init set_mtrr_state (struct mtrr_state *state,
                                            struct set_mtrr_context *ctxt)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
        if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
            change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing (state->fixed_ranges) )
        change_mask |= MTRR_CHANGE_MASK_FIXED;

    /*  Set_mtrr_restore restores the old value of MTRRdefType,
        so to set it we fiddle with the saved value  */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
         || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled )
    {
        ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
        change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /*  End Function set_mtrr_state  */

static atomic_t undone_count;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;

struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};
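/*  The 0xff and 0xc00 masks above encode the MTRRdefType layout on
    Intel-style parts: default memory type in bits 7:0, fixed-range enable
    (FE) in bit 10, global enable (E) in bit 11.  Below is a minimal
    stand-alone sketch of that decode and of the re-encode that
    set_mtrr_state() applies to the saved deftype_lo.  This is not kernel
    code; the value 0xc06 is invented for the demo.  */
#if 0
#include <stdio.h>

int main (void)
{
    unsigned long lo = 0xc06;   /*  E=1, FE=1, default type 6 (write-back)  */
    unsigned char def_type = lo & 0xff;           /*  6                     */
    unsigned char enabled = (lo & 0xc00) >> 10;   /*  3: bit 0=FE, bit 1=E  */

    /*  Re-encode the fields, as set_mtrr_state() does when it ORs the
        new values into the saved deftype_lo  */
    unsigned long relo = def_type | (unsigned long) enabled << 10;

    printf ("def_type=%u enabled=%u relo=%#lx\n", def_type, enabled, relo);
    return 0;
}
#endif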
static void ipi_handler (void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /*  Notify master that I've flushed and disabled my cache  */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) barrier ();
    /*  The master has cleared me to execute  */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
                    data->smp_type, FALSE);
    /*  Notify master CPU that I've executed the function  */
    atomic_dec (&undone_count);
    /*  Wait for master to clear me to enable cache and return  */
    while (wait_barrier_cache_enable) barrier ();
    set_mtrr_done (&ctxt);
}   /*  End Function ipi_handler  */

static void set_mtrr_smp (unsigned int reg, unsigned long base,
                          unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /*  Start the ball rolling on other CPUs  */
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
        panic ("mtrr: timed out waiting for other CPUs\n");
    /*  Flush and disable the local CPU's cache  */
    set_mtrr_prepare (&ctxt);
    /*  Wait for all other CPUs to flush and disable their caches  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Set up for completion wait and then release the other CPUs
        to change MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /*  Now wait for the other CPUs to complete the function  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Now all CPUs should have finished the function. Release the barrier
        to allow them to re-enable their caches and return from their
        interrupt, then enable the local cache and return  */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /*  End Function set_mtrr_smp  */

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
static void __init mtrr_state_warn (unsigned long mask)
{
    if (!mask) return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
        printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
        printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
        printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk ("mtrr: probably your BIOS does not set up all CPUs\n");
}   /*  End Function mtrr_state_warn  */

#endif  /*  CONFIG_SMP  */

static char *attrib_to_str (int x)
{
    return (x <= 6) ? mtrr_strings[x] : "?";
}   /*  End Function attrib_to_str  */

static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
         == NULL )
    {
        printk ("mtrr: could not allocate\n");
        return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef USERSPACE_INTERFACE
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
        printk ("mtrr: could not allocate\n");
        return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
}   /*  End Function init_table  */
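/*  set_mtrr_smp() and ipi_handler() above form a two-phase rendezvous:
    every CPU checks in on undone_count, spins on a flag until released,
    performs the MTRR update, checks in again, then spins until the master
    allows caches to be re-enabled.  Below is a minimal user-space analogue
    using C11 threads and atomics so the handshake can be run and observed
    outside the kernel.  This is not kernel code; NWORKERS, worker() and
    all values are invented for the demo.  */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

#define NWORKERS 3

static atomic_int undone;
static atomic_bool wait_execute = true;
static atomic_bool wait_enable = true;

static int worker (void *arg)     /*  plays the role of ipi_handler()  */
{
    (void) arg;
    atomic_fetch_sub (&undone, 1);                      /*  "cache disabled"  */
    while (atomic_load (&wait_execute)) thrd_yield ();  /*  barrier 1         */
    /*  ...each CPU would write its own MTRRs here...  */
    atomic_fetch_sub (&undone, 1);                      /*  "update done"     */
    while (atomic_load (&wait_enable)) thrd_yield ();   /*  barrier 2         */
    return 0;                                           /*  "cache enabled"   */
}

int main (void)                   /*  plays the role of set_mtrr_smp()  */
{
    thrd_t tid[NWORKERS];
    int i;

    atomic_store (&undone, NWORKERS);
    for (i = 0; i < NWORKERS; i++) thrd_create (&tid[i], worker, NULL);

    while (atomic_load (&undone) > 0) thrd_yield ();    /*  all checked in    */
    atomic_store (&undone, NWORKERS);
    atomic_store (&wait_execute, false);                /*  release barrier 1 */
    /*  ...the master would write its own MTRRs here...  */
    while (atomic_load (&undone) > 0) thrd_yield ();    /*  all finished      */
    atomic_store (&wait_enable, false);                 /*  release barrier 2 */

    for (i = 0; i < NWORKERS; i++) thrd_join (tid[i], NULL);
    puts ("rendezvous complete");
    return 0;
}
#endif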
static int generic_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
        (*get_mtrr) (i, &lbase, &lsize, &ltype);
        if (lsize == 0) return i;
    }
    return -ENOSPC;
}   /*  End Function generic_get_free_region  */

static int cyrix_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /*  If we are to set up a region >32M then look at ARR7 immediately  */
    if (size > 0x2000)
    {
        cyrix_get_arr (7, &lbase, &lsize, &ltype);
        if (lsize == 0) return 7;
        /*  Else try ARR0-ARR6 first  */
    }
    else
    {
        for (i = 0; i < 7; i++)
        {
            cyrix_get_arr (i, &lbase, &lsize, &ltype);
            if ((i == 3) && arr3_protected) continue;
            if (lsize == 0) return i;
        }
        /*  ARR0-ARR6 aren't free, try ARR7 but its size must be
            at least 256K  */
        cyrix_get_arr (i, &lbase, &lsize, &ltype);
        if ((lsize == 0) && (size >= 0x40)) return i;
    }
    return -ENOSPC;
}   /*  End Function cyrix_get_free_region  */

static int (*get_free_region) (unsigned long base,
                               unsigned long size) = generic_get_free_region;
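/*  The kernel-doc for mtrr_add_page() below says callers should provide a
    power of two size on an equivalent power of two boundary, which the
    function enforces with a shift loop.  Below is a minimal stand-alone
    sketch of that test: strip trailing 0-bits from base and matching
    trailing 1-bits from last, and for a power-of-two block on a matching
    boundary the two values meet.  This is not kernel code; the sample
    page values are invented.  A driver call would then look like, e.g.,
        mtrr_add_page (fb_base >> PAGE_SHIFT, fb_size >> PAGE_SHIFT,
                       MTRR_TYPE_WRCOMB, 1);
    for a power-of-two framebuffer at a suitably aligned fb_base.  */
#if 0
#include <stdio.h>

static int aligned_ok (unsigned long base, unsigned long size)
{
    unsigned long lbase = base, last = base + size - 1;

    while ( !(lbase & 1) && (last & 1) )
    {
        lbase >>= 1;
        last >>= 1;
    }
    return lbase == last;
}

int main (void)
{
    printf ("%d\n", aligned_ok (0x400, 0x100));   /*  1: 1 MiB at 4 MiB     */
    printf ("%d\n", aligned_ok (0x500, 0x200));   /*  0: base not aligned   */
    return 0;
}
#endif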
/**
 *  mtrr_add_page - Add a memory type region
 *  @base: Physical base address of region in pages (4 KB)
 *  @size: Physical size of region in pages (4 KB)
 *  @type: Type of MTRR desired
 *  @increment: If this is true do usage counting on the region
 *
 *  Memory type range registers control the caching on newer Intel and
 *  non-Intel processors. This function allows drivers to request that an
 *  MTRR be added. The details and hardware specifics of each processor's
 *  implementation are hidden from the caller, but nevertheless the
 *  caller should expect to need to provide a power of two size on an
 *  equivalent power of two boundary.
 *
 *  If the region cannot be added, either because all regions are in use
 *  or the CPU cannot support it, a negative value is returned. On success
 *  the register number for this entry is returned, but it should be
 *  treated as a cookie only.
 *
 *  On a multiprocessor machine the changes are made to all processors.
 *  This is required on x86 by the Intel processors.
 *
 *  The available types are
 *
 *  %MTRR_TYPE_UNCACHEABLE - No caching
 *
 *  %MTRR_TYPE_WRITEBACK - Write data back in bursts whenever
 *
 *  %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 *  %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 *  BUGS: Needs a quiet flag for the cases where drivers do not mind
 *  failures and do not wish system log messages to be sent.
 */

int mtrr_add_page (unsigned long base, unsigned long size,
                   unsigned int type, char increment)
{
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base, in pages) address of the region.
    <size> The size of the region (in pages).
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will
    be incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
    [NOTE] This routine serialises on the main_lock semaphore.
*/
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    switch ( mtrr_if )
    {
    case MTRR_IF_NONE:
        return -ENXIO;          /*  No MTRRs whatsoever  */

    case MTRR_IF_AMD_K6:
        /*  Apply the K6 block alignment and size rules.
            In order:
            o Uncached or gathering only
            o 128K or bigger block
            o Power of 2 block
            o base suitably aligned to the power
        */
        if ( type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) ||
             (size & ~(size - 1)) - size || ( base & (size - 1) ) )
            return -EINVAL;
        break;

    case MTRR_IF_INTEL:
        /*  For Intel PPro stepping <= 7, must be 4 MiB aligned  */
        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
             boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 &&
             boot_cpu_data.x86_mask <= 7 )
        {
            if ( base & ((1 << (22 - PAGE_SHIFT)) - 1) )
            {
                printk (KERN_WARNING
                        "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                return -EINVAL;
            }
        }
        /*  Fall through  */

    case MTRR_IF_CYRIX_ARR:
    case MTRR_IF_CENTAUR_MCR:
        if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
        {
            if (type != MTRR_TYPE_WRCOMB)
            {
                printk (KERN_WARNING
                        "mtrr: only write-combining is supported\n");
                return -EINVAL;
            }
        }
        else if (base + size < 0x100)
        {
            printk (KERN_WARNING
                    "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
                    base, size);
            return -EINVAL;
        }
        /*  Check that the upper bits of base and last are equal and that
            the lower bits are 0 for base and 1 for last  */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1);
        if (lbase != last)
        {
            printk (KERN_WARNING
                    "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                    base, size);
            return -EINVAL;
        }
        break;

    default:
        return -EINVAL;
    }

    if (type >= MTRR_NUM_TYPES)
    {
        printk ("mtrr: type: %u illegal\n", type);
        return -EINVAL;
    }

    /*  If the type is WC, check that this processor supports it  */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
        printk (KERN_WARNING
                "mtrr: your processor doesn't support write-combining\n");
        return -ENOSYS;
    }

    if ( base & size_or_mask || size & size_or_mask )
    {
        printk ("mtrr: base or size exceeds the MTRR width\n");
        return -EINVAL;
    }

    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /*  Search for an existing MTRR  */
    down (&main_lock);
    for (i = 0; i < max; ++i)
    {
        (*get_mtrr) (i, &lbase, &lsize, &ltype);
        if (base >= lbase + lsize) continue;
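        /*  A worked example of the skip test above (numbers invented):
            if slot i holds lbase=0x800 and lsize=0x800 it covers pages up
            to 0xfff, so a request with base=0x1000 satisfies
            base >= lbase + lsize and the scan moves on to the next
            register.  */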