/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86). */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"
struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
        unsigned char have_fixed;
        mtrr_type def_type;
};
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."
static __initdata int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);
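/* With the "mtrr." prefix defined above, this parameter is set from the
   kernel command line as "mtrr.show=1"; it makes get_mtrr_state() dump
   the MTRR layout to the log during boot. */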
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
static void
get_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        int i;

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
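/* The eleven fixed-range MSRs read above pack eight one-byte memory
 * types each and together describe the first megabyte:
 *
 *   MTRRfix64K_00000          00000-7FFFF in eight 64 KiB pieces
 *   MTRRfix16K_80000/A0000    80000-BFFFF in sixteen 16 KiB pieces
 *   MTRRfix4K_C0000..F8000    C0000-FFFFF in sixty-four 4 KiB pieces
 *
 * which is why frs[] can be filled by treating each MSR as two 32-bit
 * words. */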
void mtrr_save_fixed_ranges(void *info)
{
        get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void __init print_fixed(unsigned base, unsigned step,
                               const mtrr_type *types)
{
        unsigned i;

        for (i = 0; i < 8; ++i, ++types, base += step)
                printk(KERN_INFO "MTRR %05X-%05X %s\n",
                       base, base + step - 1, mtrr_attrib_to_str(*types));
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;

        if (!mtrr_state.var_ranges) {
                mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
                                                GFP_KERNEL);
                if (!mtrr_state.var_ranges)
                        return;
        }
        vrs = mtrr_state.var_ranges;

        /* MTRRcap: bits 7:0 hold the variable-range count, bit 8 says
           whether the fixed ranges exist at all. */
        rdmsr(MTRRcap_MSR, lo, dummy);
        mtrr_state.have_fixed = (lo >> 8) & 1;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        if (mtrr_state.have_fixed)
                get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;

        if (mtrr_show) {
                int high_width;

                printk(KERN_INFO "MTRR default type: %s\n",
                       mtrr_attrib_to_str(mtrr_state.def_type));
                if (mtrr_state.have_fixed) {
                        printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
                               mtrr_state.enabled & 1 ? "en" : "dis");
                        print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                        for (i = 0; i < 2; ++i)
                                print_fixed(0x80000 + i * 0x20000, 0x04000,
                                            mtrr_state.fixed_ranges + (i + 1) * 8);
                        for (i = 0; i < 8; ++i)
                                print_fixed(0xC0000 + i * 0x08000, 0x01000,
                                            mtrr_state.fixed_ranges + (i + 3) * 8);
                }
                printk(KERN_INFO "MTRR variable ranges %sabled:\n",
                       mtrr_state.enabled & 2 ? "en" : "dis");
                /* Number of hex digits needed to print the physical-address
                   bits above bit 31 (base_hi/mask_hi). */
                high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
                for (i = 0; i < num_var_ranges; ++i) {
                        if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
                                printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
                                       i,
                                       high_width,
                                       mtrr_state.var_ranges[i].base_hi,
                                       mtrr_state.var_ranges[i].base_lo >> 12,
                                       high_width,
                                       mtrr_state.var_ranges[i].mask_hi,
                                       mtrr_state.var_ranges[i].mask_lo >> 12,
                                       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
                        else
                                printk(KERN_INFO "MTRR %u disabled\n", i);
                }
        }
}
/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
        if (wrmsr_safe(msr, a, b) < 0)
                printk(KERN_ERR
                        "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
}
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;
        max = num_var_ranges;
        if (replace_reg >= 0 && replace_reg < max)
                return replace_reg;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                return;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask. */
        mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
            | mask_lo >> PAGE_SHIFT;

        /* This works correctly if size is a power of two, i.e. a
           contiguous range. */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;
}
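/* A worked example of the decode above, assuming PAGE_SHIFT == 12 and
 * a 36-bit physical address space (size_or_mask == 0xff000000): the
 * pair mask_hi = 0xf, mask_lo = 0xfff00800 (valid bit set) yields
 *
 *     0xff000000 | 0xf << 20 | 0xfff00800 >> 12  ==  0xffffff00
 *
 * so *size = -0xffffff00 = 0x100 pages = 1 MiB.  The negation trick
 * only works because a single MTRR's mask must describe a contiguous,
 * power-of-two-sized range. */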
static int set_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        unsigned int i, lo, hi;
        int changed = FALSE;

        rdmsr(MTRRfix64K_00000_MSR, lo, hi);
        if (p[0] != lo || p[1] != hi) {
                mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
                changed = TRUE;
        }
        for (i = 0; i < 2; i++) {
                rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
                if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix16K_80000_MSR + i,
                                   p[2 + i * 2], p[3 + i * 2]);
                        changed = TRUE;
                }
        }
        for (i = 0; i < 8; i++) {
                rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
                if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix4K_C0000_MSR + i,
                                   p[6 + i * 2], p[7 + i * 2]);
                        changed = TRUE;
                }
        }
        return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        int changed = FALSE;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = TRUE;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);
        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = TRUE;
        }
        return changed;
}
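/* The compare masks above follow the architectural register layout:
 * 0xfffff0ff keeps PhysBase's type field (bits 7:0) and address bits
 * 31:12 while ignoring the reserved bits 11:8; 0xfffff800 keeps
 * PhysMask's valid bit (11) and address bits 31:12.  size_and_mask
 * limits the high-word compare to the address bits the CPU actually
 * implements. */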
static u32 deftype_lo, deftype_hi;
static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /* set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
                             (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
        unsigned long cr0;

        /* Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots */
        spin_lock(&set_atomicity_lock);

        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | 0x40000000;  /* set CD flag */
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
        /* Flush TLBs (no need to flush caches - they are disabled) */
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}
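/* prepare_set()/post_set() together implement the architecturally
 * required sequence for changing MTRRs: interrupts off (done by the
 * caller), enter no-fill cache mode and flush the caches, flush the
 * TLBs, disable the MTRRs, make the change, then undo each step in
 * reverse order. */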
static void generic_set_all(void)
{
        unsigned long mask, count;
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state();

        post_set();
        local_irq_restore(flags);

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
        unsigned long flags;
        struct mtrr_var_range *vr;

        vr = &mtrr_state.var_ranges[reg];

        local_irq_save(flags);
        prepare_set();

        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
                mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
                memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                vr->base_lo = base << PAGE_SHIFT | type;
                vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
                vr->mask_lo = -size << PAGE_SHIFT | 0x800;
                vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

                mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }

        post_set();
        local_irq_restore(flags);
}
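/* A worked example of the encoding above, assuming PAGE_SHIFT == 12:
 * for a 1 MiB region at 4 GiB - 1 MiB, base = 0xFFF00 pages and
 * size = 0x100 pages, so
 *
 *     vr->mask_lo = (-0x100 << 12) | 0x800 = 0xFFF00800
 *
 * i.e. -size in pages shifted into mask position, with the valid bit
 * (11) set.  This is exactly the form generic_get_mtrr() decodes. */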
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /* For Intel PPro stepping <= 7, must be 4 MiB aligned
           and not touch 0x70000000->0x7003FFFF */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        if (base < 0x100) {
                printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
                       base, size);
                return -EINVAL;
        }
        /* Check upper bits of base and last are equal and lower bits are 0
           for base and 1 for last: this verifies that size is a power of
           two and that base is aligned on a size boundary.  E.g. base =
           0x40000 pages with size = 0x40000 pages gives last = 0x7FFFF;
           both reduce to 1 after 18 shifts, so the region is accepted. */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1)
                ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                       base, size);
                return -EINVAL;
        }
        return 0;
}
static int generic_have_wrcomb(void)
{
        unsigned long config, dummy;
        rdmsr(MTRRcap_MSR, config, dummy);
        /* Bit 10 of MTRRcap is the architectural write-combining flag. */
        return (config & (1 << 10));
}
int positive_have_wrcomb(void)
{
        return 1;
}
/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if      = 1,
        .set_all           = generic_set_all,
        .get               = generic_get_mtrr,
        .get_free_region   = generic_get_free_region,
        .set               = generic_set_mtrr,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = generic_have_wrcomb,
};