/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000  Richard Gooch
   Copyright (c) 2002	    Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),
   section 11.11.7

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
   on 6-7 March 2002.
   Source: Intel Architecture Software Developers Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#define DEBUG

#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"
u32 num_var_ranges;
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];

const struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

void set_mtrr_ops(const struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
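
/*
 * The per-vendor drivers use this at init time to register themselves;
 * for instance, the pre-Athlon AMD driver in amd.c effectively does
 * set_mtrr_ops(&amd_mtrr_ops), keyed by ops->vendor.
 */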
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/*
		 * Intel 450NX errata # 23. Non-ascending cacheline evictions
		 * to write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* Determine the number of variable MTRRs and cache it in num_var_ranges */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel())
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}
static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};
/**
 * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns nothing.
 */
static void ipi_handler(void *info)
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/* The master has cleared me to execute */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init) {
		/* Initialize the MTRRs in addition to the synchronisation */
		mtrr_if->set_all();
	}

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}
static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
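
/*
 * For example: a new UNCACHABLE region may overlap a WRBACK one (UC wins
 * by hardware precedence), and WRTHROUGH may overlap WRBACK; a WRCOMB
 * region overlapping WRBACK, by contrast, is treated as a conflict.
 */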
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently,
 * so we call mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate
 * to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * gets optimized away when CONFIG_SMP is clear.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * so we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
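
/*
 * To illustrate the rendezvous above on, say, a 4-CPU box: data.count
 * starts at 3; each AP takes the IPI, disables interrupts and decrements
 * it. When it hits 0 we raise the gate and everyone reprograms the MTRRs;
 * two further count/gate rounds then guarantee that nobody can still be
 * looking at 'data' by the time we return.
 */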
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_if)
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warning("mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warning("mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		pr_warning("mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		pr_warning("mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
				" 0x%lx000,0x%lx000\n", base, size, lbase,
				lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
				base, size, mtrr_attrib_to_str(ltype),
				mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("mtrr: no more MTRRs available\n");
	}
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
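
/*
 * Note the page units: to map 32 MB at physical 0xd0000000, say, one would
 * call mtrr_add_page(0xd0000, 0x2000, MTRR_TYPE_WRCOMB, true), since
 * 0xd0000000 >> PAGE_SHIFT == 0xd0000 and 0x2000000 >> PAGE_SHIFT == 0x2000.
 */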
static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
		pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
EXPORT_SYMBOL(mtrr_add);
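
/*
 * A minimal usage sketch (hypothetical driver, note the byte units here):
 *
 *	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
 *
 * where fb_base and fb_size are a power-of-two sized and aligned
 * framebuffer BAR. A negative reg just means no write-combining; the
 * driver can carry on without it.
 */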
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warning("mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warning("mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
EXPORT_SYMBOL(mtrr_del);
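
/*
 * To undo the sketch above: mtrr_del(reg, fb_base, fb_size) drops the
 * usage count taken by mtrr_add(); base and size are ignored when a
 * valid reg is passed, and the register is freed once the count is zero.
 */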
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
/*
 * The suspend/resume methods are only for CPUs without MTRRs. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
			     &mtrr_value[i].lsize,
			     &mtrr_value[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				 mtrr_value[i].lsize,
				 mtrr_value[i].ltype);
		}
	}
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};
int __initdata changed_by_mtrr_cleanup;

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;		/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD specific MSR, but we assume (hope?) that
		 * Intel will implement it too when they extend the address
		 * bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
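			/*
			 * Worked example: a CPU reporting 40 physical address
			 * bits gives size_or_mask = ~((1ULL << 28) - 1)
			 * = 0xfffffffff0000000 and size_and_mask = 0x0ff00000,
			 * both expressed in 4k-page units.
			 */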
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * The VIA C* family has Intel-style MTRRs,
			 * but doesn't support PAE
			 */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_trim_uncached_memory(phys_addr);
			}
		}
	}
}
void mtrr_ap_init(void)
{
	if (!use_intel() || mtrr_aps_delayed_init)
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called during cpu boot time
	 * and holding the lock breaks it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early in software resume, when there absolutely are
	 *      no MTRR entry changes;
	 *
	 *   2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
	 *      lock to prevent MTRR entry changes
	 */
	set_mtrr(~0U, 0, 0, 0);
}
/*
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
void set_mtrr_aps_delayed_init(void)
{
	if (!use_intel())
		return;

	mtrr_aps_delayed_init = true;
}
/*
 * MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel() || !mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}
void mtrr_bp_restore(void)
{
	if (!use_intel())
		return;

	mtrr_if->set_all();
}
static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * These CPUs have no MTRRs and seem not to support SMP. They have
	 * their own specific drivers; we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU that supports
	 * suspend/resume? If not, we should remove the code.
	 */
	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);

	return 0;
}
subsys_initcall(mtrr_init_finialize);