/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

/* Per-northbridge copy of config register 0x9c, used for GART flushes. */
static u32 *flush_words;
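
/*
 * PCI device IDs of the northbridge "misc" (function 3) and "link"
 * (function 4) devices on the supported CPU families.
 */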
struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
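
/*
 * Enumerate the northbridge misc/link devices once and cache them in
 * amd_northbridges, so later callers can look them up via amd_nb_num()
 * and node_to_amd_nb() without rescanning the PCI bus.
 */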
int amd_cache_northbridges(void)
{
	int i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
	struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return 1;
	return 0;
}
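
/*
 * Read and cache each northbridge's current flush word (misc device
 * config register 0x9c) so amd_flush_garts() can rewrite it with the
 * flush bit set.
 */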
int amd_cache_gart(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}
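
/*
 * Flush the GART on every northbridge by setting bit 0 of the cached
 * flush word and waiting for the hardware to clear it again.
 */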
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();
	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);