/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
5 #include <linux/types.h>
6 #include <linux/slab.h>
7 #include <linux/init.h>
8 #include <linux/errno.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <asm/amd_nb.h>
13 static u32 *flush_words;
15 const struct pci_device_id amd_nb_misc_ids[] = {
16 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
17 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
18 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
21 EXPORT_SYMBOL(amd_nb_misc_ids);
23 static struct pci_device_id amd_nb_link_ids[] = {
24 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
28 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
35 struct amd_northbridge_info amd_northbridges;
36 EXPORT_SYMBOL(amd_northbridges);
38 static struct pci_dev *next_northbridge(struct pci_dev *dev,
39 const struct pci_device_id *ids)
42 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
45 } while (!pci_match_id(ids, dev));
49 int amd_cache_northbridges(void)
52 struct amd_northbridge *nb;
53 struct pci_dev *misc, *link;
59 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
65 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
69 amd_northbridges.nb = nb;
70 amd_northbridges.num = i;
73 for (i = 0; i != amd_nb_num(); i++) {
74 node_to_amd_nb(i)->misc = misc =
75 next_northbridge(misc, amd_nb_misc_ids);
76 node_to_amd_nb(i)->link = link =
77 next_northbridge(link, amd_nb_link_ids);
80 /* some CPU families (e.g. family 0x11) do not support GART */
81 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
82 boot_cpu_data.x86 == 0x15)
83 amd_northbridges.flags |= AMD_NB_GART;
86 * Some CPU families support L3 Cache Index Disable. There are some
87 * limitations because of E382 and E388 on family 0x10.
89 if (boot_cpu_data.x86 == 0x10 &&
90 boot_cpu_data.x86_model >= 0x8 &&
91 (boot_cpu_data.x86_model > 0x9 ||
92 boot_cpu_data.x86_mask >= 0x1))
93 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
95 if (boot_cpu_data.x86 == 0x15)
96 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
98 /* L3 cache partitioning is supported on family 0x15 */
99 if (boot_cpu_data.x86 == 0x15)
100 amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
104 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
106 /* Ignores subdevice/subvendor but as far as I can figure out
107 they're useless anyways */
108 int __init early_is_amd_nb(u32 device)
110 const struct pci_device_id *id;
111 u32 vendor = device & 0xffff;
114 for (id = amd_nb_misc_ids; id->vendor; id++)
115 if (vendor == id->vendor && device == id->device)
120 int amd_get_subcaches(int cpu)
122 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
126 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
129 pci_read_config_dword(link, 0x1d4, &mask);
132 cuid = cpu_data(cpu).compute_unit_id;
134 return (mask >> (4 * cuid)) & 0xf;
137 int amd_set_subcaches(int cpu, int mask)
139 static unsigned int reset, ban;
140 struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
144 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
147 /* if necessary, collect reset state of L3 partitioning and BAN mode */
149 pci_read_config_dword(nb->link, 0x1d4, &reset);
150 pci_read_config_dword(nb->misc, 0x1b8, &ban);
154 /* deactivate BAN mode if any subcaches are to be disabled */
156 pci_read_config_dword(nb->misc, 0x1b8, ®);
157 pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
161 cuid = cpu_data(cpu).compute_unit_id;
164 mask |= (0xf ^ (1 << cuid)) << 26;
166 pci_write_config_dword(nb->link, 0x1d4, mask);
168 /* reset BAN mode if L3 partitioning returned to reset state */
169 pci_read_config_dword(nb->link, 0x1d4, ®);
171 pci_read_config_dword(nb->misc, 0x1b8, ®);
173 pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
179 int amd_cache_gart(void)
183 if (!amd_nb_has_feature(AMD_NB_GART))
186 flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
188 amd_northbridges.flags &= ~AMD_NB_GART;
192 for (i = 0; i != amd_nb_num(); i++)
193 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
199 void amd_flush_garts(void)
203 static DEFINE_SPINLOCK(gart_lock);
205 if (!amd_nb_has_feature(AMD_NB_GART))
208 /* Avoid races between AGP and IOMMU. In theory it's not needed
209 but I'm not sure if the hardware won't lose flush requests
210 when another is pending. This whole thing is so expensive anyways
211 that it doesn't matter to serialize more. -AK */
212 spin_lock_irqsave(&gart_lock, flags);
214 for (i = 0; i < amd_nb_num(); i++) {
215 pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
219 for (i = 0; i < amd_nb_num(); i++) {
221 /* Make sure the hardware actually executed the flush*/
223 pci_read_config_dword(node_to_amd_nb(i)->misc,
230 spin_unlock_irqrestore(&gart_lock, flags);
232 printk("nothing to flush?\n");
234 EXPORT_SYMBOL_GPL(amd_flush_garts);
236 static __init int init_amd_nbs(void)
240 err = amd_cache_northbridges();
243 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
245 if (amd_cache_gart() < 0)
246 printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
247 "GART support disabled.\n");
252 /* This has to go after the PCI subsystem */
253 fs_initcall(init_amd_nbs);