x86, amd: Support L3 Cache Partitioning on AMD family 0x15 CPUs
[pandora-kernel.git] / arch / x86 / kernel / amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

/* Scan the PCI bus starting after @dev for the next device matching @ids. */
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	int i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

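/*
 * Illustrative sketch, not part of the original file: consumers are
 * expected to call amd_cache_northbridges() once and then key off the
 * cached feature flags. has_l3_partitioning() is a hypothetical helper
 * shown only to demonstrate the calling pattern.
 */
static bool __maybe_unused has_l3_partitioning(void)
{
	if (amd_cache_northbridges() < 0)
		return false;

	return amd_nb_has_feature(AMD_NB_L3_PARTITIONING);
}
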
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
	struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return 1;
	return 0;
}

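/*
 * Illustrative sketch, not part of the original file: early boot code
 * reads the vendor/device dword at config offset 0 and hands it to
 * early_is_amd_nb(); the low 16 bits carry the vendor ID and the high
 * 16 bits the device ID, matching the unpacking above. The helper name
 * find_early_amd_nb() is hypothetical; read_pci_config() is the early
 * PCI accessor from <asm/pci-direct.h>.
 */
static int __init __maybe_unused find_early_amd_nb(void)
{
	int slot;

	/* northbridges of nodes 0-7 sit at bus 0, devices 0x18-0x1f */
	for (slot = 0x18; slot < 0x20; slot++) {
		/* function 3 is the misc control function */
		u32 header = read_pci_config(0, slot, 3, 0x00);

		if (early_is_amd_nb(header))
			return slot;
	}

	return -1;
}
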
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	/* 0x1d4 holds the subcache enable bits, four per compute unit */
	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

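/*
 * Illustrative sketch, not part of the original file: how a caller such
 * as the L3 cacheinfo code might clear one subcache enable bit for a
 * CPU's compute unit. amd_disable_one_subcache() is a hypothetical
 * helper built only from the two accessors above.
 */
static int __maybe_unused amd_disable_one_subcache(int cpu, int subcache)
{
	int mask = amd_get_subcaches(cpu);

	if (subcache < 0 || subcache > 3)
		return -EINVAL;

	mask &= ~(1 << subcache);	/* clear one enable bit ... */
	if (!mask)
		return -EINVAL;		/* ... but keep at least one enabled */

	return amd_set_subcaches(cpu, mask);
}
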
int amd_cache_gart(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;

		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

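/*
 * Illustrative sketch, not part of the original file: the GART IOMMU
 * path flushes after rewriting a GART PTE so stale northbridge TLB
 * entries cannot be used. gart_update_and_flush() and its PTE argument
 * are hypothetical simplifications of the real pci-gart_64.c code.
 */
static void __maybe_unused gart_update_and_flush(u32 *gart_pte, u32 new_pte)
{
	*gart_pte = new_pte;	/* update the in-memory GART entry */
	wmb();			/* order the PTE write before the flush */
	amd_flush_garts();	/* drop stale translations on every node */
}
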
static __init int init_amd_nbs(void)
{
	int err;

	err = amd_cache_northbridges();
	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);