arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
        {}
};

/* each entry: { bus, dev_base, dev_limit } */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

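/*
 * Usage sketch (illustrative, not part of this file): early boot code
 * such as the aperture setup walks these ranges when probing config
 * space for northbridge devices, roughly:
 *
 *	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 *		int bus  = amd_nb_bus_dev_ranges[i].bus;
 *		int slot = amd_nb_bus_dev_ranges[i].dev_base;
 *		...
 *	}
 */
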
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

/* Return the next PCI device after @dev that matches one of @ids, or NULL. */
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

int amd_cache_northbridges(void)
{
        int i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        /* some CPU families (e.g. family 0x11) do not support GART */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

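/*
 * Usage sketch (illustrative, not compiled here): a driver needing
 * per-node access to the misc (function 3) device populates the cache
 * and then walks it. The 0x9c offset is just the GART flush word
 * register used further down in this file.
 *
 *	u32 val;
 *	int i;
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *
 *	for (i = 0; i < amd_nb_num(); i++)
 *		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &val);
 */
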
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return 1;
        return 0;
}

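/*
 * Caller sketch (illustrative): @device is the combined vendor/device
 * dword from PCI config space offset 0x00 (vendor ID in the low 16
 * bits, device ID in the high 16). Early boot code typically feeds it
 * the direct config accessor from <asm/pci-direct.h>; the handler
 * called below is hypothetical:
 *
 *	if (early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 *		setup_northbridge(bus, slot);
 */
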
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
        int cuid = 0;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
        cuid = cpu_data(cpu).compute_unit_id;
#endif
        /* each compute unit owns a 4-bit subcache enable field */
        return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid = 0;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

#ifdef CONFIG_SMP
        cuid = cpu_data(cpu).compute_unit_id;
#endif
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

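/*
 * Usage sketch (illustrative): disable subcache 3 for a CPU's compute
 * unit, then restore the previous mask later.
 *
 *	int old = amd_get_subcaches(cpu);
 *
 *	if (amd_set_subcaches(cpu, old & ~0x8))
 *		return -EINVAL;
 *	...
 *	amd_set_subcaches(cpu, old);
 */
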
int amd_cache_gart(void)
{
        int i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another one is pending. This whole thing is so expensive
         * anyway that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

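/*
 * Usage sketch (illustrative): the GART IOMMU code calls this after
 * rewriting GART PTEs so the northbridge TLBs pick up the change,
 * along the lines of:
 *
 *	iommu_gatt_base[idx] = GPTE_ENCODE(phys_addr);
 *	amd_flush_garts();
 */
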
static __init int init_amd_nbs(void)
{
        int err;

        err = amd_cache_northbridges();
        if (err < 0)
                printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

        if (amd_cache_gart() < 0)
                printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, GART support disabled.\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);