x86, amd: Extend AMD northbridge caching code to support "Link Control" devices
[pandora-kernel.git] arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

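/* Cached GART flush words, one per northbridge, read from the misc device. */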
static u32 *flush_words;

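/* PCI IDs of the northbridge misc device on supported CPU families. */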
struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

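/* PCI IDs of the northbridge "Link Control" device; currently family 0x15 only. */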
static struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
        {}
};

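/* Bus/device ranges scanned for northbridge devices: { bus, dev_base, dev_limit }. */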
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

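/* Global bookkeeping: number of northbridges found, their cached PCI devices and feature flags. */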
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

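/*
 * Walk the PCI device list, starting after @dev, and return the next device
 * matching one of the given IDs (or NULL when none is left).
 */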
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

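/*
 * Count the northbridge misc devices, allocate one amd_northbridge entry per
 * node, cache the misc and link PCI devices for each, and set feature flags
 * (GART, L3 Cache Index Disable) based on the CPU family.
 */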
int amd_cache_northbridges(void)
{
        int i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        /* some CPU families (e.g. family 0x11) do not support GART */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
        struct pci_device_id *id;
        u32 vendor = device & 0xffff;
        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return 1;
        return 0;
}

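/*
 * Cache the GART flush control word of every northbridge (PCI config register
 * 0x9c of the misc device); amd_flush_garts() writes it back with bit 0 set
 * to trigger a flush.
 */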
int amd_cache_gart(void)
{
        int i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

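/*
 * Flush the GART TLB of every northbridge: set bit 0 of the cached flush word
 * in each misc device and wait until the hardware clears it again.
 */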
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

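/* Boot-time setup: enumerate the northbridges and cache the GART flush words. */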
static __init int init_amd_nbs(void)
{
        int err = 0;

        err = amd_cache_northbridges();

        if (err < 0)
                printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

        if (amd_cache_gart() < 0)
                printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
                       "GART support disabled.\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);