drivers/char/agp/amd-k7-agp.c
/*
 * AMD K7 AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"

#define AMD_MMBASE      0x14
#define AMD_APSIZE      0xac
#define AMD_MODECNTL    0xb0
#define AMD_MODECNTL2   0xb2
#define AMD_GARTENABLE  0x02    /* In mmio region (16-bit register) */
#define AMD_ATTBASE     0x04    /* In mmio region (32-bit register) */
#define AMD_TLBFLUSH    0x0c    /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY  0x10    /* In mmio region (32-bit register) */

static struct pci_device_id agp_amdk7_pci_table[];

struct amd_page_map {
        unsigned long *real;
        unsigned long __iomem *remapped;
};
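
/*
 * "real" is the kernel virtual address of the single page holding the
 * table entries; "remapped" is the alias those entries are written
 * through: an ioremap_nocache() mapping of the same page on non-x86, or
 * the page itself after set_memory_uc() on x86 (see amd_create_page_map()
 * below), so that updates are not left in the CPU cache where the chipset
 * cannot see them.
 */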

static struct _amd_irongate_private {
        volatile u8 __iomem *registers;
        struct amd_page_map **gatt_pages;
        int num_tables;
} amd_irongate_private;
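
/*
 * The GART here is a two-level structure: one page directory (set up in
 * amd_create_gatt_table()) plus num_tables GATT pages tracked in
 * gatt_pages[].  Each GATT page maps 4MB of aperture, which is why the
 * page count is num_entries / 1024 and the page directory is programmed
 * in 0x00400000 steps.
 */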

static int amd_create_page_map(struct amd_page_map *page_map)
{
        int i;

        page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
        if (page_map->real == NULL)
                return -ENOMEM;

#ifndef CONFIG_X86
        SetPageReserved(virt_to_page(page_map->real));
        global_cache_flush();
        page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
                                            PAGE_SIZE);
        if (page_map->remapped == NULL) {
                ClearPageReserved(virt_to_page(page_map->real));
                free_page((unsigned long) page_map->real);
                page_map->real = NULL;
                return -ENOMEM;
        }
        global_cache_flush();
#else
        set_memory_uc((unsigned long)page_map->real, 1);
        page_map->remapped = page_map->real;
#endif

        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
                writel(agp_bridge->scratch_page, page_map->remapped+i);
                readl(page_map->remapped+i);    /* PCI Posting. */
        }

        return 0;
}

static void amd_free_page_map(struct amd_page_map *page_map)
{
#ifndef CONFIG_X86
        iounmap(page_map->remapped);
        ClearPageReserved(virt_to_page(page_map->real));
#else
        set_memory_wb((unsigned long)page_map->real, 1);
#endif
        free_page((unsigned long) page_map->real);
}

static void amd_free_gatt_pages(void)
{
        int i;
        struct amd_page_map **tables;
        struct amd_page_map *entry;

        tables = amd_irongate_private.gatt_pages;
        for (i = 0; i < amd_irongate_private.num_tables; i++) {
                entry = tables[i];
                if (entry != NULL) {
                        if (entry->real != NULL)
                                amd_free_page_map(entry);
                        kfree(entry);
                }
        }
        kfree(tables);
        amd_irongate_private.gatt_pages = NULL;
}

static int amd_create_gatt_pages(int nr_tables)
{
        struct amd_page_map **tables;
        struct amd_page_map *entry;
        int retval = 0;
        int i;

        tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;

        for (i = 0; i < nr_tables; i++) {
                entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
                tables[i] = entry;
                if (entry == NULL) {
                        retval = -ENOMEM;
                        break;
                }
                retval = amd_create_page_map(entry);
                if (retval != 0)
                        break;
        }
        amd_irongate_private.num_tables = i;
        amd_irongate_private.gatt_pages = tables;

        if (retval != 0)
                amd_free_gatt_pages();

        return retval;
}

/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
        GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
        GET_PAGE_DIR_IDX(addr)]->remapped)
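
/*
 * A worked example of the decode above (offset chosen for illustration,
 * and assuming gart_bus_addr is at least 4MB aligned, as an aperture base
 * will be): for addr = gart_bus_addr + 0x00abc123,
 *   GET_PAGE_DIR_IDX(addr) = 0xabc123 >> 22              = 2     (page dir slot)
 *   GET_GATT_OFF(addr)     = (0xabc123 & 0x3ff000) >> 12 = 0x2bc (GATT entry)
 * and the low 12 bits (0x123) are the offset within the 4K page, which the
 * GATT does not translate.
 */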

static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
        struct aper_size_info_lvl2 *value;
        struct amd_page_map page_dir;
        unsigned long addr;
        int retval;
        u32 temp;
        int i;

        value = A_SIZE_LVL2(agp_bridge->current_size);
        retval = amd_create_page_map(&page_dir);
        if (retval != 0)
                return retval;

        retval = amd_create_gatt_pages(value->num_entries / 1024);
        if (retval != 0) {
                amd_free_page_map(&page_dir);
                return retval;
        }

        agp_bridge->gatt_table_real = (u32 *)page_dir.real;
        agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
        agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

        /* Get the address for the gart region.
         * This is a bus address even on the Alpha, because it's
         * used to program the AGP master, not the CPU.
         */

        pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
        addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        agp_bridge->gart_bus_addr = addr;

        /* Program the page directory: one entry per 4MB of aperture */
        for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
                writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
                        page_dir.remapped+GET_PAGE_DIR_OFF(addr));
                readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));        /* PCI Posting. */
        }

        return 0;
}

static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
        struct amd_page_map page_dir;

        page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
        page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

        amd_free_gatt_pages();
        amd_free_page_map(&page_dir);
        return 0;
}

static int amd_irongate_fetch_size(void)
{
        int i;
        u32 temp;
        struct aper_size_info_lvl2 *values;

        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = (temp & 0x0000000e);
        values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp == values[i].size_value) {
                        agp_bridge->previous_size =
                            agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }

        return 0;
}

static int amd_irongate_configure(void)
{
        struct aper_size_info_lvl2 *current_size;
        u32 temp;
        u16 enable_reg;

        current_size = A_SIZE_LVL2(agp_bridge->current_size);

        if (!amd_irongate_private.registers) {
                /* Get the memory mapped registers */
                pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
                temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
                amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
                if (!amd_irongate_private.registers)
                        return -ENOMEM;
        }

        /* Write out the address of the gatt table */
        writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
        readl(amd_irongate_private.registers+AMD_ATTBASE);      /* PCI Posting. */

        /* Write the Sync register */
        pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

        /* Set indexing mode */
        pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

        /* Write the enable register */
        enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
        enable_reg = (enable_reg | 0x0004);
        writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
        readw(amd_irongate_private.registers+AMD_GARTENABLE);   /* PCI Posting. */

        /* Write out the size register */
        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
        pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

        /* Flush the tlb */
        writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
        readl(amd_irongate_private.registers+AMD_TLBFLUSH);     /* PCI Posting. */
        return 0;
}

static void amd_irongate_cleanup(void)
{
        struct aper_size_info_lvl2 *previous_size;
        u32 temp;
        u16 enable_reg;

        previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

        enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
        enable_reg = (enable_reg & ~(0x0004));
        writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
        readw(amd_irongate_private.registers+AMD_GARTENABLE);   /* PCI Posting. */

        /* Write back the previous size and disable gart translation */
        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
        pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
        iounmap((void __iomem *) amd_irongate_private.registers);
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can contain a large
 * number of entries.
 */

static void amd_irongate_tlbflush(struct agp_memory *temp)
{
        writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
        readl(amd_irongate_private.registers+AMD_TLBFLUSH);     /* PCI Posting. */
}

static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        int i, j, num_entries;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

        if (type != 0 || mem->type != 0)
                return -EINVAL;

        if ((pg_start + mem->page_count) > num_entries)
                return -EINVAL;

        j = pg_start;
        while (j < (pg_start + mem->page_count)) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                global_cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                writel(agp_generic_mask_memory(agp_bridge,
                        mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
                readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
        }
        amd_irongate_tlbflush(mem);
        return 0;
}

static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        int i;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        if (type != 0 || mem->type != 0)
                return -EINVAL;

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
                readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
        }

        amd_irongate_tlbflush(mem);
        return 0;
}

static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
        {2048, 524288, 0x0000000c},
        {1024, 262144, 0x0000000a},
        {512, 131072, 0x00000008},
        {256, 65536, 0x00000006},
        {128, 32768, 0x00000004},
        {64, 16384, 0x00000002},
        {32, 8192, 0x00000000}
};
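
/*
 * Each entry above is { aperture size in MB, number of GATT entries,
 * APSIZE size_value }.  The entry count is just the aperture divided into
 * 4K pages: e.g. 32MB / 4KB = 8192 entries, i.e. 8192 / 1024 = 8 GATT
 * pages for the smallest aperture.
 */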

static const struct gatt_mask amd_irongate_masks[] =
{
        {.mask = 1, .type = 0}
};
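
/*
 * The single mask above is presumably the "entry valid" bit:
 * agp_generic_mask_memory() ORs masks[0].mask into each physical address
 * before amd_insert_memory() writes it into the GATT, matching the |1
 * applied to the page directory entries in amd_create_gatt_table().
 */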

static const struct agp_bridge_driver amd_irongate_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = amd_irongate_sizes,
        .size_type              = LVL2_APER_SIZE,
        .num_aperture_sizes     = 7,
        .configure              = amd_irongate_configure,
        .fetch_size             = amd_irongate_fetch_size,
        .cleanup                = amd_irongate_cleanup,
        .tlb_flush              = amd_irongate_tlbflush,
        .mask_memory            = agp_generic_mask_memory,
        .masks                  = amd_irongate_masks,
        .agp_enable             = agp_generic_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = amd_create_gatt_table,
        .free_gatt_table        = amd_free_gatt_table,
        .insert_memory          = amd_insert_memory,
        .remove_memory          = amd_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_7006,
                .chipset_name   = "Irongate",
        },
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_700E,
                .chipset_name   = "761",
        },
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_700C,
                .chipset_name   = "760MP",
        },
        { }, /* dummy final entry, always present */
};

static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;
        u8 cap_ptr;
        int j;

        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
        if (!cap_ptr)
                return -ENODEV;

        j = ent - agp_amdk7_pci_table;
        dev_info(&pdev->dev, "AMD %s chipset\n",
                 amd_agp_device_ids[j].chipset_name);

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &amd_irongate_driver;
        bridge->dev_private_data = &amd_irongate_private;
        bridge->dev = pdev;
        bridge->capndx = cap_ptr;

        /* 751 Errata (22564_B-1.PDF)
           erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
           system controller may experience noise due to strong drive strengths
         */
        if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
                struct pci_dev *gfxcard=NULL;

                cap_ptr = 0;
                while (!cap_ptr) {
                        gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
                        if (!gfxcard) {
                                dev_info(&pdev->dev, "no AGP VGA controller\n");
                                return -ENODEV;
                        }
                        cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
                }

                /* With so many variants of NVidia cards, it's simpler just
                   to blacklist them all, and then whitelist them as needed
                   (if necessary at all). */
                if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
                        agp_bridge->flags |= AGP_ERRATA_1X;
                        dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
                }
                pci_dev_put(gfxcard);
        }

        /* 761 Errata (23613_F.pdf)
         * Revisions B0/B1 were a disaster.
         * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
         * erratum 45: Timing problem prevents fast writes -- Disable fast write.
         * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
         * With this lot disabled, we should prevent lockups. */
        if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
                if (pdev->revision == 0x10 || pdev->revision == 0x11) {
                        agp_bridge->flags = AGP_ERRATA_FASTWRITES;
                        agp_bridge->flags |= AGP_ERRATA_SBA;
                        agp_bridge->flags |= AGP_ERRATA_1X;
                        dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
                }
        }

        /* Fill in the mode register */
        pci_read_config_dword(pdev,
                        bridge->capndx+PCI_AGP_STATUS,
                        &bridge->mode);

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
}

static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
}

#ifdef CONFIG_PM

static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static int agp_amdk7_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        return amd_irongate_driver.configure();
}

#endif /* CONFIG_PM */

/* must be the same order as name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_7006,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_700E,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_700C,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        { }
};

MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);

static struct pci_driver agp_amdk7_pci_driver = {
        .name           = "agpgart-amdk7",
        .id_table       = agp_amdk7_pci_table,
        .probe          = agp_amdk7_probe,
        .remove         = agp_amdk7_remove,
#ifdef CONFIG_PM
        .suspend        = agp_amdk7_suspend,
        .resume         = agp_amdk7_resume,
#endif
};

static int __init agp_amdk7_init(void)
{
        if (agp_off)
                return -EINVAL;
        return pci_register_driver(&agp_amdk7_pci_driver);
}

static void __exit agp_amdk7_cleanup(void)
{
        pci_unregister_driver(&agp_amdk7_pci_driver);
}

module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);

MODULE_LICENSE("GPL and additional rights");