drivers/char/agp/amd-k7-agp.c
/*
 * AMD K7 AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "agp.h"

#define AMD_MMBASE      0x14
#define AMD_APSIZE      0xac
#define AMD_MODECNTL    0xb0
#define AMD_MODECNTL2   0xb2
#define AMD_GARTENABLE  0x02    /* In mmio region (16-bit register) */
#define AMD_ATTBASE     0x04    /* In mmio region (32-bit register) */
#define AMD_TLBFLUSH    0x0c    /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY  0x10    /* In mmio region (32-bit register) */

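/*
 * Register layout as used below: AMD_MMBASE is the PCI BAR at config
 * offset 0x14 pointing to a 4K memory-mapped register block that holds
 * the GART enable, attribute-table base, TLB-flush and cache-entry
 * registers, while the aperture-size and mode-control registers live
 * directly in PCI configuration space.
 */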
static struct pci_device_id agp_amdk7_pci_table[];

struct amd_page_map {
        unsigned long *real;
        unsigned long __iomem *remapped;
};

static struct _amd_irongate_private {
        volatile u8 __iomem *registers;
        struct amd_page_map **gatt_pages;
        int num_tables;
} amd_irongate_private;

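/*
 * Allocate one kernel page for use as a GATT page (or the page
 * directory) and make sure the CPU sees it uncached: on non-x86 the
 * page is remapped with ioremap_nocache(), on x86 it is switched to
 * uncached with set_memory_uc() and accessed directly.  Every entry
 * is then initialised to point at the scratch page.
 */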
static int amd_create_page_map(struct amd_page_map *page_map)
{
        int i;

        page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
        if (page_map->real == NULL)
                return -ENOMEM;

#ifndef CONFIG_X86
        SetPageReserved(virt_to_page(page_map->real));
        global_cache_flush();
        page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
                                            PAGE_SIZE);
        if (page_map->remapped == NULL) {
                ClearPageReserved(virt_to_page(page_map->real));
                free_page((unsigned long) page_map->real);
                page_map->real = NULL;
                return -ENOMEM;
        }
        global_cache_flush();
#else
        set_memory_uc((unsigned long)page_map->real, 1);
        page_map->remapped = page_map->real;
#endif

        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
                writel(agp_bridge->scratch_page, page_map->remapped+i);
                readl(page_map->remapped+i);    /* PCI Posting. */
        }

        return 0;
}

static void amd_free_page_map(struct amd_page_map *page_map)
{
#ifndef CONFIG_X86
        iounmap(page_map->remapped);
        ClearPageReserved(virt_to_page(page_map->real));
#else
        set_memory_wb((unsigned long)page_map->real, 1);
#endif
        free_page((unsigned long) page_map->real);
}

static void amd_free_gatt_pages(void)
{
        int i;
        struct amd_page_map **tables;
        struct amd_page_map *entry;

        tables = amd_irongate_private.gatt_pages;
        for (i = 0; i < amd_irongate_private.num_tables; i++) {
                entry = tables[i];
                if (entry != NULL) {
                        if (entry->real != NULL)
                                amd_free_page_map(entry);
                        kfree(entry);
                }
        }
        kfree(tables);
        amd_irongate_private.gatt_pages = NULL;
}

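/*
 * Build the array of second-level GATT pages.  The array is allocated
 * with one extra NULL slot so it is NULL-terminated, and num_tables
 * records how many page maps were actually created so that a partial
 * failure can be unwound by amd_free_gatt_pages().
 */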
static int amd_create_gatt_pages(int nr_tables)
{
        struct amd_page_map **tables;
        struct amd_page_map *entry;
        int retval = 0;
        int i;

        tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;

        for (i = 0; i < nr_tables; i++) {
                entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
                tables[i] = entry;
                if (entry == NULL) {
                        retval = -ENOMEM;
                        break;
                }
                retval = amd_create_page_map(entry);
                if (retval != 0)
                        break;
        }
        amd_irongate_private.num_tables = i;
        amd_irongate_private.gatt_pages = tables;

        if (retval != 0)
                amd_free_gatt_pages();

        return retval;
}

/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
        GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
        GET_PAGE_DIR_IDX(addr)]->remapped)

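/*
 * Worked example of the two-level lookup above (values purely
 * illustrative): each page-directory entry covers 4MB, i.e. 1024
 * GATT entries of 4K each.  If gart_bus_addr were 0xe0000000 and
 * addr 0xe0801000, then GET_PAGE_DIR_IDX(addr) = 0x382 - 0x380 = 2
 * and GET_GATT_OFF(addr) = 1, so the entry lives at
 * gatt_pages[2]->remapped[1].
 */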
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
        struct aper_size_info_lvl2 *value;
        struct amd_page_map page_dir;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;
        int retval;
        u32 temp;
        int i;

        value = A_SIZE_LVL2(agp_bridge->current_size);
        retval = amd_create_page_map(&page_dir);
        if (retval != 0)
                return retval;

        retval = amd_create_gatt_pages(value->num_entries / 1024);
        if (retval != 0) {
                amd_free_page_map(&page_dir);
                return retval;
        }

        agp_bridge->gatt_table_real = (u32 *)page_dir.real;
        agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
        agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

        /* Get the address for the gart region.
         * This is a bus address even on the Alpha, because it's
         * used to program the AGP master, not the CPU.
         */

        pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
        addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        agp_bridge->gart_bus_addr = addr;

        /* Calculate the agp offset */
        for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
                writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
                        page_dir.remapped+GET_PAGE_DIR_OFF(addr));
                readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));        /* PCI Posting. */
        }

        for (i = 0; i < value->num_entries; i++) {
                addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
                readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
        }

        return 0;
}

static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
        struct amd_page_map page_dir;

        page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
        page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

        amd_free_gatt_pages();
        amd_free_page_map(&page_dir);
        return 0;
}

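/*
 * Read the currently programmed aperture size back from the low bits
 * of the APSIZE config register and translate it into one of the
 * entries of amd_irongate_sizes below.
 */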
static int amd_irongate_fetch_size(void)
{
        int i;
        u32 temp;
        struct aper_size_info_lvl2 *values;

        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = (temp & 0x0000000e);
        values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp == values[i].size_value) {
                        agp_bridge->previous_size =
                            agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }

        return 0;
}

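/*
 * Bring the Irongate GART online: map the MMIO register block if it
 * is not mapped yet, point ATTBASE at the page directory, set the
 * sync and indexing-mode registers, enable GART translation, program
 * the aperture size, and finally flush the GART TLB.
 */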
static int amd_irongate_configure(void)
{
        struct aper_size_info_lvl2 *current_size;
        u32 temp;
        u16 enable_reg;

        current_size = A_SIZE_LVL2(agp_bridge->current_size);

        if (!amd_irongate_private.registers) {
                /* Get the memory mapped registers */
                pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
                temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
                amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
                if (!amd_irongate_private.registers)
                        return -ENOMEM;
        }

        /* Write out the address of the gatt table */
        writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
        readl(amd_irongate_private.registers+AMD_ATTBASE);      /* PCI Posting. */

        /* Write the Sync register */
        pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

        /* Set indexing mode */
        pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

        /* Write the enable register */
        enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
        enable_reg = (enable_reg | 0x0004);
        writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
        readw(amd_irongate_private.registers+AMD_GARTENABLE);   /* PCI Posting. */

        /* Write out the size register */
        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
        pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

        /* Flush the tlb */
        writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
        readl(amd_irongate_private.registers+AMD_TLBFLUSH);     /* PCI Posting.*/
        return 0;
}

static void amd_irongate_cleanup(void)
{
        struct aper_size_info_lvl2 *previous_size;
        u32 temp;
        u16 enable_reg;

        previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

        enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
        enable_reg = (enable_reg & ~(0x0004));
        writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
        readw(amd_irongate_private.registers+AMD_GARTENABLE);   /* PCI Posting. */

        /* Write back the previous size and disable gart translation */
        pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
        temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
        pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
        iounmap((void __iomem *) amd_irongate_private.registers);
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT and flushing them individually.  However,
 * it currently just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can cover a large number
 * of entries.
 */

static void amd_irongate_tlbflush(struct agp_memory *temp)
{
        writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
        readl(amd_irongate_private.registers+AMD_TLBFLUSH);     /* PCI Posting. */
}

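/*
 * Bind an agp_memory block at pg_start: after the bounds and type
 * checks, every target GATT entry is checked to be unused, the CPU
 * caches are flushed once per block, and each page's physical address
 * is written out through agp_generic_mask_memory() (which ORs in the
 * valid bit from amd_irongate_masks) before the GART TLB is flushed.
 */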
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        int i, j, num_entries;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

        if (type != mem->type ||
            agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
                return -EINVAL;

        if ((pg_start + mem->page_count) > num_entries)
                return -EINVAL;

        j = pg_start;
        while (j < (pg_start + mem->page_count)) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                global_cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                writel(agp_generic_mask_memory(agp_bridge,
                                               page_to_phys(mem->pages[i]),
                                               mem->type),
                       cur_gatt+GET_GATT_OFF(addr));
                readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
        }
        amd_irongate_tlbflush(mem);
        return 0;
}

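/*
 * Unbinding is the inverse: each affected GATT entry is rewritten
 * with the scratch page and the GART TLB is flushed afterwards.
 */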
static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        int i;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        if (type != mem->type ||
            agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
                return -EINVAL;

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = GET_GATT(addr);
                writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
                readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
        }

        amd_irongate_tlbflush(mem);
        return 0;
}

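/*
 * Supported aperture sizes: {size in MB, number of 4K GATT entries,
 * APSIZE size_value bits}.  E.g. a 256MB aperture needs
 * 256MB / 4KB = 65536 entries.
 */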
static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
        {2048, 524288, 0x0000000c},
        {1024, 262144, 0x0000000a},
        {512, 131072, 0x00000008},
        {256, 65536, 0x00000006},
        {128, 32768, 0x00000004},
        {64, 16384, 0x00000002},
        {32, 8192, 0x00000000}
};

static const struct gatt_mask amd_irongate_masks[] =
{
        {.mask = 1, .type = 0}
};

static const struct agp_bridge_driver amd_irongate_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = amd_irongate_sizes,
        .size_type              = LVL2_APER_SIZE,
        .num_aperture_sizes     = 7,
        .needs_scratch_page     = true,
        .configure              = amd_irongate_configure,
        .fetch_size             = amd_irongate_fetch_size,
        .cleanup                = amd_irongate_cleanup,
        .tlb_flush              = amd_irongate_tlbflush,
        .mask_memory            = agp_generic_mask_memory,
        .masks                  = amd_irongate_masks,
        .agp_enable             = agp_generic_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = amd_create_gatt_table,
        .free_gatt_table        = amd_free_gatt_table,
        .insert_memory          = amd_insert_memory,
        .remove_memory          = amd_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_7006,
                .chipset_name   = "Irongate",
        },
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_700E,
                .chipset_name   = "761",
        },
        {
                .device_id      = PCI_DEVICE_ID_AMD_FE_GATE_700C,
                .chipset_name   = "760MP",
        },
        { }, /* dummy final entry, always present */
};

static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;
        u8 cap_ptr;
        int j;

        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
        if (!cap_ptr)
                return -ENODEV;

        j = ent - agp_amdk7_pci_table;
        dev_info(&pdev->dev, "AMD %s chipset\n",
                 amd_agp_device_ids[j].chipset_name);

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &amd_irongate_driver;
        bridge->dev_private_data = &amd_irongate_private;
        bridge->dev = pdev;
        bridge->capndx = cap_ptr;

        /* 751 Errata (22564_B-1.PDF)
           erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
           The system controller may experience noise due to strong drive strengths.
         */
        if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
                struct pci_dev *gfxcard = NULL;

                cap_ptr = 0;
                while (!cap_ptr) {
                        gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
                        if (!gfxcard) {
                                dev_info(&pdev->dev, "no AGP VGA controller\n");
                                return -ENODEV;
                        }
                        cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
                }

                /* With so many variants of NVidia cards, it's simpler just
                   to blacklist them all, and then whitelist them as needed
                   (if necessary at all). */
                if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
                        agp_bridge->flags |= AGP_ERRATA_1X;
                        dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
                }
                pci_dev_put(gfxcard);
        }

        /* 761 Errata (23613_F.pdf)
         * Revisions B0/B1 were a disaster.
         * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X.
         * erratum 45: Timing problem prevents fast writes -- Disable fast writes.
         * erratum 46: Setup violation on AGP SBA pins -- Disable side band addressing.
         * With this lot disabled, we should prevent lockups. */
        if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
                if (pdev->revision == 0x10 || pdev->revision == 0x11) {
                        agp_bridge->flags = AGP_ERRATA_FASTWRITES;
                        agp_bridge->flags |= AGP_ERRATA_SBA;
                        agp_bridge->flags |= AGP_ERRATA_1X;
                        dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
                }
        }

        /* Fill in the mode register */
        pci_read_config_dword(pdev,
                        bridge->capndx+PCI_AGP_STATUS,
                        &bridge->mode);

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
}

static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
}

#ifdef CONFIG_PM

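/*
 * Power management: suspend only saves PCI state and drops to the
 * requested power state; resume restores PCI config space and then
 * reruns amd_irongate_configure() so the GART registers are
 * reprogrammed.
 */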
static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static int agp_amdk7_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        return amd_irongate_driver.configure();
}

#endif /* CONFIG_PM */

/* must be in the same order as the name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_7006,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_700E,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_AMD,
        .device         = PCI_DEVICE_ID_AMD_FE_GATE_700C,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        { }
};

MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);

static struct pci_driver agp_amdk7_pci_driver = {
        .name           = "agpgart-amdk7",
        .id_table       = agp_amdk7_pci_table,
        .probe          = agp_amdk7_probe,
        .remove         = agp_amdk7_remove,
#ifdef CONFIG_PM
        .suspend        = agp_amdk7_suspend,
        .resume         = agp_amdk7_resume,
#endif
};

static int __init agp_amdk7_init(void)
{
        if (agp_off)
                return -EINVAL;
        return pci_register_driver(&agp_amdk7_pci_driver);
}

static void __exit agp_amdk7_cleanup(void)
{
        pci_unregister_driver(&agp_amdk7_pci_driver);
}

module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);

MODULE_LICENSE("GPL and additional rights");