/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, it just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

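/*
 * Per-chipset vtable: one static instance of this exists for each
 * supported GMCH generation (see intel_gtt_chipsets[] near the bottom
 * of this file).
 */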
struct intel_gtt_driver {
        unsigned int gen : 8;
        unsigned int is_g33 : 1;
        unsigned int is_pineview : 1;
        unsigned int is_ironlake : 1;
        unsigned int has_pgtbl_enable : 1;
        unsigned int dma_mask_size : 8;
        /* Chipset specific GTT setup */
        int (*setup)(void);
        /* This should undo anything done in ->setup() save the unmapping
         * of the mmio register file, that's done in the generic code. */
        void (*cleanup)(void);
        void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
        /* Flags is a more or less chipset specific opaque value.
         * For chipsets that need to support old ums (non-gem) code, this
         * needs to be identical to the various supported agp memory types! */
        bool (*check_flags)(unsigned int flags);
        void (*chipset_flush)(void);
};

static struct _intel_private {
        struct intel_gtt base;
        const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
        u8 __iomem *registers;
        phys_addr_t gtt_bus_addr;
        phys_addr_t gma_bus_addr;
        u32 PGETBL_save;
        u32 __iomem *gtt;               /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;
        };
        char *i81x_gtt_table;
        struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
        dma_addr_t scratch_page_dma;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
                         struct scatterlist **sg_list, int *num_sg)
{
        struct sg_table st;
        struct scatterlist *sg;
        int i;

        if (*sg_list)
                return 0; /* already mapped (e.g. for resume) */

        DBG("try mapping %lu pages\n", (unsigned long)num_entries);

        if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
                goto err;

        *sg_list = sg = st.sgl;

        for (i = 0; i < num_entries; i++, sg = sg_next(sg))
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
                                 num_entries, PCI_DMA_BIDIRECTIONAL);
        if (unlikely(!*num_sg))
                goto err;

        return 0;

err:
        sg_free_table(&st);
        return -ENOMEM;
}
EXPORT_SYMBOL(intel_gtt_map_memory);

void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
        struct sg_table st;
        DBG("try unmapping %d sg entries\n", num_sg);

        pci_unmap_sg(intel_private.pcidev, sg_list,
                     num_sg, PCI_DMA_BIDIRECTIONAL);

        st.sgl = sg_list;
        st.orig_nents = st.nents = num_sg;

        sg_free_table(&st);
}
EXPORT_SYMBOL(intel_gtt_unmap_memory);

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
        if (page == NULL)
                return NULL;

        if (set_pages_uc(page, 4) < 0) {
                set_pages_wb(page, 4);
                __free_pages(page, 2);
                return NULL;
        }
        get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
}

static void i8xx_destroy_pages(struct page *page)
{
        if (page == NULL)
                return;

        set_pages_wb(page, 4);
        put_page(page);
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
}

#define I810_GTT_ORDER 4
static int i810_setup(void)
{
        u32 reg_addr;
        char *gtt_table;

        /* i81x does not preallocate the gtt. It's always 64KB in size. */
        gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
        if (gtt_table == NULL)
                return -ENOMEM;
        intel_private.i81x_gtt_table = gtt_table;

        pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
        reg_addr &= 0xfff80000;

        intel_private.registers = ioremap(reg_addr, KB(64));
        if (!intel_private.registers) {
                /* don't leak the gtt table we just allocated */
                free_gatt_pages(gtt_table, I810_GTT_ORDER);
                return -ENOMEM;
        }

        writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
               intel_private.registers+I810_PGETBL_CTL);

        intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

        if ((readl(intel_private.registers+I810_DRAM_CTL)
                & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
                dev_info(&intel_private.pcidev->dev,
                         "detected 4MB dedicated video ram\n");
                intel_private.num_dcache_entries = 1024;
        }

        return 0;
}

static void i810_cleanup(void)
{
        writel(0, intel_private.registers+I810_PGETBL_CTL);
        free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}

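/*
 * Map the i810's dedicated display cache ("dcache") into the GTT. These
 * entries don't point at system pages: the AGP_DCACHE_MEMORY type makes
 * i810_write_entry() set I810_PTE_LOCAL, selecting the on-chip memory.
 */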
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
                                      int type)
{
        int i;

        if ((pg_start + mem->page_count)
                        > intel_private.num_dcache_entries)
                return -EINVAL;

        if (!mem->is_flushed)
                global_cache_flush();

        for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                dma_addr_t addr = i << PAGE_SHIFT;
                intel_private.driver->write_entry(addr,
                                                  i, type);
        }
        readl(intel_private.gtt+i-1);

        return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
        struct agp_memory *new;
        struct page *page;

        switch (pg_count) {
        case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
                break;
        case 4:
                /* kludge to get 4 physical pages for ARGB cursor */
                page = i8xx_alloc_pages();
                break;
        default:
                return NULL;
        }

        if (page == NULL)
                return NULL;

        new = agp_create_memory(pg_count);
        if (new == NULL)
                return NULL;

        new->pages[0] = page;
        if (pg_count == 4) {
                /* kludge to get 4 physical pages for ARGB cursor */
                new->pages[1] = new->pages[0] + 1;
                new->pages[2] = new->pages[1] + 1;
                new->pages[3] = new->pages[2] + 1;
        }
        new->page_count = pg_count;
        new->num_scratch_pages = pg_count;
        new->type = AGP_PHYS_MEMORY;
        new->physical = page_to_phys(new->pages[0]);
        return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
        agp_free_key(curr->key);
        if (curr->type == AGP_PHYS_MEMORY) {
                if (curr->page_count == 4)
                        i8xx_destroy_pages(curr->pages[0]);
                else {
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_UNMAP);
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_FREE);
                }
                agp_free_page_array(curr);
        }
        kfree(curr);
}

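/*
 * The scratch page backs every GTT entry that isn't bound to real memory,
 * so stray GPU accesses hit a harmless, uncached page instead of random
 * system memory.
 */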
static int intel_gtt_setup_scratch_page(void)
{
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

        if (intel_private.base.needs_dmar) {
                dma_addr = pci_map_page(intel_private.pcidev, page, 0,
                                    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
                        /* don't leak the page on mapping failure */
                        set_pages_wb(page, 1);
                        put_page(page);
                        __free_page(page);
                        return -EINVAL;
                }

                intel_private.scratch_page_dma = dma_addr;
        } else
                intel_private.scratch_page_dma = page_to_phys(page);

        intel_private.scratch_page = page;

        return 0;
}

static void i810_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags = I810_PTE_VALID;

        switch (flags) {
        case AGP_DCACHE_MEMORY:
                pte_flags |= I810_PTE_LOCAL;
                break;
        case AGP_USER_CACHED_MEMORY:
                pte_flags |= I830_PTE_SYSTEM_CACHED;
                break;
        }

        writel(addr | pte_flags, intel_private.gtt + entry);
}

static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
        {32, 8192, 3},
        {64, 16384, 4},
        {128, 32768, 5},
        {256, 65536, 6},
        {512, 131072, 7},
};

static unsigned int intel_gtt_stolen_size(void)
{
        u16 gmch_ctrl;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        unsigned int stolen_size = 0;

        if (INTEL_GTT_GEN == 1)
                return 0; /* no stolen mem on i81x */

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        } else if (INTEL_GTT_GEN == 6) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
                u16 snb_gmch_ctl;
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
                        stolen_size = MB(192);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
                        stolen_size = MB(288);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
                        stolen_size = MB(320);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
                        stolen_size = MB(384);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
                        stolen_size = MB(416);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
                        stolen_size = MB(448);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
                        stolen_size = MB(480);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
                        stolen_size = MB(512);
                        break;
                }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        }

        if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                       stolen_size / KB(1), local ? "local" : "stolen");
        } else {
                dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
                stolen_size = 0;
        }

        return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
        u32 pgetbl_ctl, pgetbl_ctl2;

        /* ensure that ppgtt is disabled */
        pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
        pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
        writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

        /* write the new ggtt size */
        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
        pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
        pgetbl_ctl |= size_flag;
        writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

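/*
 * A GTT entry is 4 bytes, so the size/4 conversions below turn the
 * PGETBL_CTL size field into a number of entries, i.e. mappable pages.
 */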
static unsigned int i965_gtt_total_entries(void)
{
        int size;
        u32 pgetbl_ctl;
        u16 gmch_ctl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctl);

        if (INTEL_GTT_GEN == 5) {
                switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
                case G4x_GMCH_SIZE_1M:
                case G4x_GMCH_SIZE_VT_1M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
                        break;
                case G4x_GMCH_SIZE_VT_1_5M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
                        break;
                case G4x_GMCH_SIZE_2M:
                case G4x_GMCH_SIZE_VT_2M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
                        break;
                }
        }

        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

        switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
        case I965_PGETBL_SIZE_128KB:
                size = KB(128);
                break;
        case I965_PGETBL_SIZE_256KB:
                size = KB(256);
                break;
        case I965_PGETBL_SIZE_512KB:
                size = KB(512);
                break;
        /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
        case I965_PGETBL_SIZE_1MB:
                size = KB(1024);
                break;
        case I965_PGETBL_SIZE_2MB:
                size = KB(2048);
                break;
        case I965_PGETBL_SIZE_1_5MB:
                size = KB(1024 + 512);
                break;
        default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
                size = KB(512);
        }

        return size/4;
}

static unsigned int intel_gtt_total_entries(void)
{
        int size;

        if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
                return i965_gtt_total_entries();
        else if (INTEL_GTT_GEN == 6) {
                u16 snb_gmch_ctl;

                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
                default:
                case SNB_GTT_SIZE_0M:
                        printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
                        size = MB(0);
                        break;
                case SNB_GTT_SIZE_1M:
                        size = MB(1);
                        break;
                case SNB_GTT_SIZE_2M:
                        size = MB(2);
                        break;
                }
                return size/4;
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                return intel_private.base.gtt_mappable_entries;
        }
}

static unsigned int intel_gtt_mappable_entries(void)
{
        unsigned int aperture_size;

        if (INTEL_GTT_GEN == 1) {
                u32 smram_miscc;

                pci_read_config_dword(intel_private.bridge_dev,
                                      I810_SMRAM_MISCC, &smram_miscc);

                if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                                == I810_GFX_MEM_WIN_32M)
                        aperture_size = MB(32);
                else
                        aperture_size = MB(64);
        } else if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);

                if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
                        aperture_size = MB(64);
                else
                        aperture_size = MB(128);
        } else {
                /* 9xx supports large sizes, just look at the length */
                aperture_size = pci_resource_len(intel_private.pcidev, 2);
        }

        return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
        set_pages_wb(intel_private.scratch_page, 1);
        /* only unmap what setup actually mapped */
        if (intel_private.base.needs_dmar)
                pci_unmap_page(intel_private.pcidev,
                               intel_private.scratch_page_dma,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
        intel_private.driver->cleanup();

        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);

        intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
        u32 gtt_map_size;
        int ret;

        ret = intel_private.driver->setup();
        if (ret != 0)
                return ret;

        intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
        intel_private.base.gtt_total_entries = intel_gtt_total_entries();

        /* save the PGETBL reg for resume */
        intel_private.PGETBL_save =
                readl(intel_private.registers+I810_PGETBL_CTL)
                        & ~I810_PGETBL_ENABLED;
        /* we only ever restore the register when enabling the PGTBL... */
        if (HAS_PGTBL_EN)
                intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

        dev_info(&intel_private.bridge_dev->dev,
                        "detected gtt size: %dK total, %dK mappable\n",
                        intel_private.base.gtt_total_entries * 4,
                        intel_private.base.gtt_mappable_entries * 4);

        gtt_map_size = intel_private.base.gtt_total_entries * 4;

        intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
                                    gtt_map_size);
        if (!intel_private.gtt) {
                intel_private.driver->cleanup();
                iounmap(intel_private.registers);
                return -ENOMEM;
        }

        global_cache_flush();   /* FIXME: ? */

        intel_private.base.stolen_size = intel_gtt_stolen_size();

        intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

        ret = intel_gtt_setup_scratch_page();
        if (ret != 0) {
                intel_gtt_cleanup();
                return ret;
        }

        return 0;
}

static int intel_fake_agp_fetch_size(void)
{
        int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
        unsigned int aper_size;
        int i;

        aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
                    / MB(1);

        for (i = 0; i < num_sizes; i++) {
                if (aper_size == intel_fake_agp_sizes[i].size) {
                        agp_bridge->current_size =
                                (void *) (intel_fake_agp_sizes + i);
                        return aper_size;
                }
        }

        return 0;
}

static void i830_cleanup(void)
{
        if (intel_private.i8xx_flush_page) {
                /* kunmap() takes the page, not the mapped address */
                kunmap(intel_private.i8xx_page);
                intel_private.i8xx_flush_page = NULL;
        }

        __free_page(intel_private.i8xx_page);
        intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
        /* return if we've already set the flush mechanism up */
        if (intel_private.i8xx_page)
                return;

        intel_private.i8xx_page = alloc_page(GFP_KERNEL);
        if (!intel_private.i8xx_page)
                return;

        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
        if (!intel_private.i8xx_flush_page)
                i830_cleanup();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out.  It appears to work.
 */
static void i830_chipset_flush(void)
{
        unsigned int *pg = intel_private.i8xx_flush_page;

        memset(pg, 0, 1024);

        if (cpu_has_clflush)
                clflush_cache_range(pg, 1024);
        else if (wbinvd_on_all_cpus() != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags = I810_PTE_VALID;

        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        writel(addr | pte_flags, intel_private.gtt + entry);
}

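/*
 * Read back the aperture base from GMADDR, make sure the GMCH is enabled
 * on gen2, and rewrite the saved PGETBL_CTL value; this same path brings
 * the GTT back up on resume.
 */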
static bool intel_enable_gtt(void)
{
        u32 gma_addr;
        u8 __iomem *reg;

        if (INTEL_GTT_GEN <= 2)
                pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                                      &gma_addr);
        else
                pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                                      &gma_addr);

        intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

        if (INTEL_GTT_GEN >= 6)
                return true;

        if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                gmch_ctrl |= I830_GMCH_ENABLED;
                pci_write_config_word(intel_private.bridge_dev,
                                      I830_GMCH_CTRL, gmch_ctrl);

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                                gmch_ctrl);
                        return false;
                }
        }

        /* On the resume path we may be adjusting the PGTBL value, so
         * be paranoid and flush all chipset write buffers...
         */
        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        reg = intel_private.registers+I810_PGETBL_CTL;
        writel(intel_private.PGETBL_save, reg);
        if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
                        readl(reg), intel_private.PGETBL_save);
                return false;
        }

        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        return true;
}

static int i830_setup(void)
{
        u32 reg_addr;

        pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
        reg_addr &= 0xfff80000;

        intel_private.registers = ioremap(reg_addr, KB(64));
        if (!intel_private.registers)
                return -ENOMEM;

        intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

        intel_i830_setup_flush();

        return 0;
}

static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
        agp_bridge->gatt_table_real = NULL;
        agp_bridge->gatt_table = NULL;
        agp_bridge->gatt_bus_addr = 0;

        return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
        return 0;
}

static int intel_fake_agp_configure(void)
{
        if (!intel_enable_gtt())
                return -EIO;

        intel_private.clear_fake_agp = true;
        agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

        return 0;
}

static bool i830_check_flags(unsigned int flags)
{
        switch (flags) {
        case 0:
        case AGP_PHYS_MEMORY:
        case AGP_USER_CACHED_MEMORY:
        case AGP_USER_MEMORY:
                return true;
        }

        return false;
}

void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
                                 unsigned int sg_len,
                                 unsigned int pg_start,
                                 unsigned int flags)
{
        struct scatterlist *sg;
        unsigned int len, m;
        int i, j;

        j = pg_start;

        /* sg may merge pages, but we have to separate
         * per-page addresses for the GTT */
        for_each_sg(sg_list, sg, sg_len, i) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        intel_private.driver->write_entry(addr,
                                                          j, flags);
                        j++;
                }
        }
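        /* PCI posting: read back the last entry to flush the PTE writes */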
        readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

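/*
 * Non-DMAR variant of intel_gtt_insert_sg_entries(): each PTE is
 * programmed straight from the page's physical address.
 */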
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
                            struct page **pages, unsigned int flags)
{
        int i, j;

        for (i = 0, j = first_entry; i < num_entries; i++, j++) {
                dma_addr_t addr = page_to_phys(pages[i]);
                intel_private.driver->write_entry(addr,
                                                  j, flags);
        }
        readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_pages);

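/*
 * On the first insert through the fake agp interface, everything between
 * the end of stolen memory and the end of the mappable range is pointed
 * at the scratch page, flushing out whatever was left in those PTEs.
 */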
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
                                         off_t pg_start, int type)
{
        int ret = -EINVAL;

        if (intel_private.clear_fake_agp) {
                int start = intel_private.base.stolen_size / PAGE_SIZE;
                int end = intel_private.base.gtt_mappable_entries;
                intel_gtt_clear_range(start, end - start);
                intel_private.clear_fake_agp = false;
        }

        if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
                return i810_insert_dcache_entries(mem, pg_start, type);

        if (mem->page_count == 0)
                goto out;

        if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
                goto out_err;

        if (type != mem->type)
                goto out_err;

        if (!intel_private.driver->check_flags(type))
                goto out_err;

        if (!mem->is_flushed)
                global_cache_flush();

        if (intel_private.base.needs_dmar) {
                ret = intel_gtt_map_memory(mem->pages, mem->page_count,
                                           &mem->sg_list, &mem->num_sg);
                if (ret != 0)
                        return ret;

                intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
                                            pg_start, type);
        } else
                intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
                                       type);

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}

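/* Unbind a range of entries by pointing them all back at the scratch page. */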
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
        unsigned int i;

        for (i = first_entry; i < (first_entry + num_entries); i++) {
                intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                                  i, 0);
        }
        readl(intel_private.gtt+i-1);
}
EXPORT_SYMBOL(intel_gtt_clear_range);

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
                                         off_t pg_start, int type)
{
        if (mem->page_count == 0)
                return 0;

        intel_gtt_clear_range(pg_start, mem->page_count);

        if (intel_private.base.needs_dmar) {
                intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
                mem->sg_list = NULL;
                mem->num_sg = 0;
        }

        return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
                                                       int type)
{
        struct agp_memory *new;

        if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
                if (pg_count != intel_private.num_dcache_entries)
                        return NULL;

                new = agp_create_memory(1);
                if (new == NULL)
                        return NULL;

                new->type = AGP_DCACHE_MEMORY;
                new->page_count = pg_count;
                new->num_scratch_pages = 0;
                agp_free_page_array(new);
                return new;
        }
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
        /* always return NULL for other allocation types for now */
        return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
        int ret;
        ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
                                     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
                                     pcibios_align_resource, intel_private.bridge_dev);

        return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
        int ret;
        u32 temp;

        pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
        if (!(temp & 0x1)) {
                intel_alloc_chipset_flush_resource();
                intel_private.resource_valid = 1;
                pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                temp &= ~1;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = temp;
                intel_private.ifp_resource.end = temp + PAGE_SIZE;
                ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp resource, some don't */
                if (ret)
                        intel_private.resource_valid = 0;
        }
}

static void intel_i965_g33_setup_chipset_flush(void)
{
        u32 temp_hi, temp_lo;
        int ret;

        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

        if (!(temp_lo & 0x1)) {
                intel_alloc_chipset_flush_resource();

                intel_private.resource_valid = 1;
                pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
                        upper_32_bits(intel_private.ifp_resource.start));
                pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                u64 l64;

                temp_lo &= ~0x1;
                l64 = ((u64)temp_hi << 32) | temp_lo;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = l64;
                intel_private.ifp_resource.end = l64 + PAGE_SIZE;
                ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp resource, some don't */
                if (ret)
                        intel_private.resource_valid = 0;
        }
}

static void intel_i9xx_setup_flush(void)
{
        /* return if already configured */
        if (intel_private.ifp_resource.start)
                return;

        if (INTEL_GTT_GEN == 6)
                return;

        /* setup a resource for this object */
        intel_private.ifp_resource.name = "Intel Flush Page";
        intel_private.ifp_resource.flags = IORESOURCE_MEM;

        /* Setup chipset flush for 915 */
        if (IS_G33 || INTEL_GTT_GEN >= 4) {
                intel_i965_g33_setup_chipset_flush();
        } else {
                intel_i915_setup_chipset_flush();
        }

        if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
        if (!intel_private.i9xx_flush_page)
                dev_err(&intel_private.pcidev->dev,
                        "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
        if (intel_private.resource_valid)
                release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
                             unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags;

        pte_flags = I810_PTE_VALID;
        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        /* i965+ PTEs carry physical address bits 35:32 in bits 7:4 */
        addr |= (addr >> 28) & 0xf0;
        writel(addr | pte_flags, intel_private.gtt + entry);
}

static bool gen6_check_flags(unsigned int flags)
{
        return true;
}

static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
        unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
        u32 pte_flags;

        if (type_mask == AGP_USER_MEMORY)
                pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
                pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        } else { /* set 'normal'/'cached' to LLC by default */
                pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        }

        /* gen6 PTEs carry physical address bits 39:32 in bits 11:4 */
        addr |= (addr >> 28) & 0xff0;
        writel(addr | pte_flags, intel_private.gtt + entry);
}

static void gen6_cleanup(void)
{
}

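/*
 * On gen3 the GTT has its own PCI BAR (I915_PTEADDR); from gen4 on it
 * lives at a fixed offset inside the MMIO BAR: 512KB for gen4, 2MB for
 * gen5/gen6 (see the switch below).
 */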
static int i9xx_setup(void)
{
        u32 reg_addr;

        pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

        reg_addr &= 0xfff80000;

        intel_private.registers = ioremap(reg_addr, 128 * 4096);
        if (!intel_private.registers)
                return -ENOMEM;

        if (INTEL_GTT_GEN == 3) {
                u32 gtt_addr;

                pci_read_config_dword(intel_private.pcidev,
                                      I915_PTEADDR, &gtt_addr);
                intel_private.gtt_bus_addr = gtt_addr;
        } else {
                u32 gtt_offset;

                switch (INTEL_GTT_GEN) {
                case 5:
                case 6:
                        gtt_offset = MB(2);
                        break;
                case 4:
                default:
                        gtt_offset = KB(512);
                        break;
                }
                intel_private.gtt_bus_addr = reg_addr + gtt_offset;
        }

        intel_i9xx_setup_flush();

        return 0;
}

static const struct agp_bridge_driver intel_fake_agp_driver = {
        .owner                  = THIS_MODULE,
        .size_type              = FIXED_APER_SIZE,
        .aperture_sizes         = intel_fake_agp_sizes,
        .num_aperture_sizes     = ARRAY_SIZE(intel_fake_agp_sizes),
        .configure              = intel_fake_agp_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_gtt_cleanup,
        .agp_enable             = intel_fake_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_fake_agp_create_gatt_table,
        .free_gatt_table        = intel_fake_agp_free_gatt_table,
        .insert_memory          = intel_fake_agp_insert_entries,
        .remove_memory          = intel_fake_agp_remove_entries,
        .alloc_by_type          = intel_fake_agp_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
};

static const struct intel_gtt_driver i81x_gtt_driver = {
        .gen = 1,
        .has_pgtbl_enable = 1,
        .dma_mask_size = 32,
        .setup = i810_setup,
        .cleanup = i810_cleanup,
        .check_flags = i830_check_flags,
        .write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
        .gen = 2,
        .has_pgtbl_enable = 1,
        .setup = i830_setup,
        .cleanup = i830_cleanup,
        .write_entry = i830_write_entry,
        .dma_mask_size = 32,
        .check_flags = i830_check_flags,
        .chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
        .gen = 3,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        /* i945 is the last gpu to need phys mem (for overlay and cursors). */
        .write_entry = i830_write_entry,
        .dma_mask_size = 32,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
        .gen = 3,
        .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
        .gen = 3,
        .is_pineview = 1, .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
        .gen = 4,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
        .gen = 5,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
        .gen = 5,
        .is_ironlake = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
        .gen = 6,
        .setup = i9xx_setup,
        .cleanup = gen6_cleanup,
        .write_entry = gen6_write_entry,
        .dma_mask_size = 40,
        .check_flags = gen6_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

/* Table describing the supported Intel GMCH chipsets: find_gmch() matches
 * gmch_chip_id against the installed hardware and selects the corresponding
 * gtt_driver.
 */
static const struct intel_gtt_driver_description {
        unsigned int gmch_chip_id;
        char *name;
        const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
                &i81x_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
                &i81x_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
                &i81x_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
                &i81x_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
                &i8xx_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
                &i8xx_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82854_IG, "854",
                &i8xx_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
                &i8xx_gtt_driver},
        { PCI_DEVICE_ID_INTEL_82865_IG, "865",
                &i8xx_gtt_driver},
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
            "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
            "Sandybridge", &sandybridge_gtt_driver },
        { 0, NULL, NULL }
};

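/*
 * Find the integrated graphics device for this chipset. If the first
 * match isn't PCI function 0, try the next device with the same id.
 */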
static int find_gmch(u16 device)
{
        struct pci_dev *gmch_device;

        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
        if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
                gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                                             device, gmch_device);
        }

        if (!gmch_device)
                return 0;

        intel_private.pcidev = gmch_device;
        return 1;
}

int intel_gmch_probe(struct pci_dev *pdev,
                     struct agp_bridge_data *bridge)
{
        int i, mask;
        intel_private.driver = NULL;

        for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
                if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
                        intel_private.driver =
                                intel_gtt_chipsets[i].gtt_driver;
                        break;
                }
        }

        if (!intel_private.driver)
                return 0;

        bridge->driver = &intel_fake_agp_driver;
        bridge->dev_private_data = &intel_private;
        bridge->dev = pdev;

        intel_private.bridge_dev = pci_dev_get(pdev);

        dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

        mask = intel_private.driver->dma_mask_size;
        if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
                dev_err(&intel_private.pcidev->dev,
                        "set gfx device dma mask %d-bit failed!\n", mask);
        else
                pci_set_consistent_dma_mask(intel_private.pcidev,
                                            DMA_BIT_MASK(mask));

        if (intel_gtt_init() != 0)
                return 0;

        return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

const struct intel_gtt *intel_gtt_get(void)
{
        return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
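
/*
 * Usage sketch (hypothetical caller, not part of this file): a gem driver
 * such as drm/i915 would typically pair these exports roughly like
 *
 *      const struct intel_gtt *gtt = intel_gtt_get();
 *      ...
 *      intel_gtt_insert_pages(first_entry, num_entries, pages, AGP_USER_MEMORY);
 *      intel_gtt_chipset_flush();
 *      ...
 *      intel_gtt_clear_range(first_entry, num_entries);
 *
 * with first_entry/num_entries/pages supplied by the caller; the exact
 * sequence lives in the drm driver, not here.
 */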

void intel_gtt_chipset_flush(void)
{
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

void intel_gmch_remove(struct pci_dev *pdev)
{
        if (intel_private.pcidev)
                pci_dev_put(intel_private.pcidev);
        if (intel_private.bridge_dev)
                pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");