agp/intel: make intel-gtt.c into a real source file
[pandora-kernel.git] / drivers / char / agp / intel-gtt.c
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * an agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphic devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore, just needlessly complicates
12  * the code. But as long as the old graphics stack is still supported, it's stuck
13  * here.
14  *
15  * /fairy-tale-mode off
16  */
17
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/pagemap.h>
23 #include <linux/agp_backend.h>
24 #include <asm/smp.h>
25 #include "agp.h"
26 #include "intel-agp.h"
27 #include <linux/intel-gtt.h>
28
29 /*
30  * If we have Intel graphics, we're not going to have anything other than
31  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
32  * on the Intel IOMMU support (CONFIG_DMAR).
33  * Only newer chipsets need to bother with this, of course.
34  */
35 #ifdef CONFIG_DMAR
36 #define USE_PCI_DMA_API 1
37 #endif
38
/* Max amount of stolen space, anything above will be returned to Linux.
 * Exported so other code (e.g. the drm) can consult/override the cap;
 * intel_i830_init_gtt_entries() trims detected stolen memory to this. */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);
42
/* i810 aperture modes: {size in MB, number of GTT entries, page order}.
 * Selected by intel_i810_fetch_size() from the SMRAM_MISCC register. */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
        {64, 16384, 4},
        /* The 32M mode still requires a 64k gatt */
        {32, 8192, 4}
};
49
/* Driver-private memory types, used as indices into intel_i810_masks
 * (see intel_i830_type_to_mask_type / intel_i810_insert_entries). */
#define AGP_DCACHE_MEMORY       1
#define AGP_PHYS_MEMORY         2
#define INTEL_AGP_CACHED_MEMORY 3
53
/* PTE bit masks, indexed by the memory type constants above:
 * [0]/[2] plain valid PTE, [1] dcache (local) memory, [3] snooped. */
static struct gatt_mask intel_i810_masks[] =
{
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
         .type = INTEL_AGP_CACHED_MEMORY}
};
62
/* Sandybridge (gen6) cacheability types; these double as indices into
 * intel_gen6_masks and are produced by intel_gen6_type_to_mask_type(). */
#define INTEL_AGP_UNCACHED_MEMORY              0
#define INTEL_AGP_CACHED_MEMORY_LLC            1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
68
/* Gen6 PTE bit masks, one entry per INTEL_AGP_* gen6 type above. */
static struct gatt_mask intel_gen6_masks[] =
{
        {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
         .type = INTEL_AGP_UNCACHED_MEMORY },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
         .type = INTEL_AGP_CACHED_MEMORY_LLC },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
};
82
/* Single driver-wide private state; there is only one GMCH/GTT per
 * system, so a static singleton is used rather than per-bridge data. */
static struct _intel_private {
        struct pci_dev *pcidev; /* device one */
        u8 __iomem *registers;  /* mapped GMCH register window */
        u32 __iomem *gtt;               /* I915G */
        int num_dcache_entries; /* i810 dedicated video ram entries, if any */
        /* gtt_entries is the number of gtt entries that are already mapped
         * to stolen memory.  Stolen memory is larger than the memory mapped
         * through gtt_entries, as it includes some reserved space for the BIOS
         * popup and for the GTT.
         */
        int gtt_entries;                        /* i830+ */
        int gtt_total_size;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;  /* kmap of i8xx_page, see setup_flush */
        };
        struct page *i8xx_page; /* backing page for the i8xx chipset flush */
        struct resource ifp_resource;
        int resource_valid;
} intel_private;
103
104 #ifdef USE_PCI_DMA_API
105 static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
106 {
107         *ret = pci_map_page(intel_private.pcidev, page, 0,
108                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
109         if (pci_dma_mapping_error(intel_private.pcidev, *ret))
110                 return -EINVAL;
111         return 0;
112 }
113
/* Undo intel_agp_map_page(); the struct page argument is unused, only
 * the bus address is needed to tear down the mapping. */
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
        pci_unmap_page(intel_private.pcidev, dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
119
120 static void intel_agp_free_sglist(struct agp_memory *mem)
121 {
122         struct sg_table st;
123
124         st.sgl = mem->sg_list;
125         st.orig_nents = st.nents = mem->page_count;
126
127         sg_free_table(&st);
128
129         mem->sg_list = NULL;
130         mem->num_sg = 0;
131 }
132
/*
 * Build a scatterlist covering mem->pages and DMA-map it.
 *
 * On success mem->sg_list / mem->num_sg describe the mapping and 0 is
 * returned.  Note that pci_map_sg() may coalesce adjacent pages, so
 * num_sg can be smaller than page_count (handled by
 * intel_agp_insert_sg_entries()).  On failure the table is freed and
 * -ENOMEM is returned.
 */
static int intel_agp_map_memory(struct agp_memory *mem)
{
        struct sg_table st;
        struct scatterlist *sg;
        int i;

        DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

        if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
                goto err;

        mem->sg_list = sg = st.sgl;

        for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
                sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

        /* num_sg may be < page_count if entries were merged. */
        mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
                                 mem->page_count, PCI_DMA_BIDIRECTIONAL);
        if (unlikely(!mem->num_sg))
                goto err;

        return 0;

err:
        /* Safe even when sg_alloc_table() failed: it cleans up after
         * itself and sg_free_table() tolerates an empty table. */
        sg_free_table(&st);
        return -ENOMEM;
}
160
/* DMA-unmap and free the scatterlist created by intel_agp_map_memory(). */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
        DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

        pci_unmap_sg(intel_private.pcidev, mem->sg_list,
                     mem->page_count, PCI_DMA_BIDIRECTIONAL);
        intel_agp_free_sglist(mem);
}
169
/*
 * Write one GTT PTE per page of mem, starting at entry pg_start, using
 * the DMA addresses from the scatterlist (DMA-API build).
 *
 * Two cases: if pci_map_sg() did not merge anything (num_sg ==
 * page_count) each sg entry is exactly one page; otherwise merged
 * entries are walked in PAGE_SIZE steps so every page still gets its
 * own PTE.  The trailing readl() is a posting read that flushes the
 * PTE writes out to the chipset.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
{
        struct scatterlist *sg;
        int i, j;

        j = pg_start;

        WARN_ON(!mem->num_sg);

        if (mem->num_sg == mem->page_count) {
                for_each_sg(mem->sg_list, sg, mem->page_count, i) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        sg_dma_address(sg), mask_type),
                                        intel_private.gtt+j);
                        j++;
                }
        } else {
                /* sg may merge pages, but we have to separate
                 * per-page addr for GTT */
                unsigned int len, m;

                for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
                        len = sg_dma_len(sg) / PAGE_SIZE;
                        for (m = 0; m < len; m++) {
                                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                                                       sg_dma_address(sg) + m * PAGE_SIZE,
                                                                       mask_type),
                                       intel_private.gtt+j);
                                j++;
                        }
                }
        }
        /* PCI posting read. */
        readl(intel_private.gtt+j-1);
}
205
206 #else
207
/*
 * Non-DMA-API build: write one GTT PTE per page of mem starting at
 * pg_start, using raw physical addresses.  The final readl() is a
 * posting read to flush the writes.  Callers ensure page_count > 0.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
{
        int i, j;

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                page_to_phys(mem->pages[i]), mask_type),
                       intel_private.gtt+j);
        }

        readl(intel_private.gtt+j-1);
}
221
222 #endif
223
224 static int intel_i810_fetch_size(void)
225 {
226         u32 smram_miscc;
227         struct aper_size_info_fixed *values;
228
229         pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
230         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
231
232         if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
233                 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
234                 return 0;
235         }
236         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
237                 agp_bridge->current_size = (void *) (values + 1);
238                 agp_bridge->aperture_size_idx = 1;
239                 return values[1].size;
240         } else {
241                 agp_bridge->current_size = (void *) (values);
242                 agp_bridge->aperture_size_idx = 0;
243                 return values[0].size;
244         }
245
246         return 0;
247 }
248
/*
 * Bring up the i810 GTT hardware.
 *
 * Maps the GMCH register window on first call, detects the optional
 * 4MB dedicated video ram (DRAM row 0 populated with SDRAM), records
 * the aperture bus address, points the hardware at the gatt and, if
 * the driver uses a scratch page, fills every PTE with it.
 *
 * Returns 0 on success, -ENOMEM if the register window can't be mapped.
 */
static int intel_i810_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        if (!intel_private.registers) {
                pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
                /* Mask off the low bits to get the MMIO base address. */
                temp &= 0xfff80000;

                intel_private.registers = ioremap(temp, 128 * 4096);
                if (!intel_private.registers) {
                        dev_err(&intel_private.pcidev->dev,
                                "can't remap memory\n");
                        return -ENOMEM;
                }
        }

        if ((readl(intel_private.registers+I810_DRAM_CTL)
                & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
                /* This will need to be dynamically assigned */
                dev_info(&intel_private.pcidev->dev,
                         "detected 4MB dedicated video ram\n");
                intel_private.num_dcache_entries = 1024;
        }
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        /* Enable the page table at the gatt's bus address. */
        writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                for (i = 0; i < current_size->num_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
        }
        global_cache_flush();
        return 0;
}
290
/* Disable the page table and unmap the register window (reverse of
 * intel_i810_configure()). */
static void intel_i810_cleanup(void)
{
        writel(0, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers); /* PCI Posting. */
        iounmap(intel_private.registers);
}
297
298 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
299 {
300         return;
301 }
302
/* Exists to support ARGB cursors */
/*
 * Allocate an order-2 (4-page, 16KB) physically contiguous block in the
 * 32-bit DMA zone and mark it uncached for GPU access.  Returns the
 * head page with an extra reference held, or NULL on failure.
 */
static struct page *i8xx_alloc_pages(void)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
        if (page == NULL)
                return NULL;

        /* All 4 pages must be uncached; undo and bail if that fails. */
        if (set_pages_uc(page, 4) < 0) {
                set_pages_wb(page, 4);
                __free_pages(page, 2);
                return NULL;
        }
        /* Extra ref balanced by put_page() in i8xx_destroy_pages(). */
        get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
}
321
/* Undo i8xx_alloc_pages(): restore write-back caching, drop the extra
 * reference and free the order-2 block.  NULL is tolerated. */
static void i8xx_destroy_pages(struct page *page)
{
        if (page == NULL)
                return;

        set_pages_wb(page, 4);
        put_page(page);
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
}
332
333 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
334                                         int type)
335 {
336         if (type < AGP_USER_TYPES)
337                 return type;
338         else if (type == AGP_USER_CACHED_MEMORY)
339                 return INTEL_AGP_CACHED_MEMORY;
340         else
341                 return 0;
342 }
343
344 static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
345                                         int type)
346 {
347         unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
348         unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
349
350         if (type_mask == AGP_USER_UNCACHED_MEMORY)
351                 return INTEL_AGP_UNCACHED_MEMORY;
352         else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
353                 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
354                               INTEL_AGP_CACHED_MEMORY_LLC_MLC;
355         else /* set 'normal'/'cached' to LLC by default */
356                 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
357                               INTEL_AGP_CACHED_MEMORY_LLC;
358 }
359
360
/*
 * Bind mem's pages into the i810 GTT starting at entry pg_start.
 *
 * Validates the range fits the aperture and that every target PTE is
 * empty, then writes PTEs according to the memory type:
 *  - AGP_DCACHE_MEMORY: local (dedicated video ram) PTEs, address is
 *    simply i*4096 within that memory;
 *  - AGP_PHYS_MEMORY / AGP_NORMAL_MEMORY: physical page addresses
 *    masked by the driver's mask_memory hook.
 * Each loop ends with a posting read to flush the PTE writes.
 *
 * Returns 0 on success, -EBUSY if a target entry is occupied, -EINVAL
 * otherwise.  Note the deliberate label layout: "out" falls through
 * into "out_err" after setting ret = 0, and is_flushed is set on every
 * exit path.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
{
        int i, j, num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;


        for (j = pg_start; j < (pg_start + mem->page_count); j++) {
                if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
                        ret = -EBUSY;
                        goto out_err;
                }
        }

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        switch (mask_type) {
        case AGP_DCACHE_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                        writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
                               intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
                break;
        case AGP_PHYS_MEMORY:
        case AGP_NORMAL_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
                break;
        default:
                goto out_err;
        }

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}
422
423 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
424                                 int type)
425 {
426         int i;
427
428         if (mem->page_count == 0)
429                 return 0;
430
431         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
432                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
433         }
434         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
435
436         return 0;
437 }
438
439 /*
440  * The i810/i830 requires a physical address to program its mouse
441  * pointer into hardware.
442  * However the Xserver still writes to it through the agp aperture.
443  */
444 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
445 {
446         struct agp_memory *new;
447         struct page *page;
448
449         switch (pg_count) {
450         case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
451                 break;
452         case 4:
453                 /* kludge to get 4 physical pages for ARGB cursor */
454                 page = i8xx_alloc_pages();
455                 break;
456         default:
457                 return NULL;
458         }
459
460         if (page == NULL)
461                 return NULL;
462
463         new = agp_create_memory(pg_count);
464         if (new == NULL)
465                 return NULL;
466
467         new->pages[0] = page;
468         if (pg_count == 4) {
469                 /* kludge to get 4 physical pages for ARGB cursor */
470                 new->pages[1] = new->pages[0] + 1;
471                 new->pages[2] = new->pages[1] + 1;
472                 new->pages[3] = new->pages[2] + 1;
473         }
474         new->page_count = pg_count;
475         new->num_scratch_pages = pg_count;
476         new->type = AGP_PHYS_MEMORY;
477         new->physical = page_to_phys(new->pages[0]);
478         return new;
479 }
480
/*
 * Type-specific allocator hook for the i810.
 *
 * AGP_DCACHE_MEMORY: must cover the entire dedicated video ram
 * (pg_count == num_dcache_entries); no backing pages are needed, so
 * the page array is freed immediately.  AGP_PHYS_MEMORY is delegated
 * to alloc_agpphysmem_i8xx().  Anything else returns NULL.
 */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
        struct agp_memory *new;

        if (type == AGP_DCACHE_MEMORY) {
                if (pg_count != intel_private.num_dcache_entries)
                        return NULL;

                new = agp_create_memory(1);
                if (new == NULL)
                        return NULL;

                new->type = AGP_DCACHE_MEMORY;
                new->page_count = pg_count;
                new->num_scratch_pages = 0;
                /* dcache memory has no backing pages; drop the array */
                agp_free_page_array(new);
                return new;
        }
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
        return NULL;
}
503
/*
 * Free memory obtained from intel_i810_alloc_by_type().  Only
 * AGP_PHYS_MEMORY owns backing pages (the 4-page ARGB block via
 * i8xx_destroy_pages(), single pages via the driver hook).
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
        agp_free_key(curr->key);
        if (curr->type == AGP_PHYS_MEMORY) {
                if (curr->page_count == 4)
                        i8xx_destroy_pages(curr->pages[0]);
                else {
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_UNMAP);
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_FREE);
                }
                agp_free_page_array(curr);
        }
        kfree(curr);
}
520
521 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
522                                             dma_addr_t addr, int type)
523 {
524         /* Type checking must be done elsewhere */
525         return addr | bridge->driver->masks[type].mask;
526 }
527
/* i830+ aperture modes: {size in MB, number of GTT entries, page order}. */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
        {128, 32768, 5},
        /* The 64M mode still requires a 128k gatt */
        {64, 16384, 5},
        {256, 65536, 6},
        {512, 131072, 7},
};
536
/*
 * Determine how many GTT entries are already consumed by BIOS-stolen
 * memory and store the result in intel_private.gtt_entries.
 *
 * "size" is the amount (in KB) at the top of stolen memory that is
 * reserved (GTT itself plus BIOS popup) and therefore never available;
 * it is subtracted from the chipset-reported stolen size before
 * converting to 4KB entries.  Detected stolen memory beyond
 * intel_max_stolen is trimmed (unless it is i830 "local" memory).
 */
static void intel_i830_init_gtt_entries(void)
{
        u16 gmch_ctrl;
        int gtt_entries = 0;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        int size; /* reserved space (in kb) at the top of stolen memory */

        pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

        /* First work out "size", the reserved top-of-stolen space. */
        if (IS_I965) {
                u32 pgetbl_ctl;
                pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

                /* The 965 has a field telling us the size of the GTT,
                 * which may be larger than what is necessary to map the
                 * aperture.
                 */
                switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
                case I965_PGETBL_SIZE_128KB:
                        size = 128;
                        break;
                case I965_PGETBL_SIZE_256KB:
                        size = 256;
                        break;
                case I965_PGETBL_SIZE_512KB:
                        size = 512;
                        break;
                case I965_PGETBL_SIZE_1MB:
                        size = 1024;
                        break;
                case I965_PGETBL_SIZE_2MB:
                        size = 2048;
                        break;
                case I965_PGETBL_SIZE_1_5MB:
                        size = 1024 + 512;
                        break;
                default:
                        dev_info(&intel_private.pcidev->dev,
                                 "unknown page table size, assuming 512KB\n");
                        size = 512;
                }
                size += 4; /* add in BIOS popup space */
        } else if (IS_G33 && !IS_PINEVIEW) {
        /* G33's GTT size defined in gmch_ctrl */
                switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
                case G33_PGETBL_SIZE_1M:
                        size = 1024;
                        break;
                case G33_PGETBL_SIZE_2M:
                        size = 2048;
                        break;
                default:
                        dev_info(&agp_bridge->dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
                                (gmch_ctrl & G33_PGETBL_SIZE_MASK));
                        size = 512;
                }
                size += 4;
        } else if (IS_G4X || IS_PINEVIEW) {
                /* On 4 series hardware, GTT stolen is separate from graphics
                 * stolen, ignore it in stolen gtt entries counting.  However,
                 * 4KB of the stolen memory doesn't get mapped to the GTT.
                 */
                size = 4;
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                size = agp_bridge->driver->fetch_size() + 4;
        }

        /* Then decode the stolen-memory field per chipset family. */
        if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        gtt_entries = KB(512) - KB(size);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        gtt_entries = MB(1) - KB(size);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        gtt_entries = MB(8) - KB(size);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        /* Dedicated (local) memory: size derived from the
                         * RDRAM channel-type register, not subject to the
                         * intel_max_stolen cap below. */
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        gtt_entries = 0;
                        break;
                }
        } else if (IS_SNB) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
                u16 snb_gmch_ctl;
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
                        gtt_entries = MB(32) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
                        gtt_entries = MB(64) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
                        gtt_entries = MB(96) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
                        gtt_entries = MB(128) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
                        gtt_entries = MB(160) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
                        gtt_entries = MB(192) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
                        gtt_entries = MB(224) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
                        gtt_entries = MB(256) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
                        gtt_entries = MB(288) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
                        gtt_entries = MB(320) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
                        gtt_entries = MB(352) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
                        gtt_entries = MB(384) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
                        gtt_entries = MB(416) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
                        gtt_entries = MB(448) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
                        gtt_entries = MB(480) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
                        gtt_entries = MB(512) - KB(size);
                        break;
                }
        } else {
                /* Everything else uses the I855 field layout; the larger
                 * encodings are only valid on the chipsets checked below. */
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        gtt_entries = MB(1) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        gtt_entries = MB(4) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        gtt_entries = MB(8) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        gtt_entries = MB(16) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        gtt_entries = MB(32) - KB(size);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        /* Check it's really I915G */
                        if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
                                gtt_entries = MB(48) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        /* Check it's really I915G */
                        if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
                                gtt_entries = MB(64) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        if (IS_G33 || IS_I965 || IS_G4X)
                                gtt_entries = MB(128) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        if (IS_G33 || IS_I965 || IS_G4X)
                                gtt_entries = MB(256) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(96) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(160) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(224) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(352) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                default:
                        gtt_entries = 0;
                        break;
                }
        }
        /* Clamp to intel_max_stolen, report, and convert bytes->entries. */
        if (!local && gtt_entries > intel_max_stolen) {
                dev_info(&agp_bridge->dev->dev,
                         "detected %dK stolen memory, trimming to %dK\n",
                         gtt_entries / KB(1), intel_max_stolen / KB(1));
                gtt_entries = intel_max_stolen / KB(4);
        } else if (gtt_entries > 0) {
                dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
                       gtt_entries / KB(1), local ? "local" : "stolen");
                gtt_entries /= KB(4);
        } else {
                dev_info(&agp_bridge->dev->dev,
                       "no pre-allocated video memory detected\n");
                gtt_entries = 0;
        }

        intel_private.gtt_entries = gtt_entries;
}
777
/* Tear down the i8xx chipset-flush page.  Order matters: kunmap the
 * flush mapping and undo the agp mapping before freeing the page. */
static void intel_i830_fini_flush(void)
{
        kunmap(intel_private.i8xx_page);
        intel_private.i8xx_flush_page = NULL;
        unmap_page_from_agp(intel_private.i8xx_page);

        __free_page(intel_private.i8xx_page);
        intel_private.i8xx_page = NULL;
}
787
/*
 * Allocate and kmap the page used by intel_i830_chipset_flush().
 * Best-effort: failures leave the flush mechanism simply unset
 * (i8xx_page/i8xx_flush_page NULL) rather than reporting an error.
 */
static void intel_i830_setup_flush(void)
{
        /* return if we've already set the flush mechanism up */
        if (intel_private.i8xx_page)
                return;

        intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
        if (!intel_private.i8xx_page)
                return;

        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
        if (!intel_private.i8xx_flush_page)
                intel_i830_fini_flush();
}
802
803 /* The chipset_flush interface needs to get data that has already been
804  * flushed out of the CPU all the way out to main memory, because the GPU
805  * doesn't snoop those buffers.
806  *
807  * The 8xx series doesn't have the same lovely interface for flushing the
808  * chipset write buffers that the later chips do. According to the 865
809  * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
810  * that buffer out, we just fill 1KB and clflush it out, on the assumption
811  * that it'll push whatever was in there out.  It appears to work.
812  */
813 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
814 {
815         unsigned int *pg = intel_private.i8xx_flush_page;
816
817         memset(pg, 0, 1024);
818
819         if (cpu_has_clflush)
820                 clflush_cache_range(pg, 1024);
821         else if (wbinvd_on_all_cpus() != 0)
822                 printk(KERN_ERR "Timed out waiting for cache flush.\n");
823 }
824
825 /* The intel i830 automatically initializes the agp aperture during POST.
826  * Use the memory already set aside for in the GTT.
827  */
828 static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
829 {
830         int page_order;
831         struct aper_size_info_fixed *size;
832         int num_entries;
833         u32 temp;
834
835         size = agp_bridge->current_size;
836         page_order = size->page_order;
837         num_entries = size->num_entries;
838         agp_bridge->gatt_table_real = NULL;
839
840         pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
841         temp &= 0xfff80000;
842
843         intel_private.registers = ioremap(temp, 128 * 4096);
844         if (!intel_private.registers)
845                 return -ENOMEM;
846
847         temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
848         global_cache_flush();   /* FIXME: ?? */
849
850         /* we have to call this as early as possible after the MMIO base address is known */
851         intel_i830_init_gtt_entries();
852         if (intel_private.gtt_entries == 0) {
853                 iounmap(intel_private.registers);
854                 return -ENOMEM;
855         }
856
857         agp_bridge->gatt_table = NULL;
858
859         agp_bridge->gatt_bus_addr = temp;
860
861         return 0;
862 }
863
/* Nothing to free: the "gatt table" is the BIOS-initialized GTT living
 * at the top of stolen memory; we only mapped it, never allocated it.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
871
872 static int intel_i830_fetch_size(void)
873 {
874         u16 gmch_ctrl;
875         struct aper_size_info_fixed *values;
876
877         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
878
879         if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
880             agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
881                 /* 855GM/852GM/865G has 128MB aperture size */
882                 agp_bridge->current_size = (void *) values;
883                 agp_bridge->aperture_size_idx = 0;
884                 return values[0].size;
885         }
886
887         pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
888
889         if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
890                 agp_bridge->current_size = (void *) values;
891                 agp_bridge->aperture_size_idx = 0;
892                 return values[0].size;
893         } else {
894                 agp_bridge->current_size = (void *) (values + 1);
895                 agp_bridge->aperture_size_idx = 1;
896                 return values[1].size;
897         }
898
899         return 0;
900 }
901
/* Enable the i830 GTT: latch the aperture bus address, set the GMCH
 * enable bit, point the hardware at the page table, and scrub every
 * entry above the stolen range with the scratch page.  The readl()s
 * after MMIO writes flush PCI write posting, so their placement is
 * deliberate.
 */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* Aperture base comes from the graphics device's GMADDR BAR. */
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Point every non-stolen PTE at the scratch page. */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
933
/* Release the MMIO mapping created by intel_i830_create_gatt_table(). */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
938
/* Bind mem->page_count pages into the GTT starting at entry pg_start.
 *
 * Entries covering BIOS local/stolen memory may not be rebound, and only
 * the untyped, physical and cached memory types are accepted.  Returns 0
 * on success (including an empty request), -EINVAL otherwise.
 *
 * NOTE(review): mem->is_flushed is set to true even on the error paths
 * (out_err falls through from out); historical behaviour, kept as-is.
 */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	/* Write each PTE, then read the last one back to flush PCI
	 * write posting. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
995
996 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
997                                      int type)
998 {
999         int i;
1000
1001         if (mem->page_count == 0)
1002                 return 0;
1003
1004         if (pg_start < intel_private.gtt_entries) {
1005                 dev_info(&intel_private.pcidev->dev,
1006                          "trying to disable local/stolen memory\n");
1007                 return -EINVAL;
1008         }
1009
1010         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1011                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
1012         }
1013         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
1014
1015         return 0;
1016 }
1017
1018 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
1019 {
1020         if (type == AGP_PHYS_MEMORY)
1021                 return alloc_agpphysmem_i8xx(pg_count, type);
1022         /* always return NULL for other allocation types for now */
1023         return NULL;
1024 }
1025
1026 static int intel_alloc_chipset_flush_resource(void)
1027 {
1028         int ret;
1029         ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1030                                      PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1031                                      pcibios_align_resource, agp_bridge->dev);
1032
1033         return ret;
1034 }
1035
1036 static void intel_i915_setup_chipset_flush(void)
1037 {
1038         int ret;
1039         u32 temp;
1040
1041         pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
1042         if (!(temp & 0x1)) {
1043                 intel_alloc_chipset_flush_resource();
1044                 intel_private.resource_valid = 1;
1045                 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1046         } else {
1047                 temp &= ~1;
1048
1049                 intel_private.resource_valid = 1;
1050                 intel_private.ifp_resource.start = temp;
1051                 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1052                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1053                 /* some BIOSes reserve this area in a pnp some don't */
1054                 if (ret)
1055                         intel_private.resource_valid = 0;
1056         }
1057 }
1058
1059 static void intel_i965_g33_setup_chipset_flush(void)
1060 {
1061         u32 temp_hi, temp_lo;
1062         int ret;
1063
1064         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1065         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1066
1067         if (!(temp_lo & 0x1)) {
1068
1069                 intel_alloc_chipset_flush_resource();
1070
1071                 intel_private.resource_valid = 1;
1072                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1073                         upper_32_bits(intel_private.ifp_resource.start));
1074                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1075         } else {
1076                 u64 l64;
1077
1078                 temp_lo &= ~0x1;
1079                 l64 = ((u64)temp_hi << 32) | temp_lo;
1080
1081                 intel_private.resource_valid = 1;
1082                 intel_private.ifp_resource.start = l64;
1083                 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1084                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1085                 /* some BIOSes reserve this area in a pnp some don't */
1086                 if (ret)
1087                         intel_private.resource_valid = 0;
1088         }
1089 }
1090
1091 static void intel_i9xx_setup_flush(void)
1092 {
1093         /* return if already configured */
1094         if (intel_private.ifp_resource.start)
1095                 return;
1096
1097         if (IS_SNB)
1098                 return;
1099
1100         /* setup a resource for this object */
1101         intel_private.ifp_resource.name = "Intel Flush Page";
1102         intel_private.ifp_resource.flags = IORESOURCE_MEM;
1103
1104         /* Setup chipset flush for 915 */
1105         if (IS_I965 || IS_G33 || IS_G4X) {
1106                 intel_i965_g33_setup_chipset_flush();
1107         } else {
1108                 intel_i915_setup_chipset_flush();
1109         }
1110
1111         if (intel_private.ifp_resource.start)
1112                 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1113         if (!intel_private.i9xx_flush_page)
1114                 dev_err(&intel_private.pcidev->dev,
1115                         "can't ioremap flush page - no chipset flushing\n");
1116 }
1117
/* Enable the i9xx GTT: latch the aperture bus address, set the GMCH
 * enable bit, point the hardware at the page table and scrub all
 * non-stolen GTT entries with the scratch page.  The readl()s after
 * MMIO writes flush PCI write posting.
 */
static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* Aperture base comes from the graphics device's GMADDR BAR. */
	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Unlike i830, the i9xx GTT is mapped separately, so PTEs
		 * are written through intel_private.gtt directly. */
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}
1151
/* Undo intel_i915_create_gatt_table() and intel_i9xx_setup_flush():
 * unmap the flush page and release its resource, then drop the GTT and
 * MMIO register mappings.
 */
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	/* Reset flush-page state so setup can run again later. */
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
1163
1164 static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1165 {
1166         if (intel_private.i9xx_flush_page)
1167                 writel(1, intel_private.i9xx_flush_page);
1168 }
1169
/* Bind mem->page_count pages into the GTT starting at entry pg_start,
 * delegating the PTE writes to intel_agp_insert_sg_entries().
 *
 * Entries covering BIOS local/stolen memory may not be rebound.  The
 * memory-type check is skipped on Sandybridge, which uses its own mask
 * types.  Returns 0 on success (including an empty request), -EINVAL
 * otherwise.
 *
 * NOTE(review): mem->is_flushed is set to true even on the error paths
 * (out_err falls through from out); historical behaviour, kept as-is.
 */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

 out:
	ret = 0;
 out_err:
	mem->is_flushed = true;
	return ret;
}
1221
1222 static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1223                                      int type)
1224 {
1225         int i;
1226
1227         if (mem->page_count == 0)
1228                 return 0;
1229
1230         if (pg_start < intel_private.gtt_entries) {
1231                 dev_info(&intel_private.pcidev->dev,
1232                          "trying to disable local/stolen memory\n");
1233                 return -EINVAL;
1234         }
1235
1236         for (i = pg_start; i < (mem->page_count + pg_start); i++)
1237                 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1238
1239         readl(intel_private.gtt+i-1);
1240
1241         return 0;
1242 }
1243
1244 /* Return the aperture size by just checking the resource length.  The effect
1245  * described in the spec of the MSAC registers is just changing of the
1246  * resource size.
1247  */
1248 static int intel_i9xx_fetch_size(void)
1249 {
1250         int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1251         int aper_size; /* size in megabytes */
1252         int i;
1253
1254         aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1255
1256         for (i = 0; i < num_sizes; i++) {
1257                 if (aper_size == intel_i830_sizes[i].size) {
1258                         agp_bridge->current_size = intel_i830_sizes + i;
1259                         return aper_size;
1260                 }
1261         }
1262
1263         return 0;
1264 }
1265
1266 static int intel_i915_get_gtt_size(void)
1267 {
1268         int size;
1269
1270         if (IS_G33) {
1271                 u16 gmch_ctrl;
1272
1273                 /* G33's GTT size defined in gmch_ctrl */
1274                 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1275                 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
1276                 case I830_GMCH_GMS_STOLEN_512:
1277                         size = 512;
1278                         break;
1279                 case I830_GMCH_GMS_STOLEN_1024:
1280                         size = 1024;
1281                         break;
1282                 case I830_GMCH_GMS_STOLEN_8192:
1283                         size = 8*1024;
1284                         break;
1285                 default:
1286                         dev_info(&agp_bridge->dev->dev,
1287                                  "unknown page table size 0x%x, assuming 512KB\n",
1288                                 (gmch_ctrl & I830_GMCH_GMS_MASK));
1289                         size = 512;
1290                 }
1291         } else {
1292                 /* On previous hardware, the GTT size was just what was
1293                  * required to map the aperture.
1294                  */
1295                 size = agp_bridge->driver->fetch_size();
1296         }
1297
1298         return KB(size);
1299 }
1300
1301 /* The intel i915 automatically initializes the agp aperture during POST.
1302  * Use the memory already set aside for in the GTT.
1303  */
1304 static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1305 {
1306         int page_order;
1307         struct aper_size_info_fixed *size;
1308         int num_entries;
1309         u32 temp, temp2;
1310         int gtt_map_size;
1311
1312         size = agp_bridge->current_size;
1313         page_order = size->page_order;
1314         num_entries = size->num_entries;
1315         agp_bridge->gatt_table_real = NULL;
1316
1317         pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1318         pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1319
1320         gtt_map_size = intel_i915_get_gtt_size();
1321
1322         intel_private.gtt = ioremap(temp2, gtt_map_size);
1323         if (!intel_private.gtt)
1324                 return -ENOMEM;
1325
1326         intel_private.gtt_total_size = gtt_map_size / 4;
1327
1328         temp &= 0xfff80000;
1329
1330         intel_private.registers = ioremap(temp, 128 * 4096);
1331         if (!intel_private.registers) {
1332                 iounmap(intel_private.gtt);
1333                 return -ENOMEM;
1334         }
1335
1336         temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1337         global_cache_flush();   /* FIXME: ? */
1338
1339         /* we have to call this as early as possible after the MMIO base address is known */
1340         intel_i830_init_gtt_entries();
1341         if (intel_private.gtt_entries == 0) {
1342                 iounmap(intel_private.gtt);
1343                 iounmap(intel_private.registers);
1344                 return -ENOMEM;
1345         }
1346
1347         agp_bridge->gatt_table = NULL;
1348
1349         agp_bridge->gatt_bus_addr = temp;
1350
1351         return 0;
1352 }
1353
1354 /*
1355  * The i965 supports 36-bit physical addresses, but to keep
1356  * the format of the GTT the same, the bits that don't fit
1357  * in a 32-bit word are shifted down to bits 4..7.
1358  *
1359  * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1360  * is always zero on 32-bit architectures, so no need to make
1361  * this conditional.
1362  */
1363 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1364                                             dma_addr_t addr, int type)
1365 {
1366         /* Shift high bits down */
1367         addr |= (addr >> 28) & 0xf0;
1368
1369         /* Type checking must be done elsewhere */
1370         return addr | bridge->driver->masks[type].mask;
1371 }
1372
1373 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1374                                             dma_addr_t addr, int type)
1375 {
1376         /* gen6 has bit11-4 for physical addr bit39-32 */
1377         addr |= (addr >> 28) & 0xff0;
1378
1379         /* Type checking must be done elsewhere */
1380         return addr | bridge->driver->masks[type].mask;
1381 }
1382
1383 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1384 {
1385         u16 snb_gmch_ctl;
1386
1387         switch (agp_bridge->dev->device) {
1388         case PCI_DEVICE_ID_INTEL_GM45_HB:
1389         case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1390         case PCI_DEVICE_ID_INTEL_Q45_HB:
1391         case PCI_DEVICE_ID_INTEL_G45_HB:
1392         case PCI_DEVICE_ID_INTEL_G41_HB:
1393         case PCI_DEVICE_ID_INTEL_B43_HB:
1394         case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1395         case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1396         case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1397         case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1398                 *gtt_offset = *gtt_size = MB(2);
1399                 break;
1400         case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1401         case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1402         case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
1403                 *gtt_offset = MB(2);
1404
1405                 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1406                 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1407                 default:
1408                 case SNB_GTT_SIZE_0M:
1409                         printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1410                         *gtt_size = MB(0);
1411                         break;
1412                 case SNB_GTT_SIZE_1M:
1413                         *gtt_size = MB(1);
1414                         break;
1415                 case SNB_GTT_SIZE_2M:
1416                         *gtt_size = MB(2);
1417                         break;
1418                 }
1419                 break;
1420         default:
1421                 *gtt_offset = *gtt_size = KB(512);
1422         }
1423 }
1424
1425 /* The intel i965 automatically initializes the agp aperture during POST.
1426  * Use the memory already set aside for in the GTT.
1427  */
1428 static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1429 {
1430         int page_order;
1431         struct aper_size_info_fixed *size;
1432         int num_entries;
1433         u32 temp;
1434         int gtt_offset, gtt_size;
1435
1436         size = agp_bridge->current_size;
1437         page_order = size->page_order;
1438         num_entries = size->num_entries;
1439         agp_bridge->gatt_table_real = NULL;
1440
1441         pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1442
1443         temp &= 0xfff00000;
1444
1445         intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1446
1447         intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1448
1449         if (!intel_private.gtt)
1450                 return -ENOMEM;
1451
1452         intel_private.gtt_total_size = gtt_size / 4;
1453
1454         intel_private.registers = ioremap(temp, 128 * 4096);
1455         if (!intel_private.registers) {
1456                 iounmap(intel_private.gtt);
1457                 return -ENOMEM;
1458         }
1459
1460         temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1461         global_cache_flush();   /* FIXME: ? */
1462
1463         /* we have to call this as early as possible after the MMIO base address is known */
1464         intel_i830_init_gtt_entries();
1465         if (intel_private.gtt_entries == 0) {
1466                 iounmap(intel_private.gtt);
1467                 iounmap(intel_private.registers);
1468                 return -ENOMEM;
1469         }
1470
1471         agp_bridge->gatt_table = NULL;
1472
1473         agp_bridge->gatt_bus_addr = temp;
1474
1475         return 0;
1476 }
1477
/* Bridge driver for the i810 class: uses the generic gatt table
 * allocator (not stolen memory) and the i810-specific entry/alloc
 * routines. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
1503
/* Bridge driver for the i830 class: the gatt table is the
 * BIOS-initialized GTT in stolen memory, and chipset flushing goes
 * through the i8xx flush page. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
1530
/* Bridge driver for the i915 class: separately-mapped GTT, i9xx flush
 * page, and optional PCI DMA API mapping hooks when an Intel IOMMU may
 * be present (USE_PCI_DMA_API). */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1563
/* Bridge driver for the i965 class: like i915 but with the 36-bit
 * address mask function and the GTT located inside the MMIO BAR. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1596
/*
 * Bridge-driver ops for gen6 (Sandybridge) GMCHs.  Identical structure to
 * the i965 variant except for the gen6 PTE mask/masks (40-bit addressing;
 * see intel_gmch_probe) and the gen6 type-to-mask-type translation.
 */
static const struct agp_bridge_driver intel_gen6_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_i9xx_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_gen6_mask_memory,
        .masks                  = intel_gen6_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i965_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_gen6_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
/* DMA-API mapping hooks only make sense with an IOMMU (CONFIG_DMAR). */
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};
1629
/*
 * Bridge-driver ops for G33-class GMCHs (G33/Q35/Q33 and Pineview GMA3150,
 * per the chipset table below).  Uses the i965 PTE mask but the i915 GATT
 * table layout (intel_i915_create_gatt_table).
 */
static const struct agp_bridge_driver intel_g33_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_i9xx_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_i965_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i915_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
/* DMA-API mapping hooks only make sense with an IOMMU (CONFIG_DMAR). */
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};
1662
/* Table mapping each supported Intel GMCH PCI device id to a printable
 * chipset name and the agp_bridge_driver that services it.  gmch_driver
 * must be non-null for every entry; intel_gmch_probe() walks this table,
 * using find_gmch() to locate the graphics device for each gmch_chip_id.
 */
static const struct intel_gtt_driver_description {
        unsigned int gmch_chip_id;      /* PCI device id of the IGD function */
        char *name;                     /* chipset name printed at probe time */
        const struct agp_bridge_driver *gmch_driver;    /* ops for this GMCH */
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
            "HD Graphics", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
            "Sandybridge", &intel_gen6_driver },
        { 0, NULL, NULL }       /* sentinel: name == NULL ends the scan */
};
1724
1725 static int find_gmch(u16 device)
1726 {
1727         struct pci_dev *gmch_device;
1728
1729         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1730         if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1731                 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1732                                              device, gmch_device);
1733         }
1734
1735         if (!gmch_device)
1736                 return 0;
1737
1738         intel_private.pcidev = gmch_device;
1739         return 1;
1740 }
1741
1742 int intel_gmch_probe(struct pci_dev *pdev,
1743                                       struct agp_bridge_data *bridge)
1744 {
1745         int i, mask;
1746         bridge->driver = NULL;
1747
1748         for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1749                 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1750                         bridge->driver =
1751                                 intel_gtt_chipsets[i].gmch_driver;
1752                         break;
1753                 }
1754         }
1755
1756         if (!bridge->driver)
1757                 return 0;
1758
1759         bridge->dev_private_data = &intel_private;
1760         bridge->dev = pdev;
1761
1762         dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1763
1764         if (bridge->driver->mask_memory == intel_gen6_mask_memory)
1765                 mask = 40;
1766         else if (bridge->driver->mask_memory == intel_i965_mask_memory)
1767                 mask = 36;
1768         else
1769                 mask = 32;
1770
1771         if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1772                 dev_err(&intel_private.pcidev->dev,
1773                         "set gfx device dma mask %d-bit failed!\n", mask);
1774         else
1775                 pci_set_consistent_dma_mask(intel_private.pcidev,
1776                                             DMA_BIT_MASK(mask));
1777
1778         return 1;
1779 }
1780 EXPORT_SYMBOL(intel_gmch_probe);
1781
1782 void intel_gmch_remove(struct pci_dev *pdev)
1783 {
1784         if (intel_private.pcidev)
1785                 pci_dev_put(intel_private.pcidev);
1786 }
1787 EXPORT_SYMBOL(intel_gmch_remove);
1788
/* Module metadata; "GPL and additional rights" matches the other agp drivers. */
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");