/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */
#include <drm/drmP.h>
#include "psb_drv.h"

/*
 * GTT resource allocator - manage page mappings in GTT space
 */
/**
 * psb_gtt_mask_pte - generate GART pte entry
 * @pfn: page number to encode
 * @type: type of memory in the GART
 *
 * Set the GART entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
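/*
 * Worked example (sketch): for a page at pfn 0x1234 with type
 * PSB_MMU_CACHED_MEMORY this returns
 * (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED, i.e. the page's
 * physical address with the valid and cached flag bits in the low bits.
 */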
/**
 * psb_gtt_entry - find the GART entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object return the GART offset of the page table
 * entries for this gtt_range.
 */
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
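/*
 * Note: the pointer returned above indexes the ioremapped GTT page table
 * (dev_priv->gtt_map), one 32-bit entry per GTT page, which is why callers
 * fill it with iowrite32() rather than plain stores.
 */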
/**
 * psb_gtt_insert - put an object into the GART
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Take our preallocated GTT range and insert the GEM object into
 * the GART.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}
	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	/* Make sure we have no alias present */

	/* Write our page entries into the GART itself */
	for (i = 0; i < numpages; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);
	return 0;
}
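/*
 * The trailing ioread32() above is a posting read: it forces the queued
 * iowrite32() PTE updates out to the GTT before the caller proceeds.
 */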
/**
 * psb_gtt_remove - remove an object from the GART
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GART. Overwrite all the
 * page table entries with the dummy page.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	int i;

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	for (i = 0; i < numpages; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
}
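/*
 * Stale entries are pointed back at dev_priv->scratch_page rather than left
 * behind, so any stray GPU access through this range hits a harmless dummy
 * page instead of memory that may already have been reused.
 */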
/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin and build an in kernel list of the pages that back our GEM object.
 * While we hold this the pages cannot be swapped out.
 *
 * FIXME: Do we need to cache flush when we update the GTT?
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct inode *inode;
	struct address_space *mapping;
	int i;
	struct page *p;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	/* This is the shared memory object that backs the GEM resource */
	inode = gt->gem.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
	if (gt->pages == NULL)
		return -ENOMEM;
	for (i = 0; i < pages; i++) {
		/* FIXME: review flags later */
		p = read_cache_page_gfp(mapping, i,
					__GFP_COLD | GFP_KERNEL);
		if (IS_ERR(p))
			goto err;
		gt->pages[i] = p;
	}
	return 0;

err:
	/* Drop the references we already hold, then undo the allocation */
	while (i--)
		page_cache_release(gt->pages[i]);
	kfree(gt->pages);
	gt->pages = NULL;
	return PTR_ERR(p);
}
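/*
 * read_cache_page_gfp() above returns each page with an elevated reference
 * count, which is what keeps the pages resident while they are mapped into
 * the GART; page_cache_release() drops that reference again on the error
 * path and in psb_gtt_detach_pages().
 */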
/**
 * psb_gtt_detach_pages - unpin and release GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages
 * must have been removed from the GART as they could now be paged out
 * and move bus address.
 *
 * FIXME: Do we need to cache flush when we update the GTT?
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	int i;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	for (i = 0; i < pages; i++) {
		/* FIXME: do we need to force dirty */
		set_page_dirty(gt->pages[i]);
		/* Undo the reference we took when populating the table */
		page_cache_release(gt->pages[i]);
	}
	kfree(gt->pages);
	gt->pages = NULL;
}
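/*
 * set_page_dirty() marks each page dirty before the reference is dropped,
 * so any data the GPU wrote through the GART is written back by the VM
 * rather than discarded when the page is reclaimed.
 */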
/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: the gtt range to pin
 *
 * Pin a set of pages into the GTT. The pins are refcounted so that
 * multiple pins need multiple unpins to undo.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);
	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}
/**
 * psb_gtt_unpin - Drop a GTT pin requirement
 * @gt: the gtt range to unpin
 *
 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 * will be removed from the GTT which will also drop the page references
 * and allow the VM to clean up or page stuff.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}
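/*
 * Typical caller pattern (illustrative sketch only, not code taken from
 * this driver): pin the range, program the hardware with gt->offset while
 * the pin is held, then drop the pin when the hardware is done with it:
 *
 *	if (psb_gtt_pin(gt) == 0) {
 *		... point the display or blit engine at gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 */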
/*
 * GTT resource allocator - allocate and manage GTT address space
 */
/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: Our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 *
 * Ask the kernel core to find us a suitable range of addresses
 * to use for a GTT mapping.
 *
 * Returns a gtt_range structure describing the object, or NULL on
 * error. On successful return the resource is both allocated and marked
 * as reserved.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
					const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->pg->stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->pg->stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	kref_init(&gt->kref);

	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
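/*
 * Note that @backed selects which part of the address space the range comes
 * from: stolen-backed ranges are carved out of the stolen region at the
 * bottom of the GART, GEM backed ranges from the space above it. Ranges are
 * released again via psb_gtt_free_range()/psb_gtt_kref_put() below.
 */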
/**
 * psb_gtt_destroy - final free up of a gtt
 * @kref: the kref of the gtt
 *
 * Called from the kernel kref put when the final reference to our
 * GTT object is dropped. At that point we can free up the resources.
 *
 * For now we handle mmap clean up here to work around limits in GEM.
 */
static void psb_gtt_destroy(struct kref *kref)
{
	struct gtt_range *gt = container_of(kref, struct gtt_range, kref);

	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

/**
 * psb_gtt_kref_put - drop reference to a GTT object
 * @gt: the GT being dropped
 *
 * Drop a reference to a psb gtt
 */
void psb_gtt_kref_put(struct gtt_range *gt)
{
	kref_put(&gt->kref, psb_gtt_destroy);
}
/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	psb_gtt_kref_put(gt);
}
struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
{
	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

	if (tmp == NULL)
		return NULL;

	init_rwsem(&tmp->sem);
	tmp->dev = dev;

	return tmp;
}
void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	/* FIXME: iounmap dev_priv->vram_addr etc */
	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
}
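/*
 * Teardown restores the GMCH control word and PGETBL_CTL values that
 * psb_gtt_init() saved before enabling the GTT, returning the hardware to
 * its pre-init state.
 */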
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_gtt *pg;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t vram_pages;
	uint32_t tt_pages;
	uint32_t *ttm_gtt_map;
	uint32_t dvmt_mode = 0;
	uint32_t pte;
	int ret = 0;

	mutex_init(&dev_priv->gtt_mutex);

	dev_priv->pg = pg = psb_gtt_alloc(dev);
	if (pg == NULL)
		return -ENOMEM;

	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
	dev_priv->gtt_initialized = 1;
	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	/* FIXME: the video MMU has a HW bug accessing 0x0D0000000, so make
	 * the GATT start at 0xE0000000 instead */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
							>> PAGE_SHIFT;
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
							>> PAGE_SHIFT;

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
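	/*
	 * vram_stolen_size above is simply the gap between the stolen base
	 * reported in PSB_BSM and the physical start of the GTT table, less
	 * one page; the printout below already flags this calculation as
	 * suspect.
	 */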
	stolen_size = vram_stolen_size;

	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
		pg->gatt_start, pg->gatt_pages/256);
	printk(KERN_INFO "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
		pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
	printk(KERN_INFO "Stolen memory information\n");
	printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
	printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
		vram_stolen_size/1024);
	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
	printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		DRM_ERROR("GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	dev_priv->gtt_map =
		ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		DRM_ERROR("Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		DRM_ERROR("Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	DRM_DEBUG("vram kernel virtual address %p\n", dev_priv->vram_addr);
	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
		(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

	ttm_gtt_map = dev_priv->gtt_map + tt_pages / 2;

	/*
	 * Insert vram stolen pages.
	 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}
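	/*
	 * The stolen region is mapped 1:1 at the bottom of the GART, so the
	 * firmware framebuffer that lives in stolen memory shows up starting
	 * at GTT offset 0 (as the printk above notes).
	 */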
	/*
	 * Init rest of gtt managed by IMG.
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < tt_pages / 2 - 1; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);
	/*
	 * Init rest of gtt managed by TTM.
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	PSB_DEBUG_INIT("Initializing the rest of a total "
			"of %d gtt pages.\n", pg->gatt_pages);

	for (; i < pg->gatt_pages - tt_pages / 2; ++i)
		iowrite32(pte, ttm_gtt_map + i);
	(void) ioread32(dev_priv->gtt_map + i - 1);

	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}