/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
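
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * move callback typically performs GPU copies itself and falls back to
 * ttm_bo_move_ttm() for TT <-> SYSTEM transitions, which only need to
 * (un)bind pages. The mydrv_* names below are hypothetical.
 *
 * static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                          bool interruptible, bool no_wait_gpu,
 *                          struct ttm_mem_reg *new_mem)
 * {
 *         struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *         if (old_mem->mem_type == TTM_PL_TT &&
 *             new_mem->mem_type == TTM_PL_SYSTEM)
 *                 return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *
 *         return mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 * }
 */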
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
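
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * io_mem_reserve callback used above fills in mem->bus so TTM knows
 * where, and whether, the region is CPU-accessible. VRAM is typically
 * exposed through a PCI aperture, while system and TT placements need
 * no mapping. mydrv_vram_aper_base() is assumed.
 *
 * static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
 *                                 struct ttm_mem_reg *mem)
 * {
 *         mem->bus.addr = NULL;
 *         mem->bus.offset = 0;
 *         mem->bus.size = mem->num_pages << PAGE_SHIFT;
 *         mem->bus.base = 0;
 *         mem->bus.is_iomem = false;
 *
 *         switch (mem->mem_type) {
 *         case TTM_PL_SYSTEM:
 *         case TTM_PL_TT:
 *                 return 0;
 *         case TTM_PL_VRAM:
 *                 mem->bus.offset = mem->start << PAGE_SHIFT;
 *                 mem->bus.base = mydrv_vram_aper_base(bdev);
 *                 mem->bus.is_iomem = true;
 *                 return 0;
 *         default:
 *                 return -EINVAL;
 *         }
 * }
 */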
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret) {
                        /* if we fail here don't nuke the mm node
                         * as the bo still owns it */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                /* Overlapping move within one region: copy backwards. */
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here means keep the old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
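
/*
 * Usage sketch (illustrative only): ttm_bo_move_memcpy() is the
 * universal fallback for a driver move callback when no copy engine is
 * available or an accelerated move fails; mydrv_move_blit() is
 * hypothetical.
 *
 * static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                          bool interruptible, bool no_wait_gpu,
 *                          struct ttm_mem_reg *new_mem)
 * {
 *         int ret;
 *
 *         ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 *         if (ret)
 *                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *         return ret;
 * }
 */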
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */
                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
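
/*
 * Usage sketch (illustrative only): mapping a reserved buffer object
 * for CPU access. ttm_bo_kmap_obj_virtual() reports whether the
 * returned pointer is iomem, which selects between memcpy_toio() and
 * plain memcpy(); data and size are assumed to be provided by the
 * caller.
 *
 * struct ttm_bo_kmap_obj map;
 * bool is_iomem;
 * void *virtual;
 * int ret;
 *
 * ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 * if (ret)
 *         return ret;
 * virtual = ttm_bo_kmap_obj_virtual(&map, &is_iomem);
 * if (is_iomem)
 *         memcpy_toio((void __iomem *)virtual, data, size);
 * else
 *         memcpy(virtual, data, size);
 * ttm_bo_kunmap(&map);
 */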
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
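
/*
 * Usage sketch (illustrative only, hypothetical mydrv_* names): after
 * queueing a GPU blit, a driver hands the resulting fence (sync object)
 * to ttm_bo_move_accel_cleanup(), which either waits for it on eviction
 * or pipelines the release of the old placement through a ghost object:
 *
 * static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
 *                            bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 * {
 *         struct mydrv_fence *fence;
 *         int ret;
 *
 *         ret = mydrv_copy_pages(bo, &bo->mem, new_mem, &fence);
 *         if (ret)
 *                 return ret;
 *         return ttm_bo_move_accel_cleanup(bo, (void *)fence, evict,
 *                                          no_wait_gpu, new_mem);
 * }
 */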