/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>
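/* TTM buffer-object backend for nouveau: allocation, placement, pinning,
 * CPU mapping, and migration of buffer objects between system memory,
 * GART and VRAM.
 */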
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
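/* Pre-NV50 chips impose tiling-dependent size and alignment constraints
 * on buffers in VRAM; the per-chipset rounding below follows the
 * hardware's tile-row granularity. NV50+ only needs page-shift rounding.
 */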
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
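/* A minimal usage sketch (illustrative only; the 4 KiB scratch size and
 * the trimmed error handling are hypothetical, not taken from a real
 * caller):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, &nvbo);
 *	if (ret == 0 && nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xcafe0001);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */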
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
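/* Pinning is refcounted: only the first pin actually revalidates the bo
 * into the requested memory type (picking up TTM_PL_FLAG_NO_EVICT via
 * nouveau_bo_placement_set); later pins just bump pin_refcnt.
 */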
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}
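/* The accessors below operate on a bo mapped with nouveau_bo_map() and
 * dispatch on ttm_kmap_obj_virtual()'s is_iomem flag: iomem mappings go
 * through io{read,write}*_native(), system memory through plain loads
 * and stores.
 */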
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
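/* The copy engine expresses a transfer as line_count lines of line_length
 * bytes, with line_count capped at 2047, so page copies are submitted in
 * chunks of up to 2047 PAGE_SIZE-pitch lines per ring submission.
 */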
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
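/* The NV50 path additionally programs the source/destination surface
 * layout (methods 0x0200/0x021c): tiled VRAM surfaces get a stride and
 * height description, linear ones a single "linear" flag, and the
 * transfer proceeds in chunks of at most 4 MiB.
 */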
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
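/* Helper for >=NV50 moves: map a ttm_mem_reg into the channel's address
 * space read-only for the duration of a copy.
 */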
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}
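/* VRAM <-> system moves can't be done by the copy engine directly (see
 * the NV_MEMORY_TO_MEMORY_FORMAT comment above), so they bounce through
 * a GART (TTM_PL_TT) buffer: "flipd" copies into TT then flips TT ->
 * SYSTEM, "flips" flips SYSTEM -> TT then copies TT -> destination.
 */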
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else if (new_mem && new_mem->mem_type == TTM_PL_TT &&
			   nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0,
					  new_mem->num_pages << PAGE_SHIFT,
					  node, node->pages);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
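/* ttm_tt backend population: with SWIOTLB active the TTM DMA pool handles
 * everything; otherwise pages come from the regular TTM pool and are
 * DMA-mapped here one page at a time.
 */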
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate((void *)ttm, dev->dev);
#endif

	r = ttm_pool_populate(ttm);
	if (r)
		return r;

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind the mappings created so far */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
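/* Per-VM mapping bookkeeping: each nouveau_bo keeps a list of the
 * nouveau_vma mappings it holds in the various address spaces.
 */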
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);
	return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}