/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        if (nvbo->vma.node) {
                nouveau_vm_unmap(&nvbo->vma);
                nouveau_vm_put(&nvbo->vma);
        }
        kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size, int *page_shift)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                if (likely(dev_priv->chan_vm)) {
                        if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
                                *page_shift = dev_priv->chan_vm->lpg_shift;
                        else
                                *page_shift = dev_priv->chan_vm->spg_shift;
                } else {
                        *page_shift = 12;
                }

                *size = roundup(*size, (1 << *page_shift));
                *align = max((1 << *page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

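/*
 * Editor's worked example (not part of the original file): on an NV4x
 * chip with tile_mode = 4, a 100 KiB request is rounded to a multiple
 * of 64 * 4 = 256 bytes and given a 64 KiB alignment.  On NV50+ with a
 * channel VM, a 1 MiB VRAM buffer takes the large-page branch, so
 * assuming a typical lpg_shift of 16:
 *
 *      *size  = roundup(0x100000, 1 << 16);    // stays 0x100000
 *      *align = max(1 << 16, *align);          // at least 64 KiB
 */
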
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, page_shift = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
        align >>= PAGE_SHIFT;

        if (dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                     NV_MEM_ACCESS_RW, &nvbo->vma);
                if (ret) {
                        kfree(nvbo);
                        return ret;
                }
        }

        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        if (nvbo->vma.node)
                nvbo->bo.offset = nvbo->vma.offset;
        *pnvbo = nvbo;
        return 0;
}

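/*
 * Editor's usage sketch (illustrative only, not in the original file):
 * a typical caller allocates a buffer, maps it, and pokes it through the
 * accessors further down.  Passing a NULL channel means no channel is
 * preferred for accelerated moves.
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_TT,
 *                           0, 0, &nvbo);
 *      if (ret == 0)
 *              ret = nouveau_bo_map(nvbo);
 *      if (ret == 0) {
 *              nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *              nouveau_bo_unmap(nvbo);
 *      }
 */
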
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 2) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

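/*
 * Editor's worked example (not part of the original file): with 64 MiB
 * of VRAM, vram_pages = 0x4000.  A tiled depth (zeta) buffer is then
 * constrained to pages [0x2000, ~0) and a tiled color buffer to
 * [0, 0x2000), so the two land on different memory controller units.
 */
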
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                         (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (ret)
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

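/*
 * Editor's note (not part of the original file): pin/unpin are
 * refcounted and must stay balanced; only the first pin and the last
 * unpin touch the placement and aperture accounting.
 *
 *      ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); // bo now evict-proof
 *      ...
 *      nouveau_bo_unpin(nvbo);                       // evictable again
 */
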
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

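/*
 * Editor's note (not part of the original file): the rd/wr helpers above
 * require a prior successful nouveau_bo_map(), and "index" counts
 * elements of the access width, not bytes.  Poking the 32-bit word at
 * byte offset 0x10 therefore looks like:
 *
 *      nouveau_bo_wr32(nvbo, 0x10 / 4, value);
 *      value = nouveau_bo_rd32(nvbo, 0x10 / 4);
 */
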
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                if (dev_priv->card_type >= NV_50)
                        man->func = &nouveau_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_PDMA:
                case NOUVEAU_GART_HW:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        man->gpu_offset = dev_priv->gart_info.aper_base;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *old_node = old_mem->mm_node;
        struct nouveau_mem *new_node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u32 page_count = new_mem->num_pages;
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_node->tmp_vma.offset;
        if (new_node->tmp_vma.node)
                dst_offset = new_node->tmp_vma.offset;
        else
                dst_offset = nvbo->vma.offset;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
                OUT_RING (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

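/*
 * Editor's note (not part of the original file): the copy is issued as
 * lines of PAGE_SIZE bytes, and the line-count field caps at 2047 lines
 * per submission, so e.g. a 16 MiB buffer (4096 pages) is split into
 * 2047 + 2047 + 2 pages over three loop iterations.
 */
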
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *old_node = old_mem->mm_node;
        struct nouveau_mem *new_node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_node->tmp_vma.offset;
        if (new_node->tmp_vma.node)
                dst_offset = new_node->tmp_vma.offset;
        else
                dst_offset = nvbo->vma.offset;

        while (length) {
                u32 amount, stride, height;

                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, height);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING (chan, src_offset);
                OUT_RING (chan, dst_offset);
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan) {
                chan = dev_priv->channel;
                mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
        }

        /* create temporary vma for old memory, this will get cleaned
         * up after ttm destroys the ttm_mem_reg
         */
        if (dev_priv->card_type >= NV_50) {
                struct nouveau_mem *node = old_mem->mm_node;
                if (!node->tmp_vma.node) {
                        u32 page_shift = nvbo->vma.node->type;
                        if (old_mem->mem_type == TTM_PL_TT)
                                page_shift = nvbo->vma.vm->spg_shift;

                        ret = nouveau_vm_get(chan->vm,
                                             old_mem->num_pages << PAGE_SHIFT,
                                             page_shift, NV_MEM_ACCESS_RO,
                                             &node->tmp_vma);
                        if (ret)
                                goto out;
                }

                if (old_mem->mem_type == TTM_PL_VRAM)
                        nouveau_vm_map(&node->tmp_vma, node);
                else {
                        nouveau_vm_map_sg(&node->tmp_vma, 0,
                                          old_mem->num_pages << PAGE_SHIFT,
                                          node, node->pages);
                }
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
        if (dev_priv->card_type < NV_C0)
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

out:
        if (chan == dev_priv->channel)
                mutex_unlock(&chan->mutex);
        return ret;
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        if (dev_priv->card_type >= NV_50) {
                struct nouveau_bo *nvbo = nouveau_bo(bo);
                struct nouveau_mem *node = tmp_mem.mm_node;
                struct nouveau_vma *vma = &nvbo->vma;
                if (vma->node->type != vma->vm->spg_shift)
                        vma = &node->tmp_vma;
                nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
                                  node, node->pages);
        }

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

        if (dev_priv->card_type >= NV_50) {
                struct nouveau_bo *nvbo = nouveau_bo(bo);
                nouveau_vm_unmap(&nvbo->vma);
        }

        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma = &nvbo->vma;
        struct nouveau_vm *vm = vma->vm;

        if (dev_priv->card_type < NV_50)
                return;

        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                nouveau_vm_map(vma, node);
                break;
        case TTM_PL_TT:
                if (vma->node->type != vm->spg_shift) {
                        nouveau_vm_unmap(vma);
                        vma = &node->tmp_vma;
                }
                nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
                                  node, node->pages);
                break;
        default:
                nouveau_vm_unmap(&nvbo->vma);
                break;
        }
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (dev_priv->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

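/*
 * Editor's summary (not part of the original file) of the dispatch above:
 *
 *      SYSTEM, no ttm  -> "fake" move: just adopt the new ttm_mem_reg
 *      no channel yet  -> ttm_bo_move_memcpy()
 *      dst is SYSTEM   -> flipd: M2MF blit into a TT staging area, then
 *                         ttm_bo_move_ttm() out to system pages
 *      src is SYSTEM   -> flips: the same dance in the other direction
 *      VRAM <-> TT     -> direct M2MF blit
 *
 * Any hardware-path failure falls back to ttm_bo_move_memcpy().
 */
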
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_mem *node = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type == NV_C0)
                        page_shift = node->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &node->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&node->bar_vma, node);
                if (ret) {
                        nouveau_vm_put(&node->bar_vma);
                        return ret;
                }

                mem->bus.offset = node->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_mem *node = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!node->bar_vma.node)
                return;

        nouveau_vm_unmap(&node->bar_vma);
        nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

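/*
 * Editor's worked example (not part of the original file): with a
 * 256 MiB CPU-mappable BAR, fb_mappable_pages = 0x10000.  A faulting bo
 * whose pages extend past that mark is re-validated with lpfn set to
 * 0x10000, forcing it wholly inside the mappable aperture before the
 * fault is satisfied.
 */
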
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};