/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_del(&nvbo->head);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        kfree(nvbo);
}

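/* Allocate a new buffer object.  The requested size/alignment may be
 * bumped below to satisfy tiling constraints, the allowed placements are
 * recorded, and the object is then handed to TTM to manage its backing
 * storage.
 */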
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, n = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size.  Overallocate memory to
         * avoid corruption of other buffer objects.
         */
        switch (tile_flags) {
        case 0x1800:
        case 0x2800:
        case 0x4800:
        case 0x7a00:
                if (dev_priv->chipset >= 0xA0) {
                        /* This is based on high end cards with 448 bits
                         * memory bus, could be different elsewhere. */
                        size += 6 * 28672;
                        /* 8 * 28672 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 8 * 28672;
                } else if (dev_priv->chipset >= 0x90) {
                        size += 3 * 16384;
                        align = 12 * 16384;
                } else {
                        size += 3 * 8192;
                        /* 12 * 8192 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 12 * 8192;
                }
                break;
        default:
                break;
        }

        align >>= PAGE_SHIFT;

        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
        if (dev_priv->card_type == NV_50) {
                size = (size + 65535) & ~65535;
                if (align < (65536 / PAGE_SIZE))
                        align = (65536 / PAGE_SIZE);
        }

        if (flags & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (flags & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;

        nvbo->channel = chan;
        nouveau_bo_placement_set(nvbo, flags);
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        *pnvbo = nvbo;
        return 0;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
        int n = 0;

        if (memtype & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_SYSTEM)
                nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;
}

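/* Pinning: the first pin validates the buffer into the requested memory
 * type with TTM_PL_FLAG_NO_EVICT set and updates the free-aperture
 * accounting; nested pins only bump pin_refcnt.  nouveau_bo_unpin()
 * reverses this once the last reference is dropped.
 */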
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype);
        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (ret)
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

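/* Map the entire buffer into kernel address space; the mapping is kept
 * in nvbo->kmap for the rd/wr accessors below until nouveau_bo_unmap().
 */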
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}

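/* The rd/wr helpers go through nvbo->kmap.  Depending on where the
 * buffer currently lives the mapping may be I/O memory, so the
 * io*_native() accessors are used when ttm_kmap_obj_virtual() reports
 * an iomem mapping, and plain loads/stores otherwise.
 */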
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

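/* TTM backend creation: an AGP-managed GART uses the generic AGP backend,
 * while everything else is bound through nouveau's SGDMA code.
 */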
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);

                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size = dev_priv->gart_info.aper_size;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict, bool no_wait,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict, no_wait, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

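/* Select the DMA object handle the M2MF methods should use for a given
 * memory type: moves on the kernel's own channel use the global
 * NvDmaGART/NvDmaVRAM objects, user channels use their per-channel
 * GART/VRAM handles.
 */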
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
{
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

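/* Copy the buffer with M2MF, PAGE_SIZE bytes per line and at most 2047
 * lines per method call, advancing src/dst offsets until all pages have
 * been transferred.  On NV50 the upper 32 bits of the offsets are
 * programmed separately for each chunk.
 */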
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
                     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;
        uint32_t page_count;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm) {
                chan = dev_priv->channel;
                if (!chan)
                        return -EINVAL;
        }

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                OUT_RING(chan, 1);
                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                OUT_RING(chan, 1);
        }

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);
                        if (ret)
                                return ret;
                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));
                }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1<<8)|(1<<0)); /* format */
                OUT_RING(chan, 0); /* buffer notify */
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING(chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

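/* System memory can't be reached by the M2MF object (see the comment
 * above), so moves to/from TTM_PL_SYSTEM bounce through a temporary
 * GART (TT) allocation: _flipd copies into the bounce buffer before the
 * final ttm move, _flips does the ttm move first and copies out of it.
 */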
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
        if (ret)
                goto out;

out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

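/* Top-level move handler: prefer the GPU copy paths above, falling back
 * to ttm_bo_move_memcpy() when the card isn't fully initialised or the
 * accelerated path fails.
 */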
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_device *dev = dev_priv->dev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
            !nvbo->no_vm) {
                uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;

                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;
        }

        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
                return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                return 0;
        }

        if (new_mem->mem_type == TTM_PL_SYSTEM) {
                if (old_mem->mem_type == TTM_PL_SYSTEM)
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
                if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
                if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
                if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return 0;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
};