drm/nouveau: no need to update bo.offset from vma after validate
drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

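/* TTM destructor: warn if the bo is still bound to a GEM object, then
 * release its tile region and channel VM mapping before freeing it.
 */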
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        if (nvbo->vma.node) {
                nouveau_vm_unmap(&nvbo->vma);
                nouveau_vm_put(&nvbo->vma);
        }
        kfree(nvbo);
}

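/* Apply per-chipset size/alignment constraints: pre-NV50 tiled buffers
 * are padded to the tile pitch, NV50+ buffers are rounded to the page
 * size chosen for the channel VM mapping.
 */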
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size, int *page_shift)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                if (likely(dev_priv->chan_vm)) {
                        if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
                                *page_shift = dev_priv->chan_vm->lpg_shift;
                        else
                                *page_shift = dev_priv->chan_vm->spg_shift;
                } else {
                        *page_shift = 12;
                }

                *size = roundup(*size, (1 << *page_shift));
                *align = max((1 << *page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

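/* Allocate a new nouveau_bo.  On cards with a channel VM, address space
 * is reserved up-front; once ttm_bo_init() has been called, TTM owns the
 * object and destroys it through nouveau_bo_del_ttm() on failure.
 */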
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, page_shift = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
        align >>= PAGE_SHIFT;

        if (dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                     NV_MEM_ACCESS_RW, &nvbo->vma);
                if (ret) {
                        kfree(nvbo);
                        return ret;
                }
        }

        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        if (nvbo->vma.node)
                nvbo->bo.offset = nvbo->vma.offset;
        *pnvbo = nvbo;
        return 0;
}

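/* Expand a TTM placement mask into the per-entry flag array. */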
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 2) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

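/* Rebuild the normal and busy placement lists for the bo, marking it
 * non-evictable while a pin reference is held.
 */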
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

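/* Pin a buffer into the requested memory type.  Only the first pin
 * actually moves the bo; later calls just take another reference, and
 * aperture usage is accounted on success.
 */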
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

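/* Drop a pin reference.  The final unpin revalidates the bo in its
 * current placement and returns the space to the aperture counters.
 */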
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

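/* Map the whole buffer into kernel address space. */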
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

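/* Undo a mapping created by nouveau_bo_map(). */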
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

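/* Validate the bo against its currently configured placement. */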
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        return ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                               no_wait_reserve, no_wait_gpu);
}

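/* Kmap accessors: read/write the buffer through the mapping set up by
 * nouveau_bo_map(), using io accessors when the mapping is I/O memory.
 */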
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

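/* Create the TTM backend matching the GART type in use. */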
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

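/* Describe the SYSTEM/VRAM/TT memory pools to TTM, selecting the
 * nouveau-specific managers on NV50 and later cards.
 */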
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                if (dev_priv->card_type >= NV_50)
                        man->func = &nouveau_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_PDMA:
                case NOUVEAU_GART_HW:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        man->gpu_offset = dev_priv->gart_info.aper_base;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

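/* On eviction, VRAM buffers prefer to spill to TT (falling back to
 * system memory); everything else goes straight to system memory.
 */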
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

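/* Fence the copy on @chan and hand the accelerated move back to TTM
 * to complete.
 */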
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

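/* Fermi (NVC0) M2MF copy, submitted in runs of up to 2047 pages. */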
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *old_node = old_mem->mm_node;
        struct nouveau_mem *new_node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u32 page_count = new_mem->num_pages;
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_node->tmp_vma.offset;
        if (new_node->tmp_vma.node)
                dst_offset = new_node->tmp_vma.offset;
        else
                dst_offset = nvbo->vma.offset;

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

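/* NV50 M2MF copy: programs linear or tiled source/destination layouts
 * as needed and moves up to 4MiB per loop iteration.
 */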
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *old_node = old_mem->mm_node;
        struct nouveau_mem *new_node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_node->tmp_vma.offset;
        if (new_node->tmp_vma.node)
                dst_offset = new_node->tmp_vma.offset;
        else
                dst_offset = nvbo->vma.offset;

        while (length) {
                u32 amount, stride, height;

                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

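/* Return the ctxdma handle covering @mem for this channel. */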
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

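/* Pre-NV50 M2MF copy using 32-bit offsets into the vram/gart ctxdmas. */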
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

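/* GPU copy entry point: maps a temporary VMA for the old backing store
 * on NV50+, then dispatches to the per-generation M2MF implementation.
 * Uses the bo's channel if it has one, otherwise the kernel channel.
 */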
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan) {
                chan = dev_priv->channel;
                mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
        }

        /* create temporary vma for old memory, this will get cleaned
         * up after ttm destroys the ttm_mem_reg
         */
        if (dev_priv->card_type >= NV_50) {
                struct nouveau_mem *node = old_mem->mm_node;
                if (!node->tmp_vma.node) {
                        u32 page_shift = nvbo->vma.node->type;
                        if (old_mem->mem_type == TTM_PL_TT)
                                page_shift = nvbo->vma.vm->spg_shift;

                        ret = nouveau_vm_get(chan->vm,
                                             old_mem->num_pages << PAGE_SHIFT,
                                             page_shift, NV_MEM_ACCESS_RO,
                                             &node->tmp_vma);
                        if (ret)
                                goto out;
                }

                if (old_mem->mem_type == TTM_PL_VRAM)
                        nouveau_vm_map(&node->tmp_vma, node);
                else {
                        nouveau_vm_map_sg(&node->tmp_vma, 0,
                                          old_mem->num_pages << PAGE_SHIFT,
                                          node, node->pages);
                }
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
        if (dev_priv->card_type < NV_C0)
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

out:
        if (chan == dev_priv->channel)
                mutex_unlock(&chan->mutex);
        return ret;
}

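/* Move into system memory in two hops: GPU-copy into a temporary TT
 * placement first, since M2MF cannot access system pages directly.
 */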
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        if (dev_priv->card_type >= NV_50) {
                struct nouveau_bo *nvbo = nouveau_bo(bo);
                struct nouveau_mem *node = tmp_mem.mm_node;
                struct nouveau_vma *vma = &nvbo->vma;
                if (vma->node->type != vma->vm->spg_shift)
                        vma = &node->tmp_vma;
                nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
                                  node, node->pages);
        }

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

        if (dev_priv->card_type >= NV_50) {
                struct nouveau_bo *nvbo = nouveau_bo(bo);
                nouveau_vm_unmap(&nvbo->vma);
        }

        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

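/* Move out of system memory: bind to a temporary TT placement, then
 * GPU-copy into the final placement.
 */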
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

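/* Keep the NV50+ channel VM mapping in sync with the bo's new backing
 * storage whenever TTM moves it.
 */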
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma = &nvbo->vma;
        struct nouveau_vm *vm = vma->vm;

        if (dev_priv->card_type < NV_50)
                return;

        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                nouveau_vm_map(vma, node);
                break;
        case TTM_PL_TT:
                if (vma->node->type != vm->spg_shift) {
                        nouveau_vm_unmap(vma);
                        vma = &node->tmp_vma;
                }
                nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
                                  node, node->pages);
                break;
        default:
                nouveau_vm_unmap(&nvbo->vma);
                break;
        }
}

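/* Set up a tile region covering the new VRAM placement (NV10 and up). */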
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

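/* Release the old tile region once the copy has been fenced, recording
 * the new one in its place.
 */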
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}

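/* TTM move callback: handle the fake (no backing store) copy, use
 * memcpy while the card isn't up, otherwise try the hardware copy
 * paths and fall back to memcpy, keeping pre-NV50 tile regions up to
 * date on the way out.
 */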
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (dev_priv->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

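/* Fill in the bus address/size for CPU mapping of @mem, going through
 * a BAR1 VM mapping for VRAM on cards that have one.
 */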
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_mem *node = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type == NV_C0)
                        page_shift = node->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &node->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&node->bar_vma, node);

                mem->bus.offset = node->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

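/* Tear down the BAR1 mapping created by nouveau_ttm_io_mem_reserve(). */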
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_mem *node = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!node->bar_vma.node)
                return;

        nouveau_vm_unmap(&node->bar_vma);
        nouveau_vm_put(&node->bar_vma);
}

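/* Called on CPU fault: make sure a VRAM bo lies within the mappable
 * part of VRAM before the fault is serviced.
 */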
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

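/* Attach @fence as the bo's sync object, releasing the previous one. */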
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};