/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

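/*
 * TTM destructor: tears down any kernel mapping, unlinks the buffer
 * from the per-device bo_list and frees the wrapper.  Warns if the bo
 * is still attached to a GEM object, since GEM should have detached
 * itself before the last TTM reference went away.
 */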
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_del(&nvbo->head);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        kfree(nvbo);
}

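/*
 * Allocates and validates a new buffer object.  Handles the tiling
 * over-allocation quirks below, rounds the size up to the page size
 * (and to 64KiB on NV50), then hands the object to TTM for placement.
 */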
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret, n = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as to the page size. Overallocate memory to
         * avoid corruption of other buffer objects.
         */
        switch (tile_flags) {
        case 0x1800:
        case 0x2800:
        case 0x4800:
        case 0x7a00:
                if (dev_priv->chipset >= 0xA0) {
                        /* This is based on high end cards with 448-bit
                         * memory buses, could be different elsewhere. */
                        size += 6 * 28672;
                        /* 8 * 28672 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 8 * 28672;
                } else if (dev_priv->chipset >= 0x90) {
                        size += 3 * 16384;
                        /* 12 * 16384 is already a multiple of page size. */
                        align = 12 * 16384;
                } else {
                        size += 3 * 8192;
                        /* 12 * 8192 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 12 * 8192;
                }
                break;
        default:
                break;
        }

        align >>= PAGE_SHIFT;

        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
        if (dev_priv->card_type == NV_50) {
                size = (size + 65535) & ~65535;
                if (align < (65536 / PAGE_SIZE))
                        align = (65536 / PAGE_SIZE);
        }

        if (flags & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (flags & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;

        nvbo->channel = chan;
        nouveau_bo_placement_set(nvbo, flags);
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        *pnvbo = nvbo;
        return 0;
}

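/*
 * Rebuilds the TTM placement list from a mask of TTM_PL_FLAG_* memory
 * types, accepting any caching mode for each domain.
 */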
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
        int n = 0;

        if (memtype & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_SYSTEM)
                nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;
}

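/*
 * Pins the buffer into the requested memory type by validating it with
 * TTM_PL_FLAG_NO_EVICT set.  Pinning is refcounted; only the first pin
 * moves the buffer, and pinning an already-pinned buffer into a
 * different domain fails with -EINVAL.
 */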
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype);
        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

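/*
 * Drops one pin reference; on the last unpin the NO_EVICT flag is
 * cleared and the buffer is revalidated so TTM may evict it again.
 */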
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

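/*
 * Maps the whole buffer into kernel address space via ttm_bo_kmap().
 * The mapping is cached in nvbo->kmap for the rdNN/wrNN accessors
 * below and stays valid until nouveau_bo_unmap().
 */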
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}

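/*
 * 16- and 32-bit accessors for a kmapped buffer.  The kmap may be a
 * regular kernel pointer or an ioremapped one, so each access checks
 * is_iomem and uses the native-endian MMIO helpers when needed.
 */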
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

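/*
 * TTM backend constructor: picks the AGP or SGDMA backend to match
 * however the GART was set up for this card.
 */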
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

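/*
 * Describes each memory type to TTM: aperture location and size,
 * allowed caching modes, and the GPU virtual base used when binding
 * buffers on cards with a VM.
 */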
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                man->io_addr = NULL;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);

                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset  = dev_priv->gart_info.aper_base;
                man->io_size    = dev_priv->gart_info.aper_size;
                man->io_addr    = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

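/* On eviction, buffers of any type are moved out to system memory. */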
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
                break;
        }
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict, bool no_wait,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict, no_wait, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

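/*
 * Returns the DMA object handle covering @mem for the channel doing
 * the copy: the kernel channel uses the global VRAM/GART ctxdmas,
 * user channels use their own handles.
 */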
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
{
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

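/*
 * Copies a buffer between placements with the M2MF object, at most
 * 2047 page-sized lines per iteration, then fences the move.  Falls
 * back to the kernel channel for tiled or VM-less buffers.
 */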
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
                     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;
        uint32_t page_count;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm) {
                chan = dev_priv->channel;
                if (!chan)
                        return -EINVAL;
        }

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                OUT_RING(chan, 1);
                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                OUT_RING(chan, 1);
        }

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);
                        if (ret)
                                return ret;
                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));
                }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1<<8)|(1<<0));
                OUT_RING(chan, 0);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING(chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

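/*
 * Move to system memory: the destination isn't GPU-accessible, so
 * M2MF first copies into a temporary GART placement, then TTM
 * finishes the move to the system destination.
 */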
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

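/*
 * Move from system memory: the inverse of the above; the pages are
 * first bound to a temporary GART placement so M2MF can copy them to
 * the final destination.
 */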
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

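/*
 * Top-level move hook.  Binds the new VRAM range into the channel VM
 * on NV50, picks an accelerated copy path where one exists, and falls
 * back to ttm_bo_move_memcpy() whenever acceleration fails or the
 * card isn't initialised yet.
 */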
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_device *dev = dev_priv->dev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
            !nvbo->no_vm) {
                uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;

                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;
        }

        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
                return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                return 0;
        }

        if (new_mem->mem_type == TTM_PL_SYSTEM) {
                if (old_mem->mem_type == TTM_PL_SYSTEM)
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
                if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
                if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
                if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return 0;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

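/* TTM driver vtable; fencing is implemented on top of nouveau_fence. */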
struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
};