/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

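/**
 * ttm_bo_move_ttm - move a buffer object through its TTM page array
 *
 * @bo: the buffer object to move.
 * @evict: unused here; kept for the common move-function signature.
 * @no_wait_reserve: unused here; kept for the common move-function signature.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the memory region to move to.
 *
 * Unbinds the TTM from its old aperture placement (if any), frees the old
 * node, adjusts caching attributes, and binds the TTM to @new_mem unless
 * that is system memory. On success, @bo->mem is updated and ownership of
 * @new_mem->mm_node moves to the buffer object.
 *
 * Returns 0 on success, or a negative error code on failure.
 */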
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool no_wait_reserve,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

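/*
 * ttm_mem_io_lock / ttm_mem_io_unlock - serialize io_mem reservations.
 *
 * Managers that set io_reserve_fastpath skip the mutex entirely; for the
 * rest, io_reserve_mutex protects the io_mem reservation count and the
 * io_reserve_lru list.
 */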
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}

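/*
 * Kick the least recently used buffer object off the io_reserve_lru and
 * unmap its CPU mappings, making io space available for a pending
 * reservation. Returns -EAGAIN when there is nothing left to evict.
 */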
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

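/*
 * ttm_mem_io_reserve / ttm_mem_io_free - refcounted io space reservation.
 *
 * The driver's io_mem_reserve() hook is called when the first reference is
 * taken; on -EAGAIN, buffer objects are evicted from the io_reserve_lru and
 * the reservation is retried. io_mem_free() is called when the last
 * reference is dropped.
 */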
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

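/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap - kernel mapping of a memory
 * region's io space. A write-combined mapping is used when the placement
 * asks for it, an uncached one otherwise. Premapped regions (mem->bus.addr)
 * are passed through without a fresh ioremap. *virtual is left NULL for
 * non-iomem regions.
 */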
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset,
                                          mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
                                               mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

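/*
 * Page-wise copy helpers for ttm_bo_move_memcpy(): io-to-io, io-to-TTM and
 * TTM-to-io. The TTM variants map the system page with the requested page
 * protection; on non-x86 a single-page vmap() is used when a non-default
 * protection is needed, since kmap() cannot change caching attributes.
 */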
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

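/**
 * ttm_bo_move_memcpy - fallback buffer object move by CPU copy
 *
 * @bo: the buffer object to move.
 * @evict: unused here; kept for the common move-function signature.
 * @no_wait_reserve: unused here; kept for the common move-function signature.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the memory region to move to.
 *
 * Maps both the old and the new region and copies page by page, walking
 * backwards when the regions are of the same type and may overlap. For
 * fixed (non-system) destinations the TTM is unbound and destroyed once
 * the copy is done, since further CPU access goes through the aperture.
 *
 * Returns 0 on success, or a negative error code on failure.
 */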
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_reserve, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;

        *new_obj = fbo;
        return 0;
}

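/*
 * ttm_io_prot - derive the page protection for a mapping from the TTM
 * placement caching flags, adjusting the passed-in protection per
 * architecture: write-combine or uncached on x86 and ia64, no-cache
 * (plus guarded for uncached placements) on powerpc, uncached on sparc.
 */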
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        struct page *d;
        int i;

        BUG_ON(!ttm);
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm_tt_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */
                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = ttm_tt_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

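/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * @bo: the buffer object to map; should be reserved by the caller.
 * @start_page: first page of the range to map.
 * @num_pages: number of pages to map.
 * @map: ttm_bo_kmap_obj filled in with the mapping state, to be handed
 * back to ttm_bo_kunmap() later.
 *
 * Picks the cheapest mapping for the backing storage: kmap() for a single
 * cached system page, vmap() for multi-page or non-cached system ranges,
 * and ioremap (or the premapped bus address) for io memory.
 *
 * A minimal driver-side usage sketch, assuming "bo" is an already reserved
 * buffer object (error handling elided):
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *
 *      if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *              void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *              ... access the buffer through virtual, using io accessors
 *              ... when is_iomem is true, then unmap:
 *              ttm_bo_kunmap(&map);
 *      }
 *
 * Returns 0 on success, or a negative error code on failure.
 */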
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

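/**
 * ttm_bo_kunmap - tear down a mapping set up by ttm_bo_kmap
 *
 * @map: the ttm_bo_kmap_obj previously filled in by ttm_bo_kmap().
 *
 * Undoes the kmap/vmap/ioremap according to the recorded mapping type
 * and drops the io space reservation taken at map time.
 */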
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

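/**
 * ttm_bo_move_accel_cleanup - finish up after an accelerated (GPU) move
 *
 * @bo: the buffer object that was moved.
 * @sync_obj: the fence that will signal when the move is complete.
 * @sync_obj_arg: driver-private argument associated with @sync_obj.
 * @evict: true if this move is an eviction rather than an ordinary move.
 * @no_wait_reserve: unused here; kept for the common move-function signature.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the memory region the data was moved to.
 *
 * On eviction the move is simply waited for and the old node is freed. For
 * ordinary moves the old placement is handed to a ghost buffer object so it
 * can be released when the fence signals, keeping the move pipelined.
 *
 * Returns 0 on success, or a negative error code on failure.
 */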
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
                              bool evict, bool no_wait_reserve,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);