1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30
31 #include "ttm/ttm_module.h"
32 #include "ttm/ttm_bo_driver.h"
33 #include "ttm/ttm_placement.h"
34 #include <linux/jiffies.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
37 #include <linux/mm.h>
38 #include <linux/file.h>
39 #include <linux/module.h>
40 #include <linux/atomic.h>
41
42 #define TTM_ASSERT_LOCKED(param)
43 #define TTM_DEBUG(fmt, arg...)
44 #define TTM_BO_HASH_ORDER 13
45
46 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
47 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 static void ttm_bo_global_kobj_release(struct kobject *kobj);
49
50 static struct attribute ttm_bo_count = {
51         .name = "bo_count",
52         .mode = S_IRUGO
53 };
54
55 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
56 {
57         int i;
58
59         for (i = 0; i <= TTM_PL_PRIV5; i++)
60                 if (flags & (1 << i)) {
61                         *mem_type = i;
62                         return 0;
63                 }
64         return -EINVAL;
65 }
66
67 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
68 {
69         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
70
71         printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
72         printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
73         printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
74         printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
75         printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
76         printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
77                 man->available_caching);
78         printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
79                 man->default_caching);
80         if (mem_type != TTM_PL_SYSTEM)
81                 (*man->func->debug)(man, TTM_PFX);
82 }
83
84 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
85                                         struct ttm_placement *placement)
86 {
87         int i, ret, mem_type;
88
89         printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
90                 bo, bo->mem.num_pages, bo->mem.size >> 10,
91                 bo->mem.size >> 20);
92         for (i = 0; i < placement->num_placement; i++) {
93                 ret = ttm_mem_type_from_flags(placement->placement[i],
94                                                 &mem_type);
95                 if (ret)
96                         return;
97                 printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
98                         i, placement->placement[i], mem_type);
99                 ttm_mem_type_debug(bo->bdev, mem_type);
100         }
101 }
102
103 static ssize_t ttm_bo_global_show(struct kobject *kobj,
104                                   struct attribute *attr,
105                                   char *buffer)
106 {
107         struct ttm_bo_global *glob =
108                 container_of(kobj, struct ttm_bo_global, kobj);
109
110         return snprintf(buffer, PAGE_SIZE, "%lu\n",
111                         (unsigned long) atomic_read(&glob->bo_count));
112 }
113
114 static struct attribute *ttm_bo_global_attrs[] = {
115         &ttm_bo_count,
116         NULL
117 };
118
119 static const struct sysfs_ops ttm_bo_global_ops = {
120         .show = &ttm_bo_global_show
121 };
122
123 static struct kobj_type ttm_bo_glob_kobj_type  = {
124         .release = &ttm_bo_global_kobj_release,
125         .sysfs_ops = &ttm_bo_global_ops,
126         .default_attrs = ttm_bo_global_attrs
127 };
128
129
130 static inline uint32_t ttm_bo_type_flags(unsigned type)
131 {
132         return 1 << (type);
133 }
134
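/*
 * Final release of the list_kref reference. At this point the object must
 * be off all LRU and delayed-destroy lists; any remaining TTM is destroyed
 * and the object is freed either through the driver's destroy callback or
 * with kfree().
 */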
135 static void ttm_bo_release_list(struct kref *list_kref)
136 {
137         struct ttm_buffer_object *bo =
138             container_of(list_kref, struct ttm_buffer_object, list_kref);
139         struct ttm_bo_device *bdev = bo->bdev;
140
141         BUG_ON(atomic_read(&bo->list_kref.refcount));
142         BUG_ON(atomic_read(&bo->kref.refcount));
143         BUG_ON(atomic_read(&bo->cpu_writers));
144         BUG_ON(bo->sync_obj != NULL);
145         BUG_ON(bo->mem.mm_node != NULL);
146         BUG_ON(!list_empty(&bo->lru));
147         BUG_ON(!list_empty(&bo->ddestroy));
148
149         if (bo->ttm)
150                 ttm_tt_destroy(bo->ttm);
151         atomic_dec(&bo->glob->bo_count);
152         if (bo->destroy)
153                 bo->destroy(bo);
154         else {
155                 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
156                 kfree(bo);
157         }
158 }
159
160 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
161 {
162         if (interruptible) {
163                 return wait_event_interruptible(bo->event_queue,
164                                                atomic_read(&bo->reserved) == 0);
165         } else {
166                 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
167                 return 0;
168         }
169 }
170 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
171
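/*
 * Put a reserved buffer object back on its memory type's LRU list and,
 * if it is backed by a TTM, on the global swap LRU. Each list insertion
 * takes a list_kref reference. Buffers marked TTM_PL_FLAG_NO_EVICT are
 * never added.
 */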
172 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
173 {
174         struct ttm_bo_device *bdev = bo->bdev;
175         struct ttm_mem_type_manager *man;
176
177         BUG_ON(!atomic_read(&bo->reserved));
178
179         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
180
181                 BUG_ON(!list_empty(&bo->lru));
182
183                 man = &bdev->man[bo->mem.mem_type];
184                 list_add_tail(&bo->lru, &man->lru);
185                 kref_get(&bo->list_kref);
186
187                 if (bo->ttm != NULL) {
188                         list_add_tail(&bo->swap, &bo->glob->swap_lru);
189                         kref_get(&bo->list_kref);
190                 }
191         }
192 }
193
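/*
 * Remove the buffer object from whatever LRU and swap lists it is on.
 * Returns the number of list_kref references the caller must drop,
 * typically via ttm_bo_list_ref_sub().
 */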
194 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
195 {
196         int put_count = 0;
197
198         if (!list_empty(&bo->swap)) {
199                 list_del_init(&bo->swap);
200                 ++put_count;
201         }
202         if (!list_empty(&bo->lru)) {
203                 list_del_init(&bo->lru);
204                 ++put_count;
205         }
206
207         /*
208          * TODO: Add a driver hook to delete from
209          * driver-specific LRU's here.
210          */
211
212         return put_count;
213 }
214
215 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
216                           bool interruptible,
217                           bool no_wait, bool use_sequence, uint32_t sequence)
218 {
219         struct ttm_bo_global *glob = bo->glob;
220         int ret;
221
222         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
223                 /**
224                  * Deadlock avoidance for multi-bo reserving.
225                  */
226                 if (use_sequence && bo->seq_valid) {
227                         /**
228                          * We've already reserved this one.
229                          */
230                         if (unlikely(sequence == bo->val_seq))
231                                 return -EDEADLK;
232                         /**
233                          * Already reserved by a thread that will not back
234                          * off for us. We need to back off.
235                          */
236                         if (unlikely(sequence - bo->val_seq < (1 << 31)))
237                                 return -EAGAIN;
238                 }
239
240                 if (no_wait)
241                         return -EBUSY;
242
243                 spin_unlock(&glob->lru_lock);
244                 ret = ttm_bo_wait_unreserved(bo, interruptible);
245                 spin_lock(&glob->lru_lock);
246
247                 if (unlikely(ret))
248                         return ret;
249         }
250
251         if (use_sequence) {
252                 /**
253                  * Wake up waiters that may need to recheck for deadlock,
254                  * if we decreased the sequence number.
255                  */
256                 if (unlikely((bo->val_seq - sequence < (1 << 31))
257                              || !bo->seq_valid))
258                         wake_up_all(&bo->event_queue);
259
260                 bo->val_seq = sequence;
261                 bo->seq_valid = true;
262         } else {
263                 bo->seq_valid = false;
264         }
265
266         return 0;
267 }
268 EXPORT_SYMBOL(ttm_bo_reserve);
269
270 static void ttm_bo_ref_bug(struct kref *list_kref)
271 {
272         BUG();
273 }
274
275 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
276                          bool never_free)
277 {
278         kref_sub(&bo->list_kref, count,
279                  (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
280 }
281
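/*
 * Reserve a buffer object and take it off the LRU lists. With
 * use_sequence set, reservation uses the caller's sequence number for
 * deadlock avoidance when reserving multiple buffers, returning
 * -EDEADLK or -EAGAIN as appropriate (see ttm_bo_reserve_locked()).
 */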
282 int ttm_bo_reserve(struct ttm_buffer_object *bo,
283                    bool interruptible,
284                    bool no_wait, bool use_sequence, uint32_t sequence)
285 {
286         struct ttm_bo_global *glob = bo->glob;
287         int put_count = 0;
288         int ret;
289
290         spin_lock(&glob->lru_lock);
291         ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
292                                     sequence);
293         if (likely(ret == 0))
294                 put_count = ttm_bo_del_from_lru(bo);
295         spin_unlock(&glob->lru_lock);
296
297         ttm_bo_list_ref_sub(bo, put_count, true);
298
299         return ret;
300 }
301
302 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
303 {
304         ttm_bo_add_to_lru(bo);
305         atomic_set(&bo->reserved, 0);
306         wake_up_all(&bo->event_queue);
307 }
308
309 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
310 {
311         struct ttm_bo_global *glob = bo->glob;
312
313         spin_lock(&glob->lru_lock);
314         ttm_bo_unreserve_locked(bo);
315         spin_unlock(&glob->lru_lock);
316 }
317 EXPORT_SYMBOL(ttm_bo_unreserve);
318
319 /*
320  * Call with bo->mutex held.
321  */
322 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
323 {
324         struct ttm_bo_device *bdev = bo->bdev;
325         struct ttm_bo_global *glob = bo->glob;
326         int ret = 0;
327         uint32_t page_flags = 0;
328
329         TTM_ASSERT_LOCKED(&bo->mutex);
330         bo->ttm = NULL;
331
332         if (bdev->need_dma32)
333                 page_flags |= TTM_PAGE_FLAG_DMA32;
334
335         switch (bo->type) {
336         case ttm_bo_type_device:
337                 if (zero_alloc)
338                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
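                /* fall through */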
339         case ttm_bo_type_kernel:
340                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
341                                         page_flags, glob->dummy_read_page);
342                 if (unlikely(bo->ttm == NULL))
343                         ret = -ENOMEM;
344                 break;
345         case ttm_bo_type_user:
346                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
347                                         page_flags | TTM_PAGE_FLAG_USER,
348                                         glob->dummy_read_page);
349                 if (unlikely(bo->ttm == NULL)) {
350                         ret = -ENOMEM;
351                         break;
352                 }
353
354                 ret = ttm_tt_set_user(bo->ttm, current,
355                                       bo->buffer_start, bo->num_pages);
356                 if (unlikely(ret != 0)) {
357                         ttm_tt_destroy(bo->ttm);
358                         bo->ttm = NULL;
359                 }
360                 break;
361         default:
362                 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
363                 ret = -EINVAL;
364                 break;
365         }
366
367         return ret;
368 }
369
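/*
 * Move the buffer object to the memory described by @mem: unmap CPU
 * mappings when needed, make sure a TTM exists (and is bound, for
 * non-system types) when the new memory type is not fixed, and then use
 * ttm_bo_move_ttm(), the driver's move callback or ttm_bo_move_memcpy()
 * to perform the actual transfer.
 */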
370 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
371                                   struct ttm_mem_reg *mem,
372                                   bool evict, bool interruptible,
373                                   bool no_wait_reserve, bool no_wait_gpu)
374 {
375         struct ttm_bo_device *bdev = bo->bdev;
376         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
377         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
378         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
379         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
380         int ret = 0;
381
382         if (old_is_pci || new_is_pci ||
383             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
384                 ret = ttm_mem_io_lock(old_man, true);
385                 if (unlikely(ret != 0))
386                         goto out_err;
387                 ttm_bo_unmap_virtual_locked(bo);
388                 ttm_mem_io_unlock(old_man);
389         }
390
391         /*
392          * Create and bind a ttm if required.
393          */
394
395         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
396                 if (bo->ttm == NULL) {
397                         ret = ttm_bo_add_ttm(bo, false);
398                         if (ret)
399                                 goto out_err;
400                 }
401
402                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
403                 if (ret)
404                         goto out_err;
405
406                 if (mem->mem_type != TTM_PL_SYSTEM) {
407                         ret = ttm_tt_bind(bo->ttm, mem);
408                         if (ret)
409                                 goto out_err;
410                 }
411
412                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
413                         if (bdev->driver->move_notify)
414                                 bdev->driver->move_notify(bo, mem);
415                         bo->mem = *mem;
416                         mem->mm_node = NULL;
417                         goto moved;
418                 }
419         }
420
421         if (bdev->driver->move_notify)
422                 bdev->driver->move_notify(bo, mem);
423
424         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
425             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
426                 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
427         else if (bdev->driver->move)
428                 ret = bdev->driver->move(bo, evict, interruptible,
429                                          no_wait_reserve, no_wait_gpu, mem);
430         else
431                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
432
433         if (ret)
434                 goto out_err;
435
436 moved:
437         if (bo->evicted) {
438                 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
439                 if (ret)
440                         printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
441                 bo->evicted = false;
442         }
443
444         if (bo->mem.mm_node) {
445                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
446                     bdev->man[bo->mem.mem_type].gpu_offset;
447                 bo->cur_placement = bo->mem.placement;
448         } else
449                 bo->offset = 0;
450
451         return 0;
452
453 out_err:
454         new_man = &bdev->man[bo->mem.mem_type];
455         if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
456                 ttm_tt_unbind(bo->ttm);
457                 ttm_tt_destroy(bo->ttm);
458                 bo->ttm = NULL;
459         }
460
461         return ret;
462 }
463
464 /**
465  * Call with bo::reserved held.
466  * Will release GPU memory type usage on destruction.
467  * This is the place to put in driver specific hooks to release
468  * driver private resources.
469  * Will release the bo::reserved lock.
470  */
471
472 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
473 {
474         if (bo->ttm) {
475                 ttm_tt_unbind(bo->ttm);
476                 ttm_tt_destroy(bo->ttm);
477                 bo->ttm = NULL;
478         }
479         ttm_bo_mem_put(bo, &bo->mem);
480
481         atomic_set(&bo->reserved, 0);
482
483         /*
484          * Make processes trying to reserve really pick it up.
485          */
486         smp_mb__after_atomic_dec();
487         wake_up_all(&bo->event_queue);
488 }
489
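/*
 * If the buffer object is idle and can be reserved without blocking,
 * release its memory type resources immediately. Otherwise flush its
 * sync object and queue it on the device's delayed-destroy list to be
 * cleaned up later by the delayed work.
 */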
490 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
491 {
492         struct ttm_bo_device *bdev = bo->bdev;
493         struct ttm_bo_global *glob = bo->glob;
494         struct ttm_bo_driver *driver;
495         void *sync_obj = NULL;
496         void *sync_obj_arg;
497         int put_count;
498         int ret;
499
500         spin_lock(&bdev->fence_lock);
501         (void) ttm_bo_wait(bo, false, false, true);
502         if (!bo->sync_obj) {
503
504                 spin_lock(&glob->lru_lock);
505
506                 /**
507                  * Lock inversion between bo::reserve and bdev::fence_lock here,
508                  * but that's OK, since we're only trylocking.
509                  */
510
511                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
512
513                 if (unlikely(ret == -EBUSY))
514                         goto queue;
515
516                 spin_unlock(&bdev->fence_lock);
517                 put_count = ttm_bo_del_from_lru(bo);
518
519                 spin_unlock(&glob->lru_lock);
520                 ttm_bo_cleanup_memtype_use(bo);
521
522                 ttm_bo_list_ref_sub(bo, put_count, true);
523
524                 return;
525         } else {
526                 spin_lock(&glob->lru_lock);
527         }
528 queue:
529         driver = bdev->driver;
530         if (bo->sync_obj)
531                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
532         sync_obj_arg = bo->sync_obj_arg;
533
534         kref_get(&bo->list_kref);
535         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
536         spin_unlock(&glob->lru_lock);
537         spin_unlock(&bdev->fence_lock);
538
539         if (sync_obj) {
540                 driver->sync_obj_flush(sync_obj, sync_obj_arg);
541                 driver->sync_obj_unref(&sync_obj);
542         }
543         schedule_delayed_work(&bdev->wq,
544                               ((HZ / 100) < 1) ? 1 : HZ / 100);
545 }
546
547 /**
548  * function ttm_bo_cleanup_refs
549  * If bo idle, remove from delayed- and lru lists, and unref.
550  * If not idle, do nothing.
551  *
552  * @interruptible         Any sleeps should occur interruptibly.
553  * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
554  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
555  */
556
557 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
558                                bool interruptible,
559                                bool no_wait_reserve,
560                                bool no_wait_gpu)
561 {
562         struct ttm_bo_device *bdev = bo->bdev;
563         struct ttm_bo_global *glob = bo->glob;
564         int put_count;
565         int ret = 0;
566
567 retry:
568         spin_lock(&bdev->fence_lock);
569         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
570         spin_unlock(&bdev->fence_lock);
571
572         if (unlikely(ret != 0))
573                 return ret;
574
575         spin_lock(&glob->lru_lock);
576         ret = ttm_bo_reserve_locked(bo, interruptible,
577                                     no_wait_reserve, false, 0);
578
579         if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
580                 spin_unlock(&glob->lru_lock);
581                 return ret;
582         }
583
584         /**
585          * We can re-check for sync object without taking
586          * the bo::lock since setting the sync object also requires
587          * bo::reserved. A busy object at this point may
588          * be caused by another thread recently starting an accelerated
589          * eviction.
590          */
591
592         if (unlikely(bo->sync_obj)) {
593                 atomic_set(&bo->reserved, 0);
594                 wake_up_all(&bo->event_queue);
595                 spin_unlock(&glob->lru_lock);
596                 goto retry;
597         }
598
599         put_count = ttm_bo_del_from_lru(bo);
600         list_del_init(&bo->ddestroy);
601         ++put_count;
602
603         spin_unlock(&glob->lru_lock);
604         ttm_bo_cleanup_memtype_use(bo);
605
606         ttm_bo_list_ref_sub(bo, put_count, true);
607
608         return 0;
609 }
610
611 /**
612  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
613  * encountered buffers.
614  */
615
616 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
617 {
618         struct ttm_bo_global *glob = bdev->glob;
619         struct ttm_buffer_object *entry = NULL;
620         int ret = 0;
621
622         spin_lock(&glob->lru_lock);
623         if (list_empty(&bdev->ddestroy))
624                 goto out_unlock;
625
626         entry = list_first_entry(&bdev->ddestroy,
627                 struct ttm_buffer_object, ddestroy);
628         kref_get(&entry->list_kref);
629
630         for (;;) {
631                 struct ttm_buffer_object *nentry = NULL;
632
633                 if (entry->ddestroy.next != &bdev->ddestroy) {
634                         nentry = list_first_entry(&entry->ddestroy,
635                                 struct ttm_buffer_object, ddestroy);
636                         kref_get(&nentry->list_kref);
637                 }
638
639                 spin_unlock(&glob->lru_lock);
640                 ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
641                                           !remove_all);
642                 kref_put(&entry->list_kref, ttm_bo_release_list);
643                 entry = nentry;
644
645                 if (ret || !entry)
646                         goto out;
647
648                 spin_lock(&glob->lru_lock);
649                 if (list_empty(&entry->ddestroy))
650                         break;
651         }
652
653 out_unlock:
654         spin_unlock(&glob->lru_lock);
655 out:
656         if (entry)
657                 kref_put(&entry->list_kref, ttm_bo_release_list);
658         return ret;
659 }
660
661 static void ttm_bo_delayed_workqueue(struct work_struct *work)
662 {
663         struct ttm_bo_device *bdev =
664             container_of(work, struct ttm_bo_device, wq.work);
665
666         if (ttm_bo_delayed_delete(bdev, false)) {
667                 schedule_delayed_work(&bdev->wq,
668                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
669         }
670 }
671
672 static void ttm_bo_release(struct kref *kref)
673 {
674         struct ttm_buffer_object *bo =
675             container_of(kref, struct ttm_buffer_object, kref);
676         struct ttm_bo_device *bdev = bo->bdev;
677         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
678
679         if (likely(bo->vm_node != NULL)) {
680                 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
681                 drm_mm_put_block(bo->vm_node);
682                 bo->vm_node = NULL;
683         }
684         write_unlock(&bdev->vm_lock);
685         ttm_mem_io_lock(man, false);
686         ttm_mem_io_free_vm(bo);
687         ttm_mem_io_unlock(man);
688         ttm_bo_cleanup_refs_or_queue(bo);
689         kref_put(&bo->list_kref, ttm_bo_release_list);
690         write_lock(&bdev->vm_lock);
691 }
692
693 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
694 {
695         struct ttm_buffer_object *bo = *p_bo;
696         struct ttm_bo_device *bdev = bo->bdev;
697
698         *p_bo = NULL;
699         write_lock(&bdev->vm_lock);
700         kref_put(&bo->kref, ttm_bo_release);
701         write_unlock(&bdev->vm_lock);
702 }
703 EXPORT_SYMBOL(ttm_bo_unref);
704
705 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
706 {
707         return cancel_delayed_work_sync(&bdev->wq);
708 }
709 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
710
711 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
712 {
713         if (resched)
714                 schedule_delayed_work(&bdev->wq,
715                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
716 }
717 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
718
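/*
 * Evict a reserved buffer object from its current placement: wait for
 * GPU idle, ask the driver for eviction placements via evict_flags(),
 * find space with ttm_bo_mem_space() and move the buffer there.
 */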
719 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
720                         bool no_wait_reserve, bool no_wait_gpu)
721 {
722         struct ttm_bo_device *bdev = bo->bdev;
723         struct ttm_mem_reg evict_mem;
724         struct ttm_placement placement;
725         int ret = 0;
726
727         spin_lock(&bdev->fence_lock);
728         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
729         spin_unlock(&bdev->fence_lock);
730
731         if (unlikely(ret != 0)) {
732                 if (ret != -ERESTARTSYS) {
733                         printk(KERN_ERR TTM_PFX
734                                "Failed to expire sync object before "
735                                "buffer eviction.\n");
736                 }
737                 goto out;
738         }
739
740         BUG_ON(!atomic_read(&bo->reserved));
741
742         evict_mem = bo->mem;
743         evict_mem.mm_node = NULL;
744         evict_mem.bus.io_reserved_vm = false;
745         evict_mem.bus.io_reserved_count = 0;
746
747         placement.fpfn = 0;
748         placement.lpfn = 0;
749         placement.num_placement = 0;
750         placement.num_busy_placement = 0;
751         bdev->driver->evict_flags(bo, &placement);
752         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
753                                 no_wait_reserve, no_wait_gpu);
754         if (ret) {
755                 if (ret != -ERESTARTSYS) {
756                         printk(KERN_ERR TTM_PFX
757                                "Failed to find memory space for "
758                                "buffer 0x%p eviction.\n", bo);
759                         ttm_bo_mem_space_debug(bo, &placement);
760                 }
761                 goto out;
762         }
763
764         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
765                                      no_wait_reserve, no_wait_gpu);
766         if (ret) {
767                 if (ret != -ERESTARTSYS)
768                         printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
769                 ttm_bo_mem_put(bo, &evict_mem);
770                 goto out;
771         }
772         bo->evicted = true;
773 out:
774         return ret;
775 }
776
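/*
 * Reserve and evict the first evictable buffer object on the LRU list of
 * @mem_type. Buffers already on the delayed-destroy list are cleaned up
 * instead of evicted. Returns -EBUSY if the LRU list is empty.
 */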
777 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
778                                 uint32_t mem_type,
779                                 bool interruptible, bool no_wait_reserve,
780                                 bool no_wait_gpu)
781 {
782         struct ttm_bo_global *glob = bdev->glob;
783         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
784         struct ttm_buffer_object *bo;
785         int ret, put_count = 0;
786
787 retry:
788         spin_lock(&glob->lru_lock);
789         if (list_empty(&man->lru)) {
790                 spin_unlock(&glob->lru_lock);
791                 return -EBUSY;
792         }
793
794         bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
795         kref_get(&bo->list_kref);
796
797         if (!list_empty(&bo->ddestroy)) {
798                 spin_unlock(&glob->lru_lock);
799                 ret = ttm_bo_cleanup_refs(bo, interruptible,
800                                           no_wait_reserve, no_wait_gpu);
801                 kref_put(&bo->list_kref, ttm_bo_release_list);
802
803                 if (likely(ret == 0 || ret == -ERESTARTSYS))
804                         return ret;
805
806                 goto retry;
807         }
808
809         ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
810
811         if (unlikely(ret == -EBUSY)) {
812                 spin_unlock(&glob->lru_lock);
813                 if (likely(!no_wait_gpu))
814                         ret = ttm_bo_wait_unreserved(bo, interruptible);
815
816                 kref_put(&bo->list_kref, ttm_bo_release_list);
817
818                 /**
819                  * We *need* to retry after releasing the lru lock.
820                  */
821
822                 if (unlikely(ret != 0))
823                         return ret;
824                 goto retry;
825         }
826
827         put_count = ttm_bo_del_from_lru(bo);
828         spin_unlock(&glob->lru_lock);
829
830         BUG_ON(ret != 0);
831
832         ttm_bo_list_ref_sub(bo, put_count, true);
833
834         ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
835         ttm_bo_unreserve(bo);
836
837         kref_put(&bo->list_kref, ttm_bo_release_list);
838         return ret;
839 }
840
841 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
842 {
843         struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
844
845         if (mem->mm_node)
846                 (*man->func->put_node)(man, mem);
847 }
848 EXPORT_SYMBOL(ttm_bo_mem_put);
849
850 /**
851  * Repeatedly evict memory from the LRU for @mem_type until we create enough
852  * space, or we've evicted everything and there isn't enough space.
853  */
854 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
855                                         uint32_t mem_type,
856                                         struct ttm_placement *placement,
857                                         struct ttm_mem_reg *mem,
858                                         bool interruptible,
859                                         bool no_wait_reserve,
860                                         bool no_wait_gpu)
861 {
862         struct ttm_bo_device *bdev = bo->bdev;
863         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
864         int ret;
865
866         do {
867                 ret = (*man->func->get_node)(man, bo, placement, mem);
868                 if (unlikely(ret != 0))
869                         return ret;
870                 if (mem->mm_node)
871                         break;
872                 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
873                                                 no_wait_reserve, no_wait_gpu);
874                 if (unlikely(ret != 0))
875                         return ret;
876         } while (1);
877         if (mem->mm_node == NULL)
878                 return -ENOMEM;
879         mem->mem_type = mem_type;
880         return 0;
881 }
882
883 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
884                                       uint32_t cur_placement,
885                                       uint32_t proposed_placement)
886 {
887         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
888         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
889
890         /**
891          * Keep current caching if possible.
892          */
893
894         if ((cur_placement & caching) != 0)
895                 result |= (cur_placement & caching);
896         else if ((man->default_caching & caching) != 0)
897                 result |= man->default_caching;
898         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
899                 result |= TTM_PL_FLAG_CACHED;
900         else if ((TTM_PL_FLAG_WC & caching) != 0)
901                 result |= TTM_PL_FLAG_WC;
902         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
903                 result |= TTM_PL_FLAG_UNCACHED;
904
905         return result;
906 }
907
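/*
 * Check whether memory type @mem_type can satisfy @proposed_placement.
 * On success the masked placement flags (memory type plus compatible
 * caching bits) are returned through @masked_placement.
 */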
908 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
909                                  bool disallow_fixed,
910                                  uint32_t mem_type,
911                                  uint32_t proposed_placement,
912                                  uint32_t *masked_placement)
913 {
914         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
915
916         if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
917                 return false;
918
919         if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
920                 return false;
921
922         if ((proposed_placement & man->available_caching) == 0)
923                 return false;
924
925         cur_flags |= (proposed_placement & man->available_caching);
926
927         *masked_placement = cur_flags;
928         return true;
929 }
930
931 /**
932  * Creates space for memory region @mem according to its type.
933  *
934  * This function first searches for free space in compatible memory types in
935  * the priority order defined by the driver.  If free space isn't found, then
936  * ttm_bo_mem_force_space is attempted in priority order to evict and find
937  * space.
938  */
939 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
940                         struct ttm_placement *placement,
941                         struct ttm_mem_reg *mem,
942                         bool interruptible, bool no_wait_reserve,
943                         bool no_wait_gpu)
944 {
945         struct ttm_bo_device *bdev = bo->bdev;
946         struct ttm_mem_type_manager *man;
947         uint32_t mem_type = TTM_PL_SYSTEM;
948         uint32_t cur_flags = 0;
949         bool type_found = false;
950         bool type_ok = false;
951         bool has_erestartsys = false;
952         int i, ret;
953
954         mem->mm_node = NULL;
955         for (i = 0; i < placement->num_placement; ++i) {
956                 ret = ttm_mem_type_from_flags(placement->placement[i],
957                                                 &mem_type);
958                 if (ret)
959                         return ret;
960                 man = &bdev->man[mem_type];
961
962                 type_ok = ttm_bo_mt_compatible(man,
963                                                 bo->type == ttm_bo_type_user,
964                                                 mem_type,
965                                                 placement->placement[i],
966                                                 &cur_flags);
967
968                 if (!type_ok)
969                         continue;
970
971                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
972                                                   cur_flags);
973                 /*
974                  * Apply the access and other non-mapping-related flag bits
975                  * from the memory placement flags to the current flags.
976                  */
977                 ttm_flag_masked(&cur_flags, placement->placement[i],
978                                 ~TTM_PL_MASK_MEMTYPE);
979
980                 if (mem_type == TTM_PL_SYSTEM)
981                         break;
982
983                 if (man->has_type && man->use_type) {
984                         type_found = true;
985                         ret = (*man->func->get_node)(man, bo, placement, mem);
986                         if (unlikely(ret))
987                                 return ret;
988                 }
989                 if (mem->mm_node)
990                         break;
991         }
992
993         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
994                 mem->mem_type = mem_type;
995                 mem->placement = cur_flags;
996                 return 0;
997         }
998
999         if (!type_found)
1000                 return -EINVAL;
1001
1002         for (i = 0; i < placement->num_busy_placement; ++i) {
1003                 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
1004                                                 &mem_type);
1005                 if (ret)
1006                         return ret;
1007                 man = &bdev->man[mem_type];
1008                 if (!man->has_type)
1009                         continue;
1010                 if (!ttm_bo_mt_compatible(man,
1011                                                 bo->type == ttm_bo_type_user,
1012                                                 mem_type,
1013                                                 placement->busy_placement[i],
1014                                                 &cur_flags))
1015                         continue;
1016
1017                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1018                                                   cur_flags);
1019                 /*
1020                  * Apply the access and other non-mapping-related flag bits
1021                  * from the memory placement flags to the current flags.
1022                  */
1023                 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1024                                 ~TTM_PL_MASK_MEMTYPE);
1025
1026
1027                 if (mem_type == TTM_PL_SYSTEM) {
1028                         mem->mem_type = mem_type;
1029                         mem->placement = cur_flags;
1030                         mem->mm_node = NULL;
1031                         return 0;
1032                 }
1033
1034                 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1035                                                 interruptible, no_wait_reserve, no_wait_gpu);
1036                 if (ret == 0 && mem->mm_node) {
1037                         mem->placement = cur_flags;
1038                         return 0;
1039                 }
1040                 if (ret == -ERESTARTSYS)
1041                         has_erestartsys = true;
1042         }
1043         ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1044         return ret;
1045 }
1046 EXPORT_SYMBOL(ttm_bo_mem_space);
1047
1048 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1049 {
1050         if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1051                 return -EBUSY;
1052
1053         return wait_event_interruptible(bo->event_queue,
1054                                         atomic_read(&bo->cpu_writers) == 0);
1055 }
1056 EXPORT_SYMBOL(ttm_bo_wait_cpu);
1057
1058 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1059                         struct ttm_placement *placement,
1060                         bool interruptible, bool no_wait_reserve,
1061                         bool no_wait_gpu)
1062 {
1063         int ret = 0;
1064         struct ttm_mem_reg mem;
1065         struct ttm_bo_device *bdev = bo->bdev;
1066
1067         BUG_ON(!atomic_read(&bo->reserved));
1068
1069         /*
1070          * FIXME: It's possible to pipeline buffer moves.
1071          * Have the driver move function wait for idle when necessary,
1072          * instead of doing it here.
1073          */
1074         spin_lock(&bdev->fence_lock);
1075         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1076         spin_unlock(&bdev->fence_lock);
1077         if (ret)
1078                 return ret;
1079         mem.num_pages = bo->num_pages;
1080         mem.size = mem.num_pages << PAGE_SHIFT;
1081         mem.page_alignment = bo->mem.page_alignment;
1082         mem.bus.io_reserved_vm = false;
1083         mem.bus.io_reserved_count = 0;
1084         /*
1085          * Determine where to move the buffer.
1086          */
1087         ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1088         if (ret)
1089                 goto out_unlock;
1090         ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1091 out_unlock:
1092         if (ret && mem.mm_node)
1093                 ttm_bo_mem_put(bo, &mem);
1094         return ret;
1095 }
1096
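/*
 * Check whether the buffer's current memory region satisfies @placement.
 * Returns the index of the first matching placement flag set, or -1 if
 * the buffer needs to be moved.
 */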
1097 static int ttm_bo_mem_compat(struct ttm_placement *placement,
1098                              struct ttm_mem_reg *mem)
1099 {
1100         int i;
1101
1102         if (mem->mm_node && placement->lpfn != 0 &&
1103             (mem->start < placement->fpfn ||
1104              mem->start + mem->num_pages > placement->lpfn))
1105                 return -1;
1106
1107         for (i = 0; i < placement->num_placement; i++) {
1108                 if ((placement->placement[i] & mem->placement &
1109                         TTM_PL_MASK_CACHING) &&
1110                         (placement->placement[i] & mem->placement &
1111                         TTM_PL_MASK_MEM))
1112                         return i;
1113         }
1114         return -1;
1115 }
1116
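/*
 * Validate a reserved buffer object against @placement, moving it to a
 * compatible memory region if needed and allocating a TTM for system
 * memory placements that lack one. Typical driver-side usage (illustrative
 * only, error handling omitted, "placement" is a hypothetical local):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, &placement, true, false, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */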
1117 int ttm_bo_validate(struct ttm_buffer_object *bo,
1118                         struct ttm_placement *placement,
1119                         bool interruptible, bool no_wait_reserve,
1120                         bool no_wait_gpu)
1121 {
1122         int ret;
1123
1124         BUG_ON(!atomic_read(&bo->reserved));
1125         /* Check that range is valid */
1126         if (placement->lpfn || placement->fpfn)
1127                 if (placement->fpfn > placement->lpfn ||
1128                         (placement->lpfn - placement->fpfn) < bo->num_pages)
1129                         return -EINVAL;
1130         /*
1131          * Check whether we need to move buffer.
1132          */
1133         ret = ttm_bo_mem_compat(placement, &bo->mem);
1134         if (ret < 0) {
1135                 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1136                 if (ret)
1137                         return ret;
1138         } else {
1139                 /*
1140                  * Apply the access and other non-mapping-related flag bits
1141                  * from the compatible memory placement flags to the active flags.
1142                  */
1143                 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1144                                 ~TTM_PL_MASK_MEMTYPE);
1145         }
1146         /*
1147          * We might need to add a TTM.
1148          */
1149         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1150                 ret = ttm_bo_add_ttm(bo, true);
1151                 if (ret)
1152                         return ret;
1153         }
1154         return 0;
1155 }
1156 EXPORT_SYMBOL(ttm_bo_validate);
1157
1158 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1159                                 struct ttm_placement *placement)
1160 {
1161         BUG_ON((placement->fpfn || placement->lpfn) &&
1162                (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1163
1164         return 0;
1165 }
1166
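/*
 * Initialize a driver-allocated buffer object and place it according to
 * @placement. On failure the object is unreferenced and must not be
 * touched by the caller; on success it is returned unreserved with one
 * reference held. Illustrative call (names other than the TTM symbols,
 * such as "mybo", "acc_size" and "my_bo_destroy", are hypothetical):
 *
 *	ret = ttm_bo_init(bdev, &mybo->base, size, ttm_bo_type_device,
 *			  &placement, 0, 0, true, NULL, acc_size,
 *			  &my_bo_destroy);
 */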
1167 int ttm_bo_init(struct ttm_bo_device *bdev,
1168                 struct ttm_buffer_object *bo,
1169                 unsigned long size,
1170                 enum ttm_bo_type type,
1171                 struct ttm_placement *placement,
1172                 uint32_t page_alignment,
1173                 unsigned long buffer_start,
1174                 bool interruptible,
1175                 struct file *persistent_swap_storage,
1176                 size_t acc_size,
1177                 void (*destroy) (struct ttm_buffer_object *))
1178 {
1179         int ret = 0;
1180         unsigned long num_pages;
1181
1182         size += buffer_start & ~PAGE_MASK;
1183         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1184         if (num_pages == 0) {
1185                 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1186                 if (destroy)
1187                         (*destroy)(bo);
1188                 else
1189                         kfree(bo);
1190                 return -EINVAL;
1191         }
1192         bo->destroy = destroy;
1193
1194         kref_init(&bo->kref);
1195         kref_init(&bo->list_kref);
1196         atomic_set(&bo->cpu_writers, 0);
1197         atomic_set(&bo->reserved, 1);
1198         init_waitqueue_head(&bo->event_queue);
1199         INIT_LIST_HEAD(&bo->lru);
1200         INIT_LIST_HEAD(&bo->ddestroy);
1201         INIT_LIST_HEAD(&bo->swap);
1202         INIT_LIST_HEAD(&bo->io_reserve_lru);
1203         bo->bdev = bdev;
1204         bo->glob = bdev->glob;
1205         bo->type = type;
1206         bo->num_pages = num_pages;
1207         bo->mem.size = num_pages << PAGE_SHIFT;
1208         bo->mem.mem_type = TTM_PL_SYSTEM;
1209         bo->mem.num_pages = bo->num_pages;
1210         bo->mem.mm_node = NULL;
1211         bo->mem.page_alignment = page_alignment;
1212         bo->mem.bus.io_reserved_vm = false;
1213         bo->mem.bus.io_reserved_count = 0;
1214         bo->buffer_start = buffer_start & PAGE_MASK;
1215         bo->priv_flags = 0;
1216         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1217         bo->seq_valid = false;
1218         bo->persistent_swap_storage = persistent_swap_storage;
1219         bo->acc_size = acc_size;
1220         atomic_inc(&bo->glob->bo_count);
1221
1222         ret = ttm_bo_check_placement(bo, placement);
1223         if (unlikely(ret != 0))
1224                 goto out_err;
1225
1226         /*
1227          * For ttm_bo_type_device buffers, allocate
1228          * address space from the device.
1229          */
1230         if (bo->type == ttm_bo_type_device) {
1231                 ret = ttm_bo_setup_vm(bo);
1232                 if (ret)
1233                         goto out_err;
1234         }
1235
1236         ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1237         if (ret)
1238                 goto out_err;
1239
1240         ttm_bo_unreserve(bo);
1241         return 0;
1242
1243 out_err:
1244         ttm_bo_unreserve(bo);
1245         ttm_bo_unref(&bo);
1246
1247         return ret;
1248 }
1249 EXPORT_SYMBOL(ttm_bo_init);
1250
1251 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1252                                  unsigned long num_pages)
1253 {
1254         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1255             PAGE_MASK;
1256
1257         return glob->ttm_bo_size + 2 * page_array_size;
1258 }
1259
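/*
 * Allocate and initialize a buffer object in one go: account the object
 * size with the memory global, kzalloc the object and hand it to
 * ttm_bo_init(). On success the new object is returned in @p_bo.
 */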
1260 int ttm_bo_create(struct ttm_bo_device *bdev,
1261                         unsigned long size,
1262                         enum ttm_bo_type type,
1263                         struct ttm_placement *placement,
1264                         uint32_t page_alignment,
1265                         unsigned long buffer_start,
1266                         bool interruptible,
1267                         struct file *persistent_swap_storage,
1268                         struct ttm_buffer_object **p_bo)
1269 {
1270         struct ttm_buffer_object *bo;
1271         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1272         int ret;
1273
1274         size_t acc_size =
1275             ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1276         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1277         if (unlikely(ret != 0))
1278                 return ret;
1279
1280         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1281
1282         if (unlikely(bo == NULL)) {
1283                 ttm_mem_global_free(mem_glob, acc_size);
1284                 return -ENOMEM;
1285         }
1286
1287         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1288                                 buffer_start, interruptible,
1289                                 persistent_swap_storage, acc_size, NULL);
1290         if (likely(ret == 0))
1291                 *p_bo = bo;
1292
1293         return ret;
1294 }
1295
1296 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1297                                         unsigned mem_type, bool allow_errors)
1298 {
1299         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1300         struct ttm_bo_global *glob = bdev->glob;
1301         int ret;
1302
1303         /*
1304          * Can't use standard list traversal since we're unlocking.
1305          */
1306
1307         spin_lock(&glob->lru_lock);
1308         while (!list_empty(&man->lru)) {
1309                 spin_unlock(&glob->lru_lock);
1310                 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1311                 if (ret) {
1312                         if (allow_errors) {
1313                                 return ret;
1314                         } else {
1315                                 printk(KERN_ERR TTM_PFX
1316                                         "Cleanup eviction failed\n");
1317                         }
1318                 }
1319                 spin_lock(&glob->lru_lock);
1320         }
1321         spin_unlock(&glob->lru_lock);
1322         return 0;
1323 }
1324
1325 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1326 {
1327         struct ttm_mem_type_manager *man;
1328         int ret = -EINVAL;
1329
1330         if (mem_type >= TTM_NUM_MEM_TYPES) {
1331                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1332                 return ret;
1333         }
1334         man = &bdev->man[mem_type];
1335
1336         if (!man->has_type) {
1337                 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1338                        "memory manager type %u\n", mem_type);
1339                 return ret;
1340         }
1341
1342         man->use_type = false;
1343         man->has_type = false;
1344
1345         ret = 0;
1346         if (mem_type > 0) {
1347                 ttm_bo_force_list_clean(bdev, mem_type, false);
1348
1349                 ret = (*man->func->takedown)(man);
1350         }
1351
1352         return ret;
1353 }
1354 EXPORT_SYMBOL(ttm_bo_clean_mm);
1355
1356 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1357 {
1358         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1359
1360         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1361                 printk(KERN_ERR TTM_PFX
1362                        "Illegal memory manager memory type %u.\n",
1363                        mem_type);
1364                 return -EINVAL;
1365         }
1366
1367         if (!man->has_type) {
1368                 printk(KERN_ERR TTM_PFX
1369                        "Memory type %u has not been initialized.\n",
1370                        mem_type);
1371                 return 0;
1372         }
1373
1374         return ttm_bo_force_list_clean(bdev, mem_type, true);
1375 }
1376 EXPORT_SYMBOL(ttm_bo_evict_mm);
1377
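/*
 * Initialize memory manager @type for @bdev. The driver's init_mem_type()
 * hook fills in the manager, and for all types except TTM_PL_SYSTEM the
 * manager backend itself is initialized with @p_size.
 */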
1378 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1379                         unsigned long p_size)
1380 {
1381         int ret = -EINVAL;
1382         struct ttm_mem_type_manager *man;
1383
1384         BUG_ON(type >= TTM_NUM_MEM_TYPES);
1385         man = &bdev->man[type];
1386         BUG_ON(man->has_type);
1387         man->io_reserve_fastpath = true;
1388         man->use_io_reserve_lru = false;
1389         mutex_init(&man->io_reserve_mutex);
1390         INIT_LIST_HEAD(&man->io_reserve_lru);
1391
1392         ret = bdev->driver->init_mem_type(bdev, type, man);
1393         if (ret)
1394                 return ret;
1395         man->bdev = bdev;
1396
1397         ret = 0;
1398         if (type != TTM_PL_SYSTEM) {
1399                 ret = (*man->func->init)(man, p_size);
1400                 if (ret)
1401                         return ret;
1402         }
1403         man->has_type = true;
1404         man->use_type = true;
1405         man->size = p_size;
1406
1407         INIT_LIST_HEAD(&man->lru);
1408
1409         return 0;
1410 }
1411 EXPORT_SYMBOL(ttm_bo_init_mm);
1412
1413 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1414 {
1415         struct ttm_bo_global *glob =
1416                 container_of(kobj, struct ttm_bo_global, kobj);
1417
1418         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1419         __free_page(glob->dummy_read_page);
1420         kfree(glob);
1421 }
1422
1423 void ttm_bo_global_release(struct drm_global_reference *ref)
1424 {
1425         struct ttm_bo_global *glob = ref->object;
1426
1427         kobject_del(&glob->kobj);
1428         kobject_put(&glob->kobj);
1429 }
1430 EXPORT_SYMBOL(ttm_bo_global_release);
1431
1432 int ttm_bo_global_init(struct drm_global_reference *ref)
1433 {
1434         struct ttm_bo_global_ref *bo_ref =
1435                 container_of(ref, struct ttm_bo_global_ref, ref);
1436         struct ttm_bo_global *glob = ref->object;
1437         int ret;
1438
1439         mutex_init(&glob->device_list_mutex);
1440         spin_lock_init(&glob->lru_lock);
1441         glob->mem_glob = bo_ref->mem_glob;
1442         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1443
1444         if (unlikely(glob->dummy_read_page == NULL)) {
1445                 ret = -ENOMEM;
1446                 goto out_no_drp;
1447         }
1448
1449         INIT_LIST_HEAD(&glob->swap_lru);
1450         INIT_LIST_HEAD(&glob->device_list);
1451
1452         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1453         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1454         if (unlikely(ret != 0)) {
1455                 printk(KERN_ERR TTM_PFX
1456                        "Could not register buffer object swapout.\n");
1457                 goto out_no_shrink;
1458         }
1459
1460         glob->ttm_bo_extra_size =
1461                 ttm_round_pot(sizeof(struct ttm_tt)) +
1462                 ttm_round_pot(sizeof(struct ttm_backend));
1463
1464         glob->ttm_bo_size = glob->ttm_bo_extra_size +
1465                 ttm_round_pot(sizeof(struct ttm_buffer_object));
1466
1467         atomic_set(&glob->bo_count, 0);
1468
1469         ret = kobject_init_and_add(
1470                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1471         if (unlikely(ret != 0))
1472                 kobject_put(&glob->kobj);
1473         return ret;
1474 out_no_shrink:
1475         __free_page(glob->dummy_read_page);
1476 out_no_drp:
1477         kfree(glob);
1478         return ret;
1479 }
1480 EXPORT_SYMBOL(ttm_bo_global_init);
1481
1482
1483 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1484 {
1485         int ret = 0;
1486         unsigned i = TTM_NUM_MEM_TYPES;
1487         struct ttm_mem_type_manager *man;
1488         struct ttm_bo_global *glob = bdev->glob;
1489
1490         while (i--) {
1491                 man = &bdev->man[i];
1492                 if (man->has_type) {
1493                         man->use_type = false;
1494                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1495                                 ret = -EBUSY;
1496                                 printk(KERN_ERR TTM_PFX
1497                                        "DRM memory manager type %d "
1498                                        "is not clean.\n", i);
1499                         }
1500                         man->has_type = false;
1501                 }
1502         }
1503
1504         mutex_lock(&glob->device_list_mutex);
1505         list_del(&bdev->device_list);
1506         mutex_unlock(&glob->device_list_mutex);
1507
1508         cancel_delayed_work_sync(&bdev->wq);
1509
1510         while (ttm_bo_delayed_delete(bdev, true))
1511                 ;
1512
1513         spin_lock(&glob->lru_lock);
1514         if (list_empty(&bdev->ddestroy))
1515                 TTM_DEBUG("Delayed destroy list was clean\n");
1516
1517         if (list_empty(&bdev->man[0].lru))
1518                 TTM_DEBUG("Swap list was clean\n");
1519         spin_unlock(&glob->lru_lock);
1520
1521         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1522         write_lock(&bdev->vm_lock);
1523         drm_mm_takedown(&bdev->addr_space_mm);
1524         write_unlock(&bdev->vm_lock);
1525
1526         return ret;
1527 }
1528 EXPORT_SYMBOL(ttm_bo_device_release);
1529
1530 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1531                        struct ttm_bo_global *glob,
1532                        struct ttm_bo_driver *driver,
1533                        uint64_t file_page_offset,
1534                        bool need_dma32)
1535 {
1536         int ret = -EINVAL;
1537
1538         rwlock_init(&bdev->vm_lock);
1539         bdev->driver = driver;
1540
1541         memset(bdev->man, 0, sizeof(bdev->man));
1542
1543         /*
1544          * Initialize the system memory buffer type. Other memory types
1545          * need to be set up by the driver / via IOCTL (see the sketch below).
1546          */
1547         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1548         if (unlikely(ret != 0))
1549                 goto out_no_sys;
1550
1551         bdev->addr_space_rb = RB_ROOT;
1552         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1553         if (unlikely(ret != 0))
1554                 goto out_no_addr_mm;
1555
1556         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1557         bdev->nice_mode = true;
1558         INIT_LIST_HEAD(&bdev->ddestroy);
1559         bdev->dev_mapping = NULL;
1560         bdev->glob = glob;
1561         bdev->need_dma32 = need_dma32;
1562         bdev->val_seq = 0;
1563         spin_lock_init(&bdev->fence_lock);
1564         mutex_lock(&glob->device_list_mutex);
1565         list_add_tail(&bdev->device_list, &glob->device_list);
1566         mutex_unlock(&glob->device_list_mutex);
1567
1568         return 0;
1569 out_no_addr_mm:
1570         ttm_bo_clean_mm(bdev, 0);
1571 out_no_sys:
1572         return ret;
1573 }
1574 EXPORT_SYMBOL(ttm_bo_device_init);
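/*
 * Illustrative sketch, assuming a hypothetical "example_device" driver with
 * an "example_bo_driver" ttm_bo_driver and a driver-defined
 * DRM_FILE_PAGE_OFFSET: after initializing the device, the driver sets up
 * the memory types other than TTM_PL_SYSTEM itself, e.g. a VRAM manager.
 *
 *	r = ttm_bo_device_init(&edev->bdev,
 *			       edev->bo_global_ref.ref.object,
 *			       &example_bo_driver,
 *			       DRM_FILE_PAGE_OFFSET,
 *			       edev->need_dma32);
 *	if (r)
 *		return r;
 *
 *	r = ttm_bo_init_mm(&edev->bdev, TTM_PL_VRAM,
 *			   edev->vram_size >> PAGE_SHIFT);
 *	if (r)
 *		return r;
 */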
1575
1576 /*
1577  * buffer object vm functions.
1578  */
1579
1580 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1581 {
1582         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1583
1584         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1585                 if (mem->mem_type == TTM_PL_SYSTEM)
1586                         return false;
1587
1588                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1589                         return false;
1590
1591                 if (mem->placement & TTM_PL_FLAG_CACHED)
1592                         return false;
1593         }
1594         return true;
1595 }
1596
1597 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1598 {
1599         struct ttm_bo_device *bdev = bo->bdev;
1600         loff_t offset = (loff_t) bo->addr_space_offset;
1601         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1602
1603         if (!bdev->dev_mapping)
1604                 return;
1605         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1606         ttm_mem_io_free_vm(bo);
1607 }
1608
1609 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1610 {
1611         struct ttm_bo_device *bdev = bo->bdev;
1612         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1613
1614         ttm_mem_io_lock(man, false);
1615         ttm_bo_unmap_virtual_locked(bo);
1616         ttm_mem_io_unlock(man);
1617 }
1620 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1621
1622 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1623 {
1624         struct ttm_bo_device *bdev = bo->bdev;
1625         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1626         struct rb_node *parent = NULL;
1627         struct ttm_buffer_object *cur_bo;
1628         unsigned long offset = bo->vm_node->start;
1629         unsigned long cur_offset;
1630
1631         while (*cur) {
1632                 parent = *cur;
1633                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1634                 cur_offset = cur_bo->vm_node->start;
1635                 if (offset < cur_offset)
1636                         cur = &parent->rb_left;
1637                 else if (offset > cur_offset)
1638                         cur = &parent->rb_right;
1639                 else
1640                         BUG();
1641         }
1642
1643         rb_link_node(&bo->vm_rb, parent, cur);
1644         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1645 }
1646
1647 /**
1648  * ttm_bo_setup_vm:
1649  *
1650  * @bo: the buffer to allocate address space for
1651  *
1652  * Allocate address space in the drm device so that applications
1653  * can mmap the buffer and access the contents. This only
1654  * applies to ttm_bo_type_device objects as others are not
1655  * placed in the drm device address space.
1656  */
1657
1658 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1659 {
1660         struct ttm_bo_device *bdev = bo->bdev;
1661         int ret;
1662
1663 retry_pre_get:
1664         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1665         if (unlikely(ret != 0))
1666                 return ret;
1667
1668         write_lock(&bdev->vm_lock);
1669         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1670                                          bo->mem.num_pages, 0, 0);
1671
1672         if (unlikely(bo->vm_node == NULL)) {
1673                 ret = -ENOMEM;
1674                 goto out_unlock;
1675         }
1676
1677         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1678                                               bo->mem.num_pages, 0);
1679
1680         if (unlikely(bo->vm_node == NULL)) {
1681                 write_unlock(&bdev->vm_lock);
1682                 goto retry_pre_get;
1683         }
1684
1685         ttm_bo_vm_insert_rb(bo);
1686         write_unlock(&bdev->vm_lock);
1687         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1688
1689         return 0;
1690 out_unlock:
1691         write_unlock(&bdev->vm_lock);
1692         return ret;
1693 }
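/*
 * Illustrative sketch of how the offset set up above is consumed; the
 * "bo_size" and "drm_fd" names are hypothetical. A driver typically hands
 * bo->addr_space_offset back to user space, which then maps the buffer
 * through the DRM device file; the driver's mmap file operation forwards to
 * ttm_bo_mmap(), which looks the object up in the address-space rb-tree.
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, (off_t) bo->addr_space_offset);
 */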
1694
1695 int ttm_bo_wait(struct ttm_buffer_object *bo,
1696                 bool lazy, bool interruptible, bool no_wait)
1697 {
1698         struct ttm_bo_driver *driver = bo->bdev->driver;
1699         struct ttm_bo_device *bdev = bo->bdev;
1700         void *sync_obj;
1701         void *sync_obj_arg;
1702         int ret = 0;
1703
1704         if (likely(bo->sync_obj == NULL))
1705                 return 0;
1706
1707         while (bo->sync_obj) {
1708
1709                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1710                         void *tmp_obj = bo->sync_obj;
1711                         bo->sync_obj = NULL;
1712                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1713                         spin_unlock(&bdev->fence_lock);
1714                         driver->sync_obj_unref(&tmp_obj);
1715                         spin_lock(&bdev->fence_lock);
1716                         continue;
1717                 }
1718
1719                 if (no_wait)
1720                         return -EBUSY;
1721
1722                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1723                 sync_obj_arg = bo->sync_obj_arg;
1724                 spin_unlock(&bdev->fence_lock);
1725                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1726                                             lazy, interruptible);
1727                 if (unlikely(ret != 0)) {
1728                         driver->sync_obj_unref(&sync_obj);
1729                         spin_lock(&bdev->fence_lock);
1730                         return ret;
1731                 }
1732                 spin_lock(&bdev->fence_lock);
1733                 if (likely(bo->sync_obj == sync_obj &&
1734                            bo->sync_obj_arg == sync_obj_arg)) {
1735                         void *tmp_obj = bo->sync_obj;
1736                         bo->sync_obj = NULL;
1737                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1738                                   &bo->priv_flags);
1739                         spin_unlock(&bdev->fence_lock);
1740                         driver->sync_obj_unref(&sync_obj);
1741                         driver->sync_obj_unref(&tmp_obj);
1742                         spin_lock(&bdev->fence_lock);
1743                 } else {
1744                         spin_unlock(&bdev->fence_lock);
1745                         driver->sync_obj_unref(&sync_obj);
1746                         spin_lock(&bdev->fence_lock);
1747                 }
1748         }
1749         return 0;
1750 }
1751 EXPORT_SYMBOL(ttm_bo_wait);
1752
1753 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1754 {
1755         struct ttm_bo_device *bdev = bo->bdev;
1756         int ret = 0;
1757
1758         /*
1759          * Using ttm_bo_reserve makes sure the lru lists are updated.
1760          */
1761
1762         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1763         if (unlikely(ret != 0))
1764                 return ret;
1765         spin_lock(&bdev->fence_lock);
1766         ret = ttm_bo_wait(bo, false, true, no_wait);
1767         spin_unlock(&bdev->fence_lock);
1768         if (likely(ret == 0))
1769                 atomic_inc(&bo->cpu_writers);
1770         ttm_bo_unreserve(bo);
1771         return ret;
1772 }
1773 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1774
1775 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1776 {
1777         if (atomic_dec_and_test(&bo->cpu_writers))
1778                 wake_up_all(&bo->event_queue);
1779 }
1780 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
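/*
 * Illustrative usage sketch (hypothetical caller): the grab waits for the
 * GPU and raises bo->cpu_writers, and the release drops it again, so the
 * pair brackets direct CPU writes to the buffer.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		... CPU writes to the buffer's backing pages ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */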
1781
1782 /**
1783  * A buffer object shrink method that tries to swap out the first
1784  * buffer object on the ttm_bo_global::swap_lru list.
1785  */
1786
1787 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1788 {
1789         struct ttm_bo_global *glob =
1790             container_of(shrink, struct ttm_bo_global, shrink);
1791         struct ttm_buffer_object *bo;
1792         int ret = -EBUSY;
1793         int put_count;
1794         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1795
1796         spin_lock(&glob->lru_lock);
1797         while (ret == -EBUSY) {
1798                 if (unlikely(list_empty(&glob->swap_lru))) {
1799                         spin_unlock(&glob->lru_lock);
1800                         return -EBUSY;
1801                 }
1802
1803                 bo = list_first_entry(&glob->swap_lru,
1804                                       struct ttm_buffer_object, swap);
1805                 kref_get(&bo->list_kref);
1806
1807                 if (!list_empty(&bo->ddestroy)) {
1808                         spin_unlock(&glob->lru_lock);
1809                         (void) ttm_bo_cleanup_refs(bo, false, false, false);
1810                         kref_put(&bo->list_kref, ttm_bo_release_list);
1811                         continue;
1812                 }
1813
1814                 /*
1815                  * Reserve the buffer. Since we unlock while sleeping, we need
1816                  * to re-check that nobody removed us from the swap list while
1817                  * we slept.
1818                  */
1819
1820                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1821                 if (unlikely(ret == -EBUSY)) {
1822                         spin_unlock(&glob->lru_lock);
1823                         ttm_bo_wait_unreserved(bo, false);
1824                         kref_put(&bo->list_kref, ttm_bo_release_list);
1825                         spin_lock(&glob->lru_lock);
1826                 }
1827         }
1828
1829         BUG_ON(ret != 0);
1830         put_count = ttm_bo_del_from_lru(bo);
1831         spin_unlock(&glob->lru_lock);
1832
1833         ttm_bo_list_ref_sub(bo, put_count, true);
1834
1835         /*
1836          * Wait for the GPU, then move the buffer to cached system memory.
1837          */
1838
1839         spin_lock(&bo->bdev->fence_lock);
1840         ret = ttm_bo_wait(bo, false, false, false);
1841         spin_unlock(&bo->bdev->fence_lock);
1842
1843         if (unlikely(ret != 0))
1844                 goto out;
1845
1846         if ((bo->mem.placement & swap_placement) != swap_placement) {
1847                 struct ttm_mem_reg evict_mem;
1848
1849                 evict_mem = bo->mem;
1850                 evict_mem.mm_node = NULL;
1851                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1852                 evict_mem.mem_type = TTM_PL_SYSTEM;
1853
1854                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1855                                              false, false, false);
1856                 if (unlikely(ret != 0))
1857                         goto out;
1858         }
1859
1860         ttm_bo_unmap_virtual(bo);
1861
1862         /*
1863          * Swap out. The buffer will be swapped in again as soon as
1864          * anyone tries to access one of its ttm pages.
1865          */
1866
1867         if (bo->bdev->driver->swap_notify)
1868                 bo->bdev->driver->swap_notify(bo);
1869
1870         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1871 out:
1872
1873         /*
1874          * Unreserve without putting the buffer back on the LRU list, to
1875          * avoid swapping out an already swapped-out buffer.
1876          */
1878
1879         atomic_set(&bo->reserved, 0);
1880         wake_up_all(&bo->event_queue);
1881         kref_put(&bo->list_kref, ttm_bo_release_list);
1882         return ret;
1883 }
1884
1885 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1886 {
1887         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1888                 ;
1889 }
1890 EXPORT_SYMBOL(ttm_bo_swapout_all);
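/*
 * Illustrative usage sketch (hypothetical driver): a driver that wants all
 * buffer objects backed by swappable system memory, for example before
 * hibernation, can run the swapout path to completion:
 *
 *	ttm_bo_swapout_all(&edev->bdev);
 */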