drivers/gpu/drm/ttm/ttm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30
31 #include "ttm/ttm_module.h"
32 #include "ttm/ttm_bo_driver.h"
33 #include "ttm/ttm_placement.h"
34 #include <linux/jiffies.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
37 #include <linux/mm.h>
38 #include <linux/file.h>
39 #include <linux/module.h>
40 #include <asm/atomic.h>
41
42 #define TTM_ASSERT_LOCKED(param)
43 #define TTM_DEBUG(fmt, arg...)
44 #define TTM_BO_HASH_ORDER 13
45
46 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
47 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 static void ttm_bo_global_kobj_release(struct kobject *kobj);
49
50 static struct attribute ttm_bo_count = {
51         .name = "bo_count",
52         .mode = S_IRUGO
53 };
54
55 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
56 {
57         int i;
58
59         for (i = 0; i <= TTM_PL_PRIV5; i++)
60                 if (flags & (1 << i)) {
61                         *mem_type = i;
62                         return 0;
63                 }
64         return -EINVAL;
65 }
66
67 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
68 {
69         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
70
71         printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
72         printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
73         printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
74         printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
75         printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
76         printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
77                 man->available_caching);
78         printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
79                 man->default_caching);
80         if (mem_type != TTM_PL_SYSTEM)
81                 (*man->func->debug)(man, TTM_PFX);
82 }
83
84 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
85                                         struct ttm_placement *placement)
86 {
87         int i, ret, mem_type;
88
89         printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
90                 bo, bo->mem.num_pages, bo->mem.size >> 10,
91                 bo->mem.size >> 20);
92         for (i = 0; i < placement->num_placement; i++) {
93                 ret = ttm_mem_type_from_flags(placement->placement[i],
94                                                 &mem_type);
95                 if (ret)
96                         return;
97                 printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
98                         i, placement->placement[i], mem_type);
99                 ttm_mem_type_debug(bo->bdev, mem_type);
100         }
101 }
102
103 static ssize_t ttm_bo_global_show(struct kobject *kobj,
104                                   struct attribute *attr,
105                                   char *buffer)
106 {
107         struct ttm_bo_global *glob =
108                 container_of(kobj, struct ttm_bo_global, kobj);
109
110         return snprintf(buffer, PAGE_SIZE, "%lu\n",
111                         (unsigned long) atomic_read(&glob->bo_count));
112 }
113
114 static struct attribute *ttm_bo_global_attrs[] = {
115         &ttm_bo_count,
116         NULL
117 };
118
119 static const struct sysfs_ops ttm_bo_global_ops = {
120         .show = &ttm_bo_global_show
121 };
122
123 static struct kobj_type ttm_bo_glob_kobj_type  = {
124         .release = &ttm_bo_global_kobj_release,
125         .sysfs_ops = &ttm_bo_global_ops,
126         .default_attrs = ttm_bo_global_attrs
127 };
128
129
130 static inline uint32_t ttm_bo_type_flags(unsigned type)
131 {
132         return 1 << (type);
133 }
134
135 static void ttm_bo_release_list(struct kref *list_kref)
136 {
137         struct ttm_buffer_object *bo =
138             container_of(list_kref, struct ttm_buffer_object, list_kref);
139         struct ttm_bo_device *bdev = bo->bdev;
140
141         BUG_ON(atomic_read(&bo->list_kref.refcount));
142         BUG_ON(atomic_read(&bo->kref.refcount));
143         BUG_ON(atomic_read(&bo->cpu_writers));
144         BUG_ON(bo->sync_obj != NULL);
145         BUG_ON(bo->mem.mm_node != NULL);
146         BUG_ON(!list_empty(&bo->lru));
147         BUG_ON(!list_empty(&bo->ddestroy));
148
149         if (bo->ttm)
150                 ttm_tt_destroy(bo->ttm);
151         atomic_dec(&bo->glob->bo_count);
152         if (bo->destroy)
153                 bo->destroy(bo);
154         else {
155                 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
156                 kfree(bo);
157         }
158 }
159
160 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
161 {
162         if (interruptible) {
163                 return wait_event_interruptible(bo->event_queue,
164                                                atomic_read(&bo->reserved) == 0);
165         } else {
166                 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
167                 return 0;
168         }
169 }
170 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
171
172 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
173 {
174         struct ttm_bo_device *bdev = bo->bdev;
175         struct ttm_mem_type_manager *man;
176
177         BUG_ON(!atomic_read(&bo->reserved));
178
179         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
180
181                 BUG_ON(!list_empty(&bo->lru));
182
183                 man = &bdev->man[bo->mem.mem_type];
184                 list_add_tail(&bo->lru, &man->lru);
185                 kref_get(&bo->list_kref);
186
187                 if (bo->ttm != NULL) {
188                         list_add_tail(&bo->swap, &bo->glob->swap_lru);
189                         kref_get(&bo->list_kref);
190                 }
191         }
192 }
193
194 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
195 {
196         int put_count = 0;
197
198         if (!list_empty(&bo->swap)) {
199                 list_del_init(&bo->swap);
200                 ++put_count;
201         }
202         if (!list_empty(&bo->lru)) {
203                 list_del_init(&bo->lru);
204                 ++put_count;
205         }
206
207         /*
208          * TODO: Add a driver hook to delete from
209          * driver-specific LRU's here.
210          */
211
212         return put_count;
213 }
214
215 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
216                           bool interruptible,
217                           bool no_wait, bool use_sequence, uint32_t sequence)
218 {
219         struct ttm_bo_global *glob = bo->glob;
220         int ret;
221
222         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
223                 /**
224                  * Deadlock avoidance for multi-bo reserving.
225                  */
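                /*
                 * Illustrative note (not in the original source): the unsigned
                 * subtraction below compares sequence numbers modulo 2^32, so
                 * the scheme survives wraparound.  E.g. with sequence == 3 and
                 * bo->val_seq == 0xfffffffe, the difference is 5 < 2^31, so
                 * the incoming request is treated as the younger transaction
                 * and backs off with -EAGAIN instead of risking deadlock.
                 */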
226                 if (use_sequence && bo->seq_valid &&
227                         (sequence - bo->val_seq < (1 << 31))) {
228                         return -EAGAIN;
229                 }
230
231                 if (no_wait)
232                         return -EBUSY;
233
234                 spin_unlock(&glob->lru_lock);
235                 ret = ttm_bo_wait_unreserved(bo, interruptible);
236                 spin_lock(&glob->lru_lock);
237
238                 if (unlikely(ret))
239                         return ret;
240         }
241
242         if (use_sequence) {
243                 /**
244                  * Wake up waiters that may need to recheck for deadlock,
245                  * if we decreased the sequence number.
246                  */
247                 if (unlikely((bo->val_seq - sequence < (1 << 31))
248                              || !bo->seq_valid))
249                         wake_up_all(&bo->event_queue);
250
251                 bo->val_seq = sequence;
252                 bo->seq_valid = true;
253         } else {
254                 bo->seq_valid = false;
255         }
256
257         return 0;
258 }
259 EXPORT_SYMBOL(ttm_bo_reserve);
260
261 static void ttm_bo_ref_bug(struct kref *list_kref)
262 {
263         BUG();
264 }
265
266 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
267                          bool never_free)
268 {
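        /*
         * Drop 'count' list references.  The caller holds all of them, so
         * only the final kref_put() may legally bring the refcount to zero;
         * an earlier put reaching zero (or any put when @never_free is set)
         * indicates a bug.
         */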
269         while (count--)
270                 kref_put(&bo->list_kref,
271                          (never_free || (count != 0)) ? ttm_bo_ref_bug :
272                          ttm_bo_release_list);
273 }
274
275 int ttm_bo_reserve(struct ttm_buffer_object *bo,
276                    bool interruptible,
277                    bool no_wait, bool use_sequence, uint32_t sequence)
278 {
279         struct ttm_bo_global *glob = bo->glob;
280         int put_count = 0;
281         int ret;
282
283         spin_lock(&glob->lru_lock);
284         ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
285                                     sequence);
286         if (likely(ret == 0))
287                 put_count = ttm_bo_del_from_lru(bo);
288         spin_unlock(&glob->lru_lock);
289
290         ttm_bo_list_ref_sub(bo, put_count, true);
291
292         return ret;
293 }
294
295 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
296 {
297         struct ttm_bo_global *glob = bo->glob;
298
299         spin_lock(&glob->lru_lock);
300         ttm_bo_add_to_lru(bo);
301         atomic_set(&bo->reserved, 0);
302         wake_up_all(&bo->event_queue);
303         spin_unlock(&glob->lru_lock);
304 }
305 EXPORT_SYMBOL(ttm_bo_unreserve);
306
307 /*
308  * Call bo->mutex locked.
309  */
310 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
311 {
312         struct ttm_bo_device *bdev = bo->bdev;
313         struct ttm_bo_global *glob = bo->glob;
314         int ret = 0;
315         uint32_t page_flags = 0;
316
317         TTM_ASSERT_LOCKED(&bo->mutex);
318         bo->ttm = NULL;
319
320         if (bdev->need_dma32)
321                 page_flags |= TTM_PAGE_FLAG_DMA32;
322
323         switch (bo->type) {
324         case ttm_bo_type_device:
325                 if (zero_alloc)
326                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
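                /* fall through: device BOs also get a regular ttm */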
327         case ttm_bo_type_kernel:
328                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
329                                         page_flags, glob->dummy_read_page);
330                 if (unlikely(bo->ttm == NULL))
331                         ret = -ENOMEM;
332                 break;
333         case ttm_bo_type_user:
334                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
335                                         page_flags | TTM_PAGE_FLAG_USER,
336                                         glob->dummy_read_page);
337                 if (unlikely(bo->ttm == NULL)) {
338                         ret = -ENOMEM;
339                         break;
340                 }
341
342                 ret = ttm_tt_set_user(bo->ttm, current,
343                                       bo->buffer_start, bo->num_pages);
344                 if (unlikely(ret != 0))
345                         ttm_tt_destroy(bo->ttm);
346                 break;
347         default:
348                 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
349                 ret = -EINVAL;
350                 break;
351         }
352
353         return ret;
354 }
355
356 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
357                                   struct ttm_mem_reg *mem,
358                                   bool evict, bool interruptible,
359                                   bool no_wait_reserve, bool no_wait_gpu)
360 {
361         struct ttm_bo_device *bdev = bo->bdev;
362         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
363         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
364         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
365         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
366         int ret = 0;
367
368         if (old_is_pci || new_is_pci ||
369             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
370                 ttm_bo_unmap_virtual(bo);
371
372         /*
373          * Create and bind a ttm if required.
374          */
375
376         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
377                 ret = ttm_bo_add_ttm(bo, false);
378                 if (ret)
379                         goto out_err;
380
381                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
382                 if (ret)
383                         goto out_err;
384
385                 if (mem->mem_type != TTM_PL_SYSTEM) {
386                         ret = ttm_tt_bind(bo->ttm, mem);
387                         if (ret)
388                                 goto out_err;
389                 }
390
391                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
392                         bo->mem = *mem;
393                         mem->mm_node = NULL;
394                         goto moved;
395                 }
396
397         }
398
399         if (bdev->driver->move_notify)
400                 bdev->driver->move_notify(bo, mem);
401
402         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
403             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
404                 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
405         else if (bdev->driver->move)
406                 ret = bdev->driver->move(bo, evict, interruptible,
407                                          no_wait_reserve, no_wait_gpu, mem);
408         else
409                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
410
411         if (ret)
412                 goto out_err;
413
414 moved:
415         if (bo->evicted) {
416                 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
417                 if (ret)
418                         printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
419                 bo->evicted = false;
420         }
421
422         if (bo->mem.mm_node) {
423                 spin_lock(&bo->lock);
424                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
425                     bdev->man[bo->mem.mem_type].gpu_offset;
426                 bo->cur_placement = bo->mem.placement;
427                 spin_unlock(&bo->lock);
428         } else
429                 bo->offset = 0;
430
431         return 0;
432
433 out_err:
434         new_man = &bdev->man[bo->mem.mem_type];
435         if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
436                 ttm_tt_unbind(bo->ttm);
437                 ttm_tt_destroy(bo->ttm);
438                 bo->ttm = NULL;
439         }
440
441         return ret;
442 }
443
444 /**
445  * Called with the bo reserved (bo::reserved held).
446  * Will release GPU memory type usage on destruction.
447  * This is the place to put in driver specific hooks to release
448  * driver private resources.
449  * Will release the bo::reserved lock.
450  */
451
452 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
453 {
454         if (bo->ttm) {
455                 ttm_tt_unbind(bo->ttm);
456                 ttm_tt_destroy(bo->ttm);
457                 bo->ttm = NULL;
458         }
459
460         ttm_bo_mem_put(bo, &bo->mem);
461
462         atomic_set(&bo->reserved, 0);
463
464         /*
465          * Make processes trying to reserve really pick it up.
466          */
467         smp_mb__after_atomic_dec();
468         wake_up_all(&bo->event_queue);
469 }
470
471 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
472 {
473         struct ttm_bo_device *bdev = bo->bdev;
474         struct ttm_bo_global *glob = bo->glob;
475         struct ttm_bo_driver *driver;
476         void *sync_obj = NULL;
477         void *sync_obj_arg;
478         int put_count;
479         int ret;
480
481         spin_lock(&bo->lock);
482         (void) ttm_bo_wait(bo, false, false, true);
483         if (!bo->sync_obj) {
484
485                 spin_lock(&glob->lru_lock);
486
487                 /**
488                  * Lock inversion between bo::reserve and bo::lock here,
489                  * but that's OK, since we're only trylocking.
490                  */
491
492                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
493
494                 if (unlikely(ret == -EBUSY))
495                         goto queue;
496
497                 spin_unlock(&bo->lock);
498                 put_count = ttm_bo_del_from_lru(bo);
499
500                 spin_unlock(&glob->lru_lock);
501                 ttm_bo_cleanup_memtype_use(bo);
502
503                 ttm_bo_list_ref_sub(bo, put_count, true);
504
505                 return;
506         } else {
507                 spin_lock(&glob->lru_lock);
508         }
509 queue:
510         driver = bdev->driver;
511         if (bo->sync_obj)
512                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
513         sync_obj_arg = bo->sync_obj_arg;
514
515         kref_get(&bo->list_kref);
516         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
517         spin_unlock(&glob->lru_lock);
518         spin_unlock(&bo->lock);
519
520         if (sync_obj) {
521                 driver->sync_obj_flush(sync_obj, sync_obj_arg);
522                 driver->sync_obj_unref(&sync_obj);
523         }
524         schedule_delayed_work(&bdev->wq,
525                               ((HZ / 100) < 1) ? 1 : HZ / 100);
526 }
527
528 /**
529  * function ttm_bo_cleanup_refs
530  * If bo idle, remove from delayed- and lru lists, and unref.
531  * If not idle, do nothing.
532  *
533  * @interruptible         Any sleeps should occur interruptibly.
534  * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
535  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
536  */
537
538 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
539                                bool interruptible,
540                                bool no_wait_reserve,
541                                bool no_wait_gpu)
542 {
543         struct ttm_bo_global *glob = bo->glob;
544         int put_count;
545         int ret = 0;
546
547 retry:
548         spin_lock(&bo->lock);
549         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
550         spin_unlock(&bo->lock);
551
552         if (unlikely(ret != 0))
553                 return ret;
554
555         spin_lock(&glob->lru_lock);
556         ret = ttm_bo_reserve_locked(bo, interruptible,
557                                     no_wait_reserve, false, 0);
558
559         if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
560                 spin_unlock(&glob->lru_lock);
561                 return ret;
562         }
563
564         /**
565          * We can re-check for sync object without taking
566          * the bo::lock since setting the sync object requires
567          * also bo::reserved. A busy object at this point may
568          * be caused by another thread recently starting an accelerated
569          * eviction.
570          */
571
572         if (unlikely(bo->sync_obj)) {
573                 atomic_set(&bo->reserved, 0);
574                 wake_up_all(&bo->event_queue);
575                 spin_unlock(&glob->lru_lock);
576                 goto retry;
577         }
578
579         put_count = ttm_bo_del_from_lru(bo);
580         list_del_init(&bo->ddestroy);
581         ++put_count;
582
583         spin_unlock(&glob->lru_lock);
584         ttm_bo_cleanup_memtype_use(bo);
585
586         ttm_bo_list_ref_sub(bo, put_count, true);
587
588         return 0;
589 }
590
591 /**
592  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
593  * encountered buffers.
594  */
595
596 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
597 {
598         struct ttm_bo_global *glob = bdev->glob;
599         struct ttm_buffer_object *entry = NULL;
600         int ret = 0;
601
602         spin_lock(&glob->lru_lock);
603         if (list_empty(&bdev->ddestroy))
604                 goto out_unlock;
605
606         entry = list_first_entry(&bdev->ddestroy,
607                 struct ttm_buffer_object, ddestroy);
608         kref_get(&entry->list_kref);
609
610         for (;;) {
611                 struct ttm_buffer_object *nentry = NULL;
612
613                 if (entry->ddestroy.next != &bdev->ddestroy) {
614                         nentry = list_first_entry(&entry->ddestroy,
615                                 struct ttm_buffer_object, ddestroy);
616                         kref_get(&nentry->list_kref);
617                 }
618
619                 spin_unlock(&glob->lru_lock);
620                 ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
621                                           !remove_all);
622                 kref_put(&entry->list_kref, ttm_bo_release_list);
623                 entry = nentry;
624
625                 if (ret || !entry)
626                         goto out;
627
628                 spin_lock(&glob->lru_lock);
629                 if (list_empty(&entry->ddestroy))
630                         break;
631         }
632
633 out_unlock:
634         spin_unlock(&glob->lru_lock);
635 out:
636         if (entry)
637                 kref_put(&entry->list_kref, ttm_bo_release_list);
638         return ret;
639 }
640
641 static void ttm_bo_delayed_workqueue(struct work_struct *work)
642 {
643         struct ttm_bo_device *bdev =
644             container_of(work, struct ttm_bo_device, wq.work);
645
646         if (ttm_bo_delayed_delete(bdev, false)) {
647                 schedule_delayed_work(&bdev->wq,
648                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
649         }
650 }
651
652 static void ttm_bo_release(struct kref *kref)
653 {
654         struct ttm_buffer_object *bo =
655             container_of(kref, struct ttm_buffer_object, kref);
656         struct ttm_bo_device *bdev = bo->bdev;
657
658         if (likely(bo->vm_node != NULL)) {
659                 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
660                 drm_mm_put_block(bo->vm_node);
661                 bo->vm_node = NULL;
662         }
663         write_unlock(&bdev->vm_lock);
664         ttm_bo_cleanup_refs_or_queue(bo);
665         kref_put(&bo->list_kref, ttm_bo_release_list);
666         write_lock(&bdev->vm_lock);
667 }
668
669 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
670 {
671         struct ttm_buffer_object *bo = *p_bo;
672         struct ttm_bo_device *bdev = bo->bdev;
673
674         *p_bo = NULL;
675         write_lock(&bdev->vm_lock);
676         kref_put(&bo->kref, ttm_bo_release);
677         write_unlock(&bdev->vm_lock);
678 }
679 EXPORT_SYMBOL(ttm_bo_unref);
680
681 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
682 {
683         return cancel_delayed_work_sync(&bdev->wq);
684 }
685 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
686
687 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
688 {
689         if (resched)
690                 schedule_delayed_work(&bdev->wq,
691                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
692 }
693 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
694
695 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
696                         bool no_wait_reserve, bool no_wait_gpu)
697 {
698         struct ttm_bo_device *bdev = bo->bdev;
699         struct ttm_mem_reg evict_mem;
700         struct ttm_placement placement;
701         int ret = 0;
702
703         spin_lock(&bo->lock);
704         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
705         spin_unlock(&bo->lock);
706
707         if (unlikely(ret != 0)) {
708                 if (ret != -ERESTARTSYS) {
709                         printk(KERN_ERR TTM_PFX
710                                "Failed to expire sync object before "
711                                "buffer eviction.\n");
712                 }
713                 goto out;
714         }
715
716         BUG_ON(!atomic_read(&bo->reserved));
717
718         evict_mem = bo->mem;
719         evict_mem.mm_node = NULL;
720         evict_mem.bus.io_reserved = false;
721
722         placement.fpfn = 0;
723         placement.lpfn = 0;
724         placement.num_placement = 0;
725         placement.num_busy_placement = 0;
726         bdev->driver->evict_flags(bo, &placement);
727         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
728                                 no_wait_reserve, no_wait_gpu);
729         if (ret) {
730                 if (ret != -ERESTARTSYS) {
731                         printk(KERN_ERR TTM_PFX
732                                "Failed to find memory space for "
733                                "buffer 0x%p eviction.\n", bo);
734                         ttm_bo_mem_space_debug(bo, &placement);
735                 }
736                 goto out;
737         }
738
739         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
740                                      no_wait_reserve, no_wait_gpu);
741         if (ret) {
742                 if (ret != -ERESTARTSYS)
743                         printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
744                 ttm_bo_mem_put(bo, &evict_mem);
745                 goto out;
746         }
747         bo->evicted = true;
748 out:
749         return ret;
750 }
751
752 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
753                                 uint32_t mem_type,
754                                 bool interruptible, bool no_wait_reserve,
755                                 bool no_wait_gpu)
756 {
757         struct ttm_bo_global *glob = bdev->glob;
758         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
759         struct ttm_buffer_object *bo;
760         int ret, put_count = 0;
761
762 retry:
763         spin_lock(&glob->lru_lock);
764         if (list_empty(&man->lru)) {
765                 spin_unlock(&glob->lru_lock);
766                 return -EBUSY;
767         }
768
769         bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
770         kref_get(&bo->list_kref);
771
772         if (!list_empty(&bo->ddestroy)) {
773                 spin_unlock(&glob->lru_lock);
774                 ret = ttm_bo_cleanup_refs(bo, interruptible,
775                                           no_wait_reserve, no_wait_gpu);
776                 kref_put(&bo->list_kref, ttm_bo_release_list);
777
778                 if (likely(ret == 0 || ret == -ERESTARTSYS))
779                         return ret;
780
781                 goto retry;
782         }
783
784         ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
785
786         if (unlikely(ret == -EBUSY)) {
787                 spin_unlock(&glob->lru_lock);
788                 if (likely(!no_wait_gpu))
789                         ret = ttm_bo_wait_unreserved(bo, interruptible);
790
791                 kref_put(&bo->list_kref, ttm_bo_release_list);
792
793                 /**
794                  * We *need* to retry after releasing the lru lock.
795                  */
796
797                 if (unlikely(ret != 0))
798                         return ret;
799                 goto retry;
800         }
801
802         put_count = ttm_bo_del_from_lru(bo);
803         spin_unlock(&glob->lru_lock);
804
805         BUG_ON(ret != 0);
806
807         ttm_bo_list_ref_sub(bo, put_count, true);
808
809         ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
810         ttm_bo_unreserve(bo);
811
812         kref_put(&bo->list_kref, ttm_bo_release_list);
813         return ret;
814 }
815
816 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
817 {
818         struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
819
820         if (mem->mm_node)
821                 (*man->func->put_node)(man, mem);
822 }
823 EXPORT_SYMBOL(ttm_bo_mem_put);
824
825 /**
826  * Repeatedly evict memory from the LRU for @mem_type until we create enough
827  * space, or we've evicted everything and there isn't enough space.
828  */
829 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
830                                         uint32_t mem_type,
831                                         struct ttm_placement *placement,
832                                         struct ttm_mem_reg *mem,
833                                         bool interruptible,
834                                         bool no_wait_reserve,
835                                         bool no_wait_gpu)
836 {
837         struct ttm_bo_device *bdev = bo->bdev;
838         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
839         int ret;
840
841         do {
842                 ret = (*man->func->get_node)(man, bo, placement, mem);
843                 if (unlikely(ret != 0))
844                         return ret;
845                 if (mem->mm_node)
846                         break;
847                 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
848                                                 no_wait_reserve, no_wait_gpu);
849                 if (unlikely(ret != 0))
850                         return ret;
851         } while (1);
852         if (mem->mm_node == NULL)
853                 return -ENOMEM;
854         mem->mem_type = mem_type;
855         return 0;
856 }
857
858 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
859                                       uint32_t cur_placement,
860                                       uint32_t proposed_placement)
861 {
862         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
863         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
864
865         /**
866          * Keep current caching if possible.
867          */
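        /*
         * Worked example (illustrative, not from the original source): if
         * the buffer is currently TTM_PL_FLAG_CACHED and the proposed
         * placement allows TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, the current
         * cached attribute is kept.  With no overlap, the manager's
         * default_caching is preferred, then CACHED, WC and UNCACHED, in
         * that order.
         */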
868
869         if ((cur_placement & caching) != 0)
870                 result |= (cur_placement & caching);
871         else if ((man->default_caching & caching) != 0)
872                 result |= man->default_caching;
873         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
874                 result |= TTM_PL_FLAG_CACHED;
875         else if ((TTM_PL_FLAG_WC & caching) != 0)
876                 result |= TTM_PL_FLAG_WC;
877         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
878                 result |= TTM_PL_FLAG_UNCACHED;
879
880         return result;
881 }
882
883 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
884                                  bool disallow_fixed,
885                                  uint32_t mem_type,
886                                  uint32_t proposed_placement,
887                                  uint32_t *masked_placement)
888 {
889         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
890
891         if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
892                 return false;
893
894         if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
895                 return false;
896
897         if ((proposed_placement & man->available_caching) == 0)
898                 return false;
899
900         cur_flags |= (proposed_placement & man->available_caching);
901
902         *masked_placement = cur_flags;
903         return true;
904 }
905
906 /**
907  * Creates space for memory region @mem according to its type.
908  *
909  * This function first searches for free space in compatible memory types in
910  * the priority order defined by the driver.  If free space isn't found, then
911  * ttm_bo_mem_force_space is attempted in priority order to evict and find
912  * space.
913  */
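/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * with the bo reserved, a driver fills a struct ttm_placement and lets this
 * function pick a region, e.g.
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *	struct ttm_placement placement = {
 *		.fpfn = 0, .lpfn = 0,
 *		.num_placement = 1, .placement = &flags,
 *		.num_busy_placement = 1, .busy_placement = &flags,
 *	};
 *	struct ttm_mem_reg mem = bo->mem;
 *
 *	mem.mm_node = NULL;
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, true, false, false);
 *
 * On success, mem describes the chosen region; if it is not handed on to
 * ttm_bo_handle_move_mem(), it must be released with ttm_bo_mem_put().
 */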
914 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
915                         struct ttm_placement *placement,
916                         struct ttm_mem_reg *mem,
917                         bool interruptible, bool no_wait_reserve,
918                         bool no_wait_gpu)
919 {
920         struct ttm_bo_device *bdev = bo->bdev;
921         struct ttm_mem_type_manager *man;
922         uint32_t mem_type = TTM_PL_SYSTEM;
923         uint32_t cur_flags = 0;
924         bool type_found = false;
925         bool type_ok = false;
926         bool has_erestartsys = false;
927         int i, ret;
928
929         mem->mm_node = NULL;
930         for (i = 0; i < placement->num_placement; ++i) {
931                 ret = ttm_mem_type_from_flags(placement->placement[i],
932                                                 &mem_type);
933                 if (ret)
934                         return ret;
935                 man = &bdev->man[mem_type];
936
937                 type_ok = ttm_bo_mt_compatible(man,
938                                                 bo->type == ttm_bo_type_user,
939                                                 mem_type,
940                                                 placement->placement[i],
941                                                 &cur_flags);
942
943                 if (!type_ok)
944                         continue;
945
946                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
947                                                   cur_flags);
948                 /*
949                  * Merge the access and other non-mapping-related flag bits from
950                  * the requested placement flags into the current flags
951                  */
952                 ttm_flag_masked(&cur_flags, placement->placement[i],
953                                 ~TTM_PL_MASK_MEMTYPE);
954
955                 if (mem_type == TTM_PL_SYSTEM)
956                         break;
957
958                 if (man->has_type && man->use_type) {
959                         type_found = true;
960                         ret = (*man->func->get_node)(man, bo, placement, mem);
961                         if (unlikely(ret))
962                                 return ret;
963                 }
964                 if (mem->mm_node)
965                         break;
966         }
967
968         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
969                 mem->mem_type = mem_type;
970                 mem->placement = cur_flags;
971                 return 0;
972         }
973
974         if (!type_found)
975                 return -EINVAL;
976
977         for (i = 0; i < placement->num_busy_placement; ++i) {
978                 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
979                                                 &mem_type);
980                 if (ret)
981                         return ret;
982                 man = &bdev->man[mem_type];
983                 if (!man->has_type)
984                         continue;
985                 if (!ttm_bo_mt_compatible(man,
986                                                 bo->type == ttm_bo_type_user,
987                                                 mem_type,
988                                                 placement->busy_placement[i],
989                                                 &cur_flags))
990                         continue;
991
992                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
993                                                   cur_flags);
994                 /*
995                  * Merge the access and other non-mapping-related flag bits from
996                  * the requested busy placement flags into the current flags
997                  */
998                 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
999                                 ~TTM_PL_MASK_MEMTYPE);
1000
1001
1002                 if (mem_type == TTM_PL_SYSTEM) {
1003                         mem->mem_type = mem_type;
1004                         mem->placement = cur_flags;
1005                         mem->mm_node = NULL;
1006                         return 0;
1007                 }
1008
1009                 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1010                                                 interruptible, no_wait_reserve, no_wait_gpu);
1011                 if (ret == 0 && mem->mm_node) {
1012                         mem->placement = cur_flags;
1013                         return 0;
1014                 }
1015                 if (ret == -ERESTARTSYS)
1016                         has_erestartsys = true;
1017         }
1018         ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1019         return ret;
1020 }
1021 EXPORT_SYMBOL(ttm_bo_mem_space);
1022
1023 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1024 {
1025         if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1026                 return -EBUSY;
1027
1028         return wait_event_interruptible(bo->event_queue,
1029                                         atomic_read(&bo->cpu_writers) == 0);
1030 }
1031 EXPORT_SYMBOL(ttm_bo_wait_cpu);
1032
1033 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1034                         struct ttm_placement *placement,
1035                         bool interruptible, bool no_wait_reserve,
1036                         bool no_wait_gpu)
1037 {
1038         int ret = 0;
1039         struct ttm_mem_reg mem;
1040
1041         BUG_ON(!atomic_read(&bo->reserved));
1042
1043         /*
1044          * FIXME: It's possible to pipeline buffer moves.
1045          * Have the driver move function wait for idle when necessary,
1046          * instead of doing it here.
1047          */
1048         spin_lock(&bo->lock);
1049         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1050         spin_unlock(&bo->lock);
1051         if (ret)
1052                 return ret;
1053         mem.num_pages = bo->num_pages;
1054         mem.size = mem.num_pages << PAGE_SHIFT;
1055         mem.page_alignment = bo->mem.page_alignment;
1056         mem.bus.io_reserved = false;
1057         /*
1058          * Determine where to move the buffer.
1059          */
1060         ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1061         if (ret)
1062                 goto out_unlock;
1063         ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1064 out_unlock:
1065         if (ret && mem.mm_node)
1066                 ttm_bo_mem_put(bo, &mem);
1067         return ret;
1068 }
1069
1070 static int ttm_bo_mem_compat(struct ttm_placement *placement,
1071                              struct ttm_mem_reg *mem)
1072 {
1073         int i;
1074
1075         if (mem->mm_node && placement->lpfn != 0 &&
1076             (mem->start < placement->fpfn ||
1077              mem->start + mem->num_pages > placement->lpfn))
1078                 return -1;
1079
1080         for (i = 0; i < placement->num_placement; i++) {
1081                 if ((placement->placement[i] & mem->placement &
1082                         TTM_PL_MASK_CACHING) &&
1083                         (placement->placement[i] & mem->placement &
1084                         TTM_PL_MASK_MEM))
1085                         return i;
1086         }
1087         return -1;
1088 }
1089
1090 int ttm_bo_validate(struct ttm_buffer_object *bo,
1091                         struct ttm_placement *placement,
1092                         bool interruptible, bool no_wait_reserve,
1093                         bool no_wait_gpu)
1094 {
1095         int ret;
1096
1097         BUG_ON(!atomic_read(&bo->reserved));
1098         /* Check that range is valid */
1099         if (placement->lpfn || placement->fpfn)
1100                 if (placement->fpfn > placement->lpfn ||
1101                         (placement->lpfn - placement->fpfn) < bo->num_pages)
1102                         return -EINVAL;
1103         /*
1104          * Check whether we need to move buffer.
1105          */
1106         ret = ttm_bo_mem_compat(placement, &bo->mem);
1107         if (ret < 0) {
1108                 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1109                 if (ret)
1110                         return ret;
1111         } else {
1112                 /*
1113                  * Merge the access and other non-mapping-related flag bits from
1114                  * the compatible placement flags into the active placement flags
1115                  */
1116                 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1117                                 ~TTM_PL_MASK_MEMTYPE);
1118         }
1119         /*
1120          * We might need to add a TTM.
1121          */
1122         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1123                 ret = ttm_bo_add_ttm(bo, true);
1124                 if (ret)
1125                         return ret;
1126         }
1127         return 0;
1128 }
1129 EXPORT_SYMBOL(ttm_bo_validate);
1130
1131 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1132                                 struct ttm_placement *placement)
1133 {
1134         BUG_ON((placement->fpfn || placement->lpfn) &&
1135                (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1136
1137         return 0;
1138 }
1139
1140 int ttm_bo_init(struct ttm_bo_device *bdev,
1141                 struct ttm_buffer_object *bo,
1142                 unsigned long size,
1143                 enum ttm_bo_type type,
1144                 struct ttm_placement *placement,
1145                 uint32_t page_alignment,
1146                 unsigned long buffer_start,
1147                 bool interruptible,
1148                 struct file *persistant_swap_storage,
1149                 size_t acc_size,
1150                 void (*destroy) (struct ttm_buffer_object *))
1151 {
1152         int ret = 0;
1153         unsigned long num_pages;
1154
1155         size += buffer_start & ~PAGE_MASK;
1156         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1157         if (num_pages == 0) {
1158                 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1159                 if (destroy)
1160                         (*destroy)(bo);
1161                 else
1162                         kfree(bo);
1163                 return -EINVAL;
1164         }
1165         bo->destroy = destroy;
1166
1167         spin_lock_init(&bo->lock);
1168         kref_init(&bo->kref);
1169         kref_init(&bo->list_kref);
1170         atomic_set(&bo->cpu_writers, 0);
1171         atomic_set(&bo->reserved, 1);
1172         init_waitqueue_head(&bo->event_queue);
1173         INIT_LIST_HEAD(&bo->lru);
1174         INIT_LIST_HEAD(&bo->ddestroy);
1175         INIT_LIST_HEAD(&bo->swap);
1176         bo->bdev = bdev;
1177         bo->glob = bdev->glob;
1178         bo->type = type;
1179         bo->num_pages = num_pages;
1180         bo->mem.size = num_pages << PAGE_SHIFT;
1181         bo->mem.mem_type = TTM_PL_SYSTEM;
1182         bo->mem.num_pages = bo->num_pages;
1183         bo->mem.mm_node = NULL;
1184         bo->mem.page_alignment = page_alignment;
1185         bo->mem.bus.io_reserved = false;
1186         bo->buffer_start = buffer_start & PAGE_MASK;
1187         bo->priv_flags = 0;
1188         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1189         bo->seq_valid = false;
1190         bo->persistant_swap_storage = persistant_swap_storage;
1191         bo->acc_size = acc_size;
1192         atomic_inc(&bo->glob->bo_count);
1193
1194         ret = ttm_bo_check_placement(bo, placement);
1195         if (unlikely(ret != 0))
1196                 goto out_err;
1197
1198         /*
1199          * For ttm_bo_type_device buffers, allocate
1200          * address space from the device.
1201          */
1202         if (bo->type == ttm_bo_type_device) {
1203                 ret = ttm_bo_setup_vm(bo);
1204                 if (ret)
1205                         goto out_err;
1206         }
1207
1208         ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1209         if (ret)
1210                 goto out_err;
1211
1212         ttm_bo_unreserve(bo);
1213         return 0;
1214
1215 out_err:
1216         ttm_bo_unreserve(bo);
1217         ttm_bo_unref(&bo);
1218
1219         return ret;
1220 }
1221 EXPORT_SYMBOL(ttm_bo_init);
1222
1223 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1224                                  unsigned long num_pages)
1225 {
1226         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1227             PAGE_MASK;
1228
1229         return glob->ttm_bo_size + 2 * page_array_size;
1230 }
1231
1232 int ttm_bo_create(struct ttm_bo_device *bdev,
1233                         unsigned long size,
1234                         enum ttm_bo_type type,
1235                         struct ttm_placement *placement,
1236                         uint32_t page_alignment,
1237                         unsigned long buffer_start,
1238                         bool interruptible,
1239                         struct file *persistant_swap_storage,
1240                         struct ttm_buffer_object **p_bo)
1241 {
1242         struct ttm_buffer_object *bo;
1243         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1244         int ret;
1245
1246         size_t acc_size =
1247             ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1248         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1249         if (unlikely(ret != 0))
1250                 return ret;
1251
1252         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1253
1254         if (unlikely(bo == NULL)) {
1255                 ttm_mem_global_free(mem_glob, acc_size);
1256                 return -ENOMEM;
1257         }
1258
1259         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1260                                 buffer_start, interruptible,
1261                                 persistant_swap_storage, acc_size, NULL);
1262         if (likely(ret == 0))
1263                 *p_bo = bo;
1264
1265         return ret;
1266 }
1267
1268 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1269                                         unsigned mem_type, bool allow_errors)
1270 {
1271         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1272         struct ttm_bo_global *glob = bdev->glob;
1273         int ret;
1274
1275         /*
1276          * Can't use standard list traversal since we're unlocking.
1277          */
1278
1279         spin_lock(&glob->lru_lock);
1280         while (!list_empty(&man->lru)) {
1281                 spin_unlock(&glob->lru_lock);
1282                 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1283                 if (ret) {
1284                         if (allow_errors) {
1285                                 return ret;
1286                         } else {
1287                                 printk(KERN_ERR TTM_PFX
1288                                         "Cleanup eviction failed\n");
1289                         }
1290                 }
1291                 spin_lock(&glob->lru_lock);
1292         }
1293         spin_unlock(&glob->lru_lock);
1294         return 0;
1295 }
1296
1297 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1298 {
1299         struct ttm_mem_type_manager *man;
1300         int ret = -EINVAL;
1301
1302         if (mem_type >= TTM_NUM_MEM_TYPES) {
1303                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1304                 return ret;
1305         }
1306         man = &bdev->man[mem_type];
1307
1308         if (!man->has_type) {
1309                 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1310                        "memory manager type %u\n", mem_type);
1311                 return ret;
1312         }
1313
1314         man->use_type = false;
1315         man->has_type = false;
1316
1317         ret = 0;
1318         if (mem_type > 0) {
1319                 ttm_bo_force_list_clean(bdev, mem_type, false);
1320
1321                 ret = (*man->func->takedown)(man);
1322         }
1323
1324         return ret;
1325 }
1326 EXPORT_SYMBOL(ttm_bo_clean_mm);
1327
1328 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1329 {
1330         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1331
1332         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1333                 printk(KERN_ERR TTM_PFX
1334                        "Illegal memory manager memory type %u.\n",
1335                        mem_type);
1336                 return -EINVAL;
1337         }
1338
1339         if (!man->has_type) {
1340                 printk(KERN_ERR TTM_PFX
1341                        "Memory type %u has not been initialized.\n",
1342                        mem_type);
1343                 return 0;
1344         }
1345
1346         return ttm_bo_force_list_clean(bdev, mem_type, true);
1347 }
1348 EXPORT_SYMBOL(ttm_bo_evict_mm);
1349
1350 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1351                         unsigned long p_size)
1352 {
1353         int ret = -EINVAL;
1354         struct ttm_mem_type_manager *man;
1355
1356         BUG_ON(type >= TTM_NUM_MEM_TYPES);
1357         man = &bdev->man[type];
1358         BUG_ON(man->has_type);
1359
1360         ret = bdev->driver->init_mem_type(bdev, type, man);
1361         if (ret)
1362                 return ret;
1363         man->bdev = bdev;
1364
1365         ret = 0;
1366         if (type != TTM_PL_SYSTEM) {
1367                 ret = (*man->func->init)(man, p_size);
1368                 if (ret)
1369                         return ret;
1370         }
1371         man->has_type = true;
1372         man->use_type = true;
1373         man->size = p_size;
1374
1375         INIT_LIST_HEAD(&man->lru);
1376
1377         return 0;
1378 }
1379 EXPORT_SYMBOL(ttm_bo_init_mm);
1380
1381 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1382 {
1383         struct ttm_bo_global *glob =
1384                 container_of(kobj, struct ttm_bo_global, kobj);
1385
1386         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1387         __free_page(glob->dummy_read_page);
1388         kfree(glob);
1389 }
1390
1391 void ttm_bo_global_release(struct drm_global_reference *ref)
1392 {
1393         struct ttm_bo_global *glob = ref->object;
1394
1395         kobject_del(&glob->kobj);
1396         kobject_put(&glob->kobj);
1397 }
1398 EXPORT_SYMBOL(ttm_bo_global_release);
1399
1400 int ttm_bo_global_init(struct drm_global_reference *ref)
1401 {
1402         struct ttm_bo_global_ref *bo_ref =
1403                 container_of(ref, struct ttm_bo_global_ref, ref);
1404         struct ttm_bo_global *glob = ref->object;
1405         int ret;
1406
1407         mutex_init(&glob->device_list_mutex);
1408         spin_lock_init(&glob->lru_lock);
1409         glob->mem_glob = bo_ref->mem_glob;
1410         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1411
1412         if (unlikely(glob->dummy_read_page == NULL)) {
1413                 ret = -ENOMEM;
1414                 goto out_no_drp;
1415         }
1416
1417         INIT_LIST_HEAD(&glob->swap_lru);
1418         INIT_LIST_HEAD(&glob->device_list);
1419
1420         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1421         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1422         if (unlikely(ret != 0)) {
1423                 printk(KERN_ERR TTM_PFX
1424                        "Could not register buffer object swapout.\n");
1425                 goto out_no_shrink;
1426         }
1427
1428         glob->ttm_bo_extra_size =
1429                 ttm_round_pot(sizeof(struct ttm_tt)) +
1430                 ttm_round_pot(sizeof(struct ttm_backend));
1431
1432         glob->ttm_bo_size = glob->ttm_bo_extra_size +
1433                 ttm_round_pot(sizeof(struct ttm_buffer_object));
1434
1435         atomic_set(&glob->bo_count, 0);
1436
1437         ret = kobject_init_and_add(
1438                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1439         if (unlikely(ret != 0))
1440                 kobject_put(&glob->kobj);
1441         return ret;
1442 out_no_shrink:
1443         __free_page(glob->dummy_read_page);
1444 out_no_drp:
1445         kfree(glob);
1446         return ret;
1447 }
1448 EXPORT_SYMBOL(ttm_bo_global_init);
1449
1450
1451 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1452 {
1453         int ret = 0;
1454         unsigned i = TTM_NUM_MEM_TYPES;
1455         struct ttm_mem_type_manager *man;
1456         struct ttm_bo_global *glob = bdev->glob;
1457
1458         while (i--) {
1459                 man = &bdev->man[i];
1460                 if (man->has_type) {
1461                         man->use_type = false;
1462                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1463                                 ret = -EBUSY;
1464                                 printk(KERN_ERR TTM_PFX
1465                                        "DRM memory manager type %d "
1466                                        "is not clean.\n", i);
1467                         }
1468                         man->has_type = false;
1469                 }
1470         }
1471
1472         mutex_lock(&glob->device_list_mutex);
1473         list_del(&bdev->device_list);
1474         mutex_unlock(&glob->device_list_mutex);
1475
1476         if (!cancel_delayed_work(&bdev->wq))
1477                 flush_scheduled_work();
1478
1479         while (ttm_bo_delayed_delete(bdev, true))
1480                 ;
1481
1482         spin_lock(&glob->lru_lock);
1483         if (list_empty(&bdev->ddestroy))
1484                 TTM_DEBUG("Delayed destroy list was clean\n");
1485
1486         if (list_empty(&bdev->man[0].lru))
1487                 TTM_DEBUG("Swap list was clean\n");
1488         spin_unlock(&glob->lru_lock);
1489
1490         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1491         write_lock(&bdev->vm_lock);
1492         drm_mm_takedown(&bdev->addr_space_mm);
1493         write_unlock(&bdev->vm_lock);
1494
1495         return ret;
1496 }
1497 EXPORT_SYMBOL(ttm_bo_device_release);
1498
1499 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1500                        struct ttm_bo_global *glob,
1501                        struct ttm_bo_driver *driver,
1502                        uint64_t file_page_offset,
1503                        bool need_dma32)
1504 {
1505         int ret = -EINVAL;
1506
1507         rwlock_init(&bdev->vm_lock);
1508         bdev->driver = driver;
1509
1510         memset(bdev->man, 0, sizeof(bdev->man));
1511
1512         /*
1513          * Initialize the system memory buffer type.
1514          * Other types need to be driver / IOCTL initialized.
1515          */
1516         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1517         if (unlikely(ret != 0))
1518                 goto out_no_sys;
1519
1520         bdev->addr_space_rb = RB_ROOT;
1521         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1522         if (unlikely(ret != 0))
1523                 goto out_no_addr_mm;
1524
1525         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1526         bdev->nice_mode = true;
1527         INIT_LIST_HEAD(&bdev->ddestroy);
1528         bdev->dev_mapping = NULL;
1529         bdev->glob = glob;
1530         bdev->need_dma32 = need_dma32;
1531
1532         mutex_lock(&glob->device_list_mutex);
1533         list_add_tail(&bdev->device_list, &glob->device_list);
1534         mutex_unlock(&glob->device_list_mutex);
1535
1536         return 0;
1537 out_no_addr_mm:
1538         ttm_bo_clean_mm(bdev, 0);
1539 out_no_sys:
1540         return ret;
1541 }
1542 EXPORT_SYMBOL(ttm_bo_device_init);
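/*
 * Illustrative init sketch (not part of this file): the usual driver-side
 * pairing of ttm_bo_device_init() with registration of a driver memory
 * type via ttm_bo_init_mm(). "foo", foo_bo_driver and FOO_FILE_PAGE_OFFSET
 * are hypothetical names; the ttm_bo_global object is assumed to have been
 * obtained through the driver's global reference beforehand.
 *
 *      static int foo_ttm_init(struct foo_device *foo)
 *      {
 *              int ret;
 *
 *              ret = ttm_bo_device_init(&foo->bdev, foo->bo_global,
 *                                       &foo_bo_driver,
 *                                       FOO_FILE_PAGE_OFFSET,
 *                                       foo->need_dma32);
 *              if (ret)
 *                      return ret;
 *
 *              return ttm_bo_init_mm(&foo->bdev, TTM_PL_VRAM,
 *                                    foo->vram_size >> PAGE_SHIFT);
 *      }
 */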
1543
1544 /*
1545  * buffer object vm functions.
1546  */
1547
1548 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1549 {
1550         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1551
1552         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1553                 if (mem->mem_type == TTM_PL_SYSTEM)
1554                         return false;
1555
1556                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1557                         return false;
1558
1559                 if (mem->placement & TTM_PL_FLAG_CACHED)
1560                         return false;
1561         }
1562         return true;
1563 }
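/*
 * Illustrative outcomes (assumptions, not from this file): a cached
 * TTM_PL_SYSTEM placement or a CMA-flagged manager reports false here,
 * while a VRAM manager registered with TTM_MEMTYPE_FLAG_FIXED reports
 * true and therefore needs its PCI aperture mapped for CPU access.
 */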
1564
1565 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1566 {
1567         struct ttm_bo_device *bdev = bo->bdev;
1568         loff_t offset = (loff_t) bo->addr_space_offset;
1569         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1570
1571         if (!bdev->dev_mapping)
1572                 return;
1573         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1574         ttm_mem_io_free(bdev, &bo->mem);
1575 }
1576 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1577
1578 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1579 {
1580         struct ttm_bo_device *bdev = bo->bdev;
1581         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1582         struct rb_node *parent = NULL;
1583         struct ttm_buffer_object *cur_bo;
1584         unsigned long offset = bo->vm_node->start;
1585         unsigned long cur_offset;
1586
1587         while (*cur) {
1588                 parent = *cur;
1589                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1590                 cur_offset = cur_bo->vm_node->start;
1591                 if (offset < cur_offset)
1592                         cur = &parent->rb_left;
1593                 else if (offset > cur_offset)
1594                         cur = &parent->rb_right;
1595                 else
1596                         BUG();
1597         }
1598
1599         rb_link_node(&bo->vm_rb, parent, cur);
1600         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1601 }
1602
1603 /**
1604  * ttm_bo_setup_vm - allocate device address space for a buffer object
1605  *
1606  * @bo: the buffer to allocate address space for
1607  *
1608  * Allocate address space in the drm device so that applications
1609  * can mmap the buffer and access the contents. This only
1610  * applies to ttm_bo_type_device objects as others are not
1611  * placed in the drm device address space.
1612  */
1613
1614 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1615 {
1616         struct ttm_bo_device *bdev = bo->bdev;
1617         int ret;
1618
1619 retry_pre_get:
1620         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1621         if (unlikely(ret != 0))
1622                 return ret;
1623
1624         write_lock(&bdev->vm_lock);
1625         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1626                                          bo->mem.num_pages, 0, 0);
1627
1628         if (unlikely(bo->vm_node == NULL)) {
1629                 ret = -ENOMEM;
1630                 goto out_unlock;
1631         }
1632
1633         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1634                                               bo->mem.num_pages, 0);
1635
1636         if (unlikely(bo->vm_node == NULL)) {
1637                 write_unlock(&bdev->vm_lock);
1638                 goto retry_pre_get;
1639         }
1640
1641         ttm_bo_vm_insert_rb(bo);
1642         write_unlock(&bdev->vm_lock);
1643         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1644
1645         return 0;
1646 out_unlock:
1647         write_unlock(&bdev->vm_lock);
1648         return ret;
1649 }
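/*
 * Illustrative note (not from this file): bo->addr_space_offset computed
 * above is the byte offset user space later passes to mmap() on the drm
 * device node; ttm_bo_mmap() resolves it back to the object through the
 * addr_space_rb tree. A hypothetical "get mmap offset" ioctl and its
 * user-space counterpart might look like:
 *
 *      args->offset = bo->addr_space_offset;
 *
 *      ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 drm_fd, args->offset);
 */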
1650
1651 int ttm_bo_wait(struct ttm_buffer_object *bo,
1652                 bool lazy, bool interruptible, bool no_wait)
1653 {
1654         struct ttm_bo_driver *driver = bo->bdev->driver;
1655         void *sync_obj;
1656         void *sync_obj_arg;
1657         int ret = 0;
1658
1659         if (likely(bo->sync_obj == NULL))
1660                 return 0;
1661
1662         while (bo->sync_obj) {
1663
1664                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1665                         void *tmp_obj = bo->sync_obj;
1666                         bo->sync_obj = NULL;
1667                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1668                         spin_unlock(&bo->lock);
1669                         driver->sync_obj_unref(&tmp_obj);
1670                         spin_lock(&bo->lock);
1671                         continue;
1672                 }
1673
1674                 if (no_wait)
1675                         return -EBUSY;
1676
1677                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1678                 sync_obj_arg = bo->sync_obj_arg;
1679                 spin_unlock(&bo->lock);
1680                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1681                                             lazy, interruptible);
1682                 if (unlikely(ret != 0)) {
1683                         driver->sync_obj_unref(&sync_obj);
1684                         spin_lock(&bo->lock);
1685                         return ret;
1686                 }
1687                 spin_lock(&bo->lock);
1688                 if (likely(bo->sync_obj == sync_obj &&
1689                            bo->sync_obj_arg == sync_obj_arg)) {
1690                         void *tmp_obj = bo->sync_obj;
1691                         bo->sync_obj = NULL;
1692                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1693                                   &bo->priv_flags);
1694                         spin_unlock(&bo->lock);
1695                         driver->sync_obj_unref(&sync_obj);
1696                         driver->sync_obj_unref(&tmp_obj);
1697                         spin_lock(&bo->lock);
1698                 } else {
1699                         spin_unlock(&bo->lock);
1700                         driver->sync_obj_unref(&sync_obj);
1701                         spin_lock(&bo->lock);
1702                 }
1703         }
1704         return 0;
1705 }
1706 EXPORT_SYMBOL(ttm_bo_wait);
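/*
 * Illustrative usage sketch (not from this file): ttm_bo_wait() expects
 * to be called with bo->lock held, as in ttm_bo_synccpu_write_grab()
 * below. A driver implementing a "wait for buffer idle" ioctl might do:
 *
 *      spin_lock(&bo->lock);
 *      ret = ttm_bo_wait(bo, false, true, no_wait);
 *      spin_unlock(&bo->lock);
 *
 * where "no_wait" is a caller-supplied flag.
 */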
1707
1708 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1709 {
1710         int ret = 0;
1711
1712         /*
1713          * Using ttm_bo_reserve makes sure the lru lists are updated.
1714          */
1715
1716         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1717         if (unlikely(ret != 0))
1718                 return ret;
1719         spin_lock(&bo->lock);
1720         ret = ttm_bo_wait(bo, false, true, no_wait);
1721         spin_unlock(&bo->lock);
1722         if (likely(ret == 0))
1723                 atomic_inc(&bo->cpu_writers);
1724         ttm_bo_unreserve(bo);
1725         return ret;
1726 }
1727 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1728
1729 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1730 {
1731         if (atomic_dec_and_test(&bo->cpu_writers))
1732                 wake_up_all(&bo->event_queue);
1733 }
1734 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
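/*
 * Illustrative sketch (not from this file): bracketing a CPU write with
 * the synccpu grab/release pair, here combined with the regular TTM kmap
 * helpers. "bo" is assumed to be an already validated buffer object.
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      int ret;
 *
 *      ret = ttm_bo_synccpu_write_grab(bo, false);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *      if (likely(ret == 0)) {
 *              memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0,
 *                     bo->num_pages << PAGE_SHIFT);
 *              ttm_bo_kunmap(&map);
 *      }
 *      ttm_bo_synccpu_write_release(bo);
 */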
1735
1736 /**
1737  * A buffer object shrink method that tries to swap out the first
1738  * buffer object on the bo_global::swap_lru list.
1739  */
1740
1741 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1742 {
1743         struct ttm_bo_global *glob =
1744             container_of(shrink, struct ttm_bo_global, shrink);
1745         struct ttm_buffer_object *bo;
1746         int ret = -EBUSY;
1747         int put_count;
1748         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1749
1750         spin_lock(&glob->lru_lock);
1751         while (ret == -EBUSY) {
1752                 if (unlikely(list_empty(&glob->swap_lru))) {
1753                         spin_unlock(&glob->lru_lock);
1754                         return -EBUSY;
1755                 }
1756
1757                 bo = list_first_entry(&glob->swap_lru,
1758                                       struct ttm_buffer_object, swap);
1759                 kref_get(&bo->list_kref);
1760
1761                 if (!list_empty(&bo->ddestroy)) {
1762                         spin_unlock(&glob->lru_lock);
1763                         (void) ttm_bo_cleanup_refs(bo, false, false, false);
1764                         kref_put(&bo->list_kref, ttm_bo_release_list);
1765                         continue;
1766                 }
1767
1768                 /**
1769                  * Reserve buffer. Since we unlock while sleeping, we need
1770                  * to re-check that nobody removed us from the swap-list while
1771                  * we slept.
1772                  */
1773
1774                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1775                 if (unlikely(ret == -EBUSY)) {
1776                         spin_unlock(&glob->lru_lock);
1777                         ttm_bo_wait_unreserved(bo, false);
1778                         kref_put(&bo->list_kref, ttm_bo_release_list);
1779                         spin_lock(&glob->lru_lock);
1780                 }
1781         }
1782
1783         BUG_ON(ret != 0);
1784         put_count = ttm_bo_del_from_lru(bo);
1785         spin_unlock(&glob->lru_lock);
1786
1787         ttm_bo_list_ref_sub(bo, put_count, true);
1788
1789         /**
1790          * Wait for GPU, then move to system cached.
1791          */
1792
1793         spin_lock(&bo->lock);
1794         ret = ttm_bo_wait(bo, false, false, false);
1795         spin_unlock(&bo->lock);
1796
1797         if (unlikely(ret != 0))
1798                 goto out;
1799
1800         if ((bo->mem.placement & swap_placement) != swap_placement) {
1801                 struct ttm_mem_reg evict_mem;
1802
1803                 evict_mem = bo->mem;
1804                 evict_mem.mm_node = NULL;
1805                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1806                 evict_mem.mem_type = TTM_PL_SYSTEM;
1807
1808                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1809                                              false, false, false);
1810                 if (unlikely(ret != 0))
1811                         goto out;
1812         }
1813
1814         ttm_bo_unmap_virtual(bo);
1815
1816         /**
1817          * Swap out. Buffer will be swapped in again as soon as
1818          * anyone tries to access a ttm page.
1819          */
1820
1821         if (bo->bdev->driver->swap_notify)
1822                 bo->bdev->driver->swap_notify(bo);
1823
1824         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1825 out:
1826
1827         /**
1828          *
1829          * Unreserve without putting on LRU to avoid swapping out an
1830          * already swapped buffer.
1831          */
1832
1833         atomic_set(&bo->reserved, 0);
1834         wake_up_all(&bo->event_queue);
1835         kref_put(&bo->list_kref, ttm_bo_release_list);
1836         return ret;
1837 }
1838
1839 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1840 {
1841         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1842                 ;
1843 }
1844 EXPORT_SYMBOL(ttm_bo_swapout_all);
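/*
 * Illustrative note (assumption, not from this file): a driver may invoke
 * ttm_bo_swapout_all() while preparing for hibernation so that buffer
 * contents end up in swappable shmem pages, e.g.
 *
 *      ttm_bo_swapout_all(&foo->bdev);
 *
 * where "foo" is a hypothetical driver private structure.
 */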