drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, so to
 * keep things consistent you must only access it while holding the
 * global lru lock, and you must reset it to NULL whenever you free a
 * node.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

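/*
 * Map a placement flag word to a memory type index: the lowest set bit
 * among the per-type bits (TTM_PL_SYSTEM .. TTM_PL_PRIV5) wins. Returns
 * -EINVAL if no memory-type bit is set at all.
 */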
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                man->default_caching);
        if (mem_type != TTM_PL_SYSTEM) {
                spin_lock(&bdev->glob->lru_lock);
                drm_mm_debug_table(&man->manager, TTM_PFX);
                spin_unlock(&bdev->glob->lru_lock);
        }
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
                bo, bo->mem.num_pages, bo->mem.size >> 10,
                bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                        i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{

        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

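        /*
         * Spin on the reservation bit, dropping the lru_lock while we
         * sleep. If the caller supplied a validation sequence and this bo
         * was already reserved under the same or a newer sequence, back
         * off with -EAGAIN so the caller can resolve the would-be
         * deadlock; the subtraction test compares sequence numbers safely
         * across 32-bit wrap-around.
         */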
        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                        (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

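/*
 * Illustrative usage (a sketch, not code from this file): callers
 * typically bracket validation with a reserve/unreserve pair:
 *
 *      ret = ttm_bo_reserve(bo, true, false, false, 0);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ret = ttm_bo_validate(bo, &placement, true, false, false);
 *      ttm_bo_unreserve(bo);
 */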
int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
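                /* fall through: device bos get a ttm just like kernel bos */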
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }

        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);

                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);

                return 0;
        }

        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
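                /* Re-run the delayed-destroy scan in ~10ms, at least one jiffy. */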
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
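        /*
         * The caller (ttm_bo_unref) holds bdev->vm_lock; drop it across
         * ttm_bo_cleanup_refs(), which may block, and re-take it before
         * returning so the caller can unlock as usual.
         */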
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved = false;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                                no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                spin_lock(&glob->lru_lock);
                if (evict_mem.mm_node) {
                        drm_mm_put_block(evict_mem.mm_node);
                        evict_mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                bool interruptible, bool no_wait_reserve,
                                bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

retry:
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);

        ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
                if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);

                kref_put(&bo->list_kref, ttm_bo_release_list);

                /**
                 * We *need* to retry after releasing the lru lock.
                 */

                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

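/*
 * Find a free drm_mm node for @mem inside [fpfn, lpfn). drm_mm_pre_get()
 * preallocates node memory outside the spinlock; the search and the
 * atomic get then run under the lru_lock, and the loop retries if
 * another thread raced us to the block. A 0 return with *node == NULL
 * means "no space available".
 */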
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
                                struct ttm_mem_type_manager *man,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                struct drm_mm_node **node)
{
        struct ttm_bo_global *glob = bo->glob;
        unsigned long lpfn;
        int ret;

        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
        *node = NULL;
        do {
                ret = drm_mm_pre_get(&man->manager);
                if (unlikely(ret))
                        return ret;

                spin_lock(&glob->lru_lock);
                *node = drm_mm_search_free_in_range(&man->manager,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(*node == NULL)) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
                *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
                                                        mem->page_alignment,
                                                        placement->fpfn,
                                                        lpfn);
                spin_unlock(&glob->lru_lock);
        } while (*node == NULL);
        return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_reserve,
                                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct drm_mm_node *node;
        int ret;

        do {
                ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
                if (unlikely(ret != 0))
                        return ret;
                if (node)
                        break;
                spin_lock(&glob->lru_lock);
                if (list_empty(&man->lru)) {
                        spin_unlock(&glob->lru_lock);
                        break;
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                                no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (node == NULL)
                return -ENOMEM;
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

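/*
 * Pick caching flags for a move: prefer whatever caching the bo already
 * uses, then the manager's default, then cached > write-combined >
 * uncached among what the proposed placement allows.
 */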
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        struct drm_mm_node *node = NULL;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = ttm_bo_man_get_node(bo, man, placement, mem,
                                                        &node);
                        if (unlikely(ret))
                                return ret;
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved = false;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

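/*
 * Check whether the current memory region already satisfies @placement.
 * Returns the index of the first compatible placement entry, or -1 if
 * the bo has to be moved.
 */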
static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;
        struct drm_mm_node *node = mem->mm_node;

        if (node && placement->lpfn != 0 &&
            (node->start < placement->fpfn ||
             node->start + node->size > placement->lpfn))
                return -1;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                        TTM_PL_MASK_CACHING) &&
                        (placement->placement[i] & mem->placement &
                        TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                        (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the compatible memory placement into the active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        int i;

        if (placement->fpfn || placement->lpfn) {
                if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
                        printk(KERN_ERR TTM_PFX "Page number range too small: "
                                "need %lu pages, range is [%u, %u]\n",
                                bo->mem.num_pages, placement->fpfn,
                                placement->lpfn);
                        return -EINVAL;
                }
        }
        for (i = 0; i < placement->num_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        for (i = 0; i < placement->num_busy_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

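/*
 * Note on ownership: the bo starts out reserved here and is unreserved
 * on success. On failure ttm_bo_init() unreserves and unrefs the bo
 * itself, so callers must not unref it again.
 */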
int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistant_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved = false;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

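/*
 * Rough per-bo kernel-memory accounting size: the fixed bo/ttm/backend
 * footprint plus two page-aligned page-pointer arrays for @num_pages
 * pages.
 */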
1209 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1210                                  unsigned long num_pages)
1211 {
1212         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1213             PAGE_MASK;
1214
1215         return glob->ttm_bo_size + 2 * page_array_size;
1216 }
1217
1218 int ttm_bo_create(struct ttm_bo_device *bdev,
1219                         unsigned long size,
1220                         enum ttm_bo_type type,
1221                         struct ttm_placement *placement,
1222                         uint32_t page_alignment,
1223                         unsigned long buffer_start,
1224                         bool interruptible,
1225                         struct file *persistant_swap_storage,
1226                         struct ttm_buffer_object **p_bo)
1227 {
1228         struct ttm_buffer_object *bo;
1229         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1230         int ret;
1231
1232         size_t acc_size =
1233             ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1234         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1235         if (unlikely(ret != 0))
1236                 return ret;
1237
1238         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1239
1240         if (unlikely(bo == NULL)) {
1241                 ttm_mem_global_free(mem_glob, acc_size);
1242                 return -ENOMEM;
1243         }
1244
1245         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1246                                 buffer_start, interruptible,
1247                                 persistant_swap_storage, acc_size, NULL);
1248         if (likely(ret == 0))
1249                 *p_bo = bo;
1250
1251         return ret;
1252 }
1253
1254 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1255                                         unsigned mem_type, bool allow_errors)
1256 {
1257         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1258         struct ttm_bo_global *glob = bdev->glob;
1259         int ret;
1260
1261         /*
1262          * Can't use standard list traversal since we're unlocking.
1263          */
1264
1265         spin_lock(&glob->lru_lock);
1266         while (!list_empty(&man->lru)) {
1267                 spin_unlock(&glob->lru_lock);
1268                 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1269                 if (ret) {
1270                         if (allow_errors) {
1271                                 return ret;
1272                         } else {
1273                                 printk(KERN_ERR TTM_PFX
1274                                         "Cleanup eviction failed\n");
1275                         }
1276                 }
1277                 spin_lock(&glob->lru_lock);
1278         }
1279         spin_unlock(&glob->lru_lock);
1280         return 0;
1281 }
1282
1283 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1284 {
1285         struct ttm_bo_global *glob = bdev->glob;
1286         struct ttm_mem_type_manager *man;
1287         int ret = -EINVAL;
1288
1289         if (mem_type >= TTM_NUM_MEM_TYPES) {
1290                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1291                 return ret;
1292         }
1293         man = &bdev->man[mem_type];
1294
1295         if (!man->has_type) {
1296                 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1297                        "memory manager type %u\n", mem_type);
1298                 return ret;
1299         }
1300
1301         man->use_type = false;
1302         man->has_type = false;
1303
1304         ret = 0;
1305         if (mem_type > 0) {
1306                 ttm_bo_force_list_clean(bdev, mem_type, false);
1307
1308                 spin_lock(&glob->lru_lock);
1309                 if (drm_mm_clean(&man->manager))
1310                         drm_mm_takedown(&man->manager);
1311                 else
1312                         ret = -EBUSY;
1313
1314                 spin_unlock(&glob->lru_lock);
1315         }
1316
1317         return ret;
1318 }
1319 EXPORT_SYMBOL(ttm_bo_clean_mm);
1320
1321 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1322 {
1323         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1324
1325         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1326                 printk(KERN_ERR TTM_PFX
1327                        "Illegal memory manager memory type %u.\n",
1328                        mem_type);
1329                 return -EINVAL;
1330         }
1331
1332         if (!man->has_type) {
1333                 printk(KERN_ERR TTM_PFX
1334                        "Memory type %u has not been initialized.\n",
1335                        mem_type);
1336                 return 0;
1337         }
1338
1339         return ttm_bo_force_list_clean(bdev, mem_type, true);
1340 }
1341 EXPORT_SYMBOL(ttm_bo_evict_mm);
1342
1343 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1344                         unsigned long p_size)
1345 {
1346         int ret = -EINVAL;
1347         struct ttm_mem_type_manager *man;
1348
1349         if (type >= TTM_NUM_MEM_TYPES) {
1350                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1351                 return ret;
1352         }
1353
1354         man = &bdev->man[type];
1355         if (man->has_type) {
1356                 printk(KERN_ERR TTM_PFX
1357                        "Memory manager already initialized for type %d\n",
1358                        type);
1359                 return ret;
1360         }
1361
1362         ret = bdev->driver->init_mem_type(bdev, type, man);
1363         if (ret)
1364                 return ret;
1365
1366         ret = 0;
1367         if (type != TTM_PL_SYSTEM) {
1368                 if (!p_size) {
1369                         printk(KERN_ERR TTM_PFX
1370                                "Zero size memory manager type %d\n",
1371                                type);
1372                         return ret;
1373                 }
1374                 ret = drm_mm_init(&man->manager, 0, p_size);
1375                 if (ret)
1376                         return ret;
1377         }
1378         man->has_type = true;
1379         man->use_type = true;
1380         man->size = p_size;
1381
1382         INIT_LIST_HEAD(&man->lru);
1383
1384         return 0;
1385 }
1386 EXPORT_SYMBOL(ttm_bo_init_mm);
1387
1388 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1389 {
1390         struct ttm_bo_global *glob =
1391                 container_of(kobj, struct ttm_bo_global, kobj);
1392
1393         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1394         __free_page(glob->dummy_read_page);
1395         kfree(glob);
1396 }
1397
1398 void ttm_bo_global_release(struct drm_global_reference *ref)
1399 {
1400         struct ttm_bo_global *glob = ref->object;
1401
1402         kobject_del(&glob->kobj);
1403         kobject_put(&glob->kobj);
1404 }
1405 EXPORT_SYMBOL(ttm_bo_global_release);
1406
1407 int ttm_bo_global_init(struct drm_global_reference *ref)
1408 {
1409         struct ttm_bo_global_ref *bo_ref =
1410                 container_of(ref, struct ttm_bo_global_ref, ref);
1411         struct ttm_bo_global *glob = ref->object;
1412         int ret;
1413
1414         mutex_init(&glob->device_list_mutex);
1415         spin_lock_init(&glob->lru_lock);
1416         glob->mem_glob = bo_ref->mem_glob;
1417         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1418
1419         if (unlikely(glob->dummy_read_page == NULL)) {
1420                 ret = -ENOMEM;
1421                 goto out_no_drp;
1422         }
1423
1424         INIT_LIST_HEAD(&glob->swap_lru);
1425         INIT_LIST_HEAD(&glob->device_list);
1426
1427         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1428         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1429         if (unlikely(ret != 0)) {
1430                 printk(KERN_ERR TTM_PFX
1431                        "Could not register buffer object swapout.\n");
1432                 goto out_no_shrink;
1433         }
1434
1435         glob->ttm_bo_extra_size =
1436                 ttm_round_pot(sizeof(struct ttm_tt)) +
1437                 ttm_round_pot(sizeof(struct ttm_backend));
1438
1439         glob->ttm_bo_size = glob->ttm_bo_extra_size +
1440                 ttm_round_pot(sizeof(struct ttm_buffer_object));
1441
1442         atomic_set(&glob->bo_count, 0);
1443
1444         ret = kobject_init_and_add(
1445                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1446         if (unlikely(ret != 0))
1447                 kobject_put(&glob->kobj);
1448         return ret;
1449 out_no_shrink:
1450         __free_page(glob->dummy_read_page);
1451 out_no_drp:
1452         kfree(glob);
1453         return ret;
1454 }
1455 EXPORT_SYMBOL(ttm_bo_global_init);
1456
1457
1458 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1459 {
1460         int ret = 0;
1461         unsigned i = TTM_NUM_MEM_TYPES;
1462         struct ttm_mem_type_manager *man;
1463         struct ttm_bo_global *glob = bdev->glob;
1464
1465         while (i--) {
1466                 man = &bdev->man[i];
1467                 if (man->has_type) {
1468                         man->use_type = false;
1469                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1470                                 ret = -EBUSY;
1471                                 printk(KERN_ERR TTM_PFX
1472                                        "DRM memory manager type %d "
1473                                        "is not clean.\n", i);
1474                         }
1475                         man->has_type = false;
1476                 }
1477         }
1478
1479         mutex_lock(&glob->device_list_mutex);
1480         list_del(&bdev->device_list);
1481         mutex_unlock(&glob->device_list_mutex);
1482
1483         if (!cancel_delayed_work(&bdev->wq))
1484                 flush_scheduled_work();
1485
1486         while (ttm_bo_delayed_delete(bdev, true))
1487                 ;
1488
1489         spin_lock(&glob->lru_lock);
1490         if (list_empty(&bdev->ddestroy))
1491                 TTM_DEBUG("Delayed destroy list was clean\n");
1492
1493         if (list_empty(&bdev->man[0].lru))
1494                 TTM_DEBUG("Swap list was clean\n");
1495         spin_unlock(&glob->lru_lock);
1496
1497         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1498         write_lock(&bdev->vm_lock);
1499         drm_mm_takedown(&bdev->addr_space_mm);
1500         write_unlock(&bdev->vm_lock);
1501
1502         return ret;
1503 }
1504 EXPORT_SYMBOL(ttm_bo_device_release);
1505
1506 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1507                        struct ttm_bo_global *glob,
1508                        struct ttm_bo_driver *driver,
1509                        uint64_t file_page_offset,
1510                        bool need_dma32)
1511 {
1512         int ret = -EINVAL;
1513
1514         rwlock_init(&bdev->vm_lock);
1515         bdev->driver = driver;
1516
1517         memset(bdev->man, 0, sizeof(bdev->man));
1518
1519         /*
1520          * Initialize the system memory buffer type.
1521          * Other types need to be driver / IOCTL initialized.
1522          */
1523         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1524         if (unlikely(ret != 0))
1525                 goto out_no_sys;
1526
1527         bdev->addr_space_rb = RB_ROOT;
1528         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1529         if (unlikely(ret != 0))
1530                 goto out_no_addr_mm;
1531
1532         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1533         bdev->nice_mode = true;
1534         INIT_LIST_HEAD(&bdev->ddestroy);
1535         bdev->dev_mapping = NULL;
1536         bdev->glob = glob;
1537         bdev->need_dma32 = need_dma32;
1538
1539         mutex_lock(&glob->device_list_mutex);
1540         list_add_tail(&bdev->device_list, &glob->device_list);
1541         mutex_unlock(&glob->device_list_mutex);
1542
1543         return 0;
1544 out_no_addr_mm:
1545         ttm_bo_clean_mm(bdev, TTM_PL_SYSTEM);
1546 out_no_sys:
1547         return ret;
1548 }
1549 EXPORT_SYMBOL(ttm_bo_device_init);
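
/*
 * Example (editor's sketch): a typical driver-side call, where
 * "mydrv_bo_driver" is the driver's ttm_bo_driver table and
 * "DRM_FILE_PAGE_OFFSET" is the driver's chosen start of the mmap
 * offset space; both names are placeholders.
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET, mydrv->need_dma32);
 */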
1550
1551 /*
1552  * buffer object vm functions.
1553  */
1554
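/*
 * Decide whether CPU access to @mem has to go through a PCI aperture:
 * fixed (aperture-backed) memory types always do, while system memory,
 * CMA-backed regions and cached placements can be reached directly.
 */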
1555 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1556 {
1557         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1558
1559         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1560                 if (mem->mem_type == TTM_PL_SYSTEM)
1561                         return false;
1562
1563                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1564                         return false;
1565
1566                 if (mem->placement & TTM_PL_FLAG_CACHED)
1567                         return false;
1568         }
1569         return true;
1570 }
1571
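/*
 * Kill any CPU mappings of @bo in the device address space and release
 * its io reservation, so that the next CPU access faults and re-maps
 * the buffer at its new location.
 */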
1572 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1573 {
1574         struct ttm_bo_device *bdev = bo->bdev;
1575         loff_t offset = (loff_t) bo->addr_space_offset;
1576         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1577
1578         if (!bdev->dev_mapping)
1579                 return;
1580         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1581         ttm_mem_io_free(bdev, &bo->mem);
1582 }
1583 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1584
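/*
 * Link @bo into the device's address-space red-black tree, keyed by
 * the start offset of its vm_node, so that the mmap fault path can
 * find the buffer object backing a given file offset in O(log n).
 */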
1585 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1586 {
1587         struct ttm_bo_device *bdev = bo->bdev;
1588         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1589         struct rb_node *parent = NULL;
1590         struct ttm_buffer_object *cur_bo;
1591         unsigned long offset = bo->vm_node->start;
1592         unsigned long cur_offset;
1593
1594         while (*cur) {
1595                 parent = *cur;
1596                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1597                 cur_offset = cur_bo->vm_node->start;
1598                 if (offset < cur_offset)
1599                         cur = &parent->rb_left;
1600                 else if (offset > cur_offset)
1601                         cur = &parent->rb_right;
1602                 else
1603                         BUG();
1604         }
1605
1606         rb_link_node(&bo->vm_rb, parent, cur);
1607         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1608 }
1609
1610 /**
1611  * ttm_bo_setup_vm:
1612  *
1613  * @bo: the buffer to allocate address space for
1614  *
1615  * Allocate address space in the drm device so that applications
1616  * can mmap the buffer and access the contents. This only
1617  * applies to ttm_bo_type_device objects as others are not
1618  * placed in the drm device address space.
1619  */
1620
1621 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1622 {
1623         struct ttm_bo_device *bdev = bo->bdev;
1624         int ret;
1625
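        /*
         * drm_mm_pre_get() pre-allocates node memory so that the later
         * drm_mm_get_block_atomic() call cannot sleep under vm_lock; if
         * another thread races us to the block found by
         * drm_mm_search_free(), we drop the lock and retry from the top.
         */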
1626 retry_pre_get:
1627         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1628         if (unlikely(ret != 0))
1629                 return ret;
1630
1631         write_lock(&bdev->vm_lock);
1632         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1633                                          bo->mem.num_pages, 0, 0);
1634
1635         if (unlikely(bo->vm_node == NULL)) {
1636                 ret = -ENOMEM;
1637                 goto out_unlock;
1638         }
1639
1640         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1641                                               bo->mem.num_pages, 0);
1642
1643         if (unlikely(bo->vm_node == NULL)) {
1644                 write_unlock(&bdev->vm_lock);
1645                 goto retry_pre_get;
1646         }
1647
1648         ttm_bo_vm_insert_rb(bo);
1649         write_unlock(&bdev->vm_lock);
1650         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1651
1652         return 0;
1653 out_unlock:
1654         write_unlock(&bdev->vm_lock);
1655         return ret;
1656 }
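
/*
 * Sketch of how the offset set up above is consumed: the driver returns
 * bo->addr_space_offset to userspace through a driver-specific ioctl,
 * and userspace passes it as the mmap offset on the DRM device fd.
 * "drm_fd", "bo_size" and "offset" are placeholder names.
 *
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */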
1657
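/*
 * Note on locking: ttm_bo_wait() must be entered with bo->lock held;
 * it drops and re-acquires that lock around the driver's
 * sync_obj_wait() and sync_obj_unref() callbacks, so callers must not
 * rely on state sampled before the call.
 */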
1658 int ttm_bo_wait(struct ttm_buffer_object *bo,
1659                 bool lazy, bool interruptible, bool no_wait)
1660 {
1661         struct ttm_bo_driver *driver = bo->bdev->driver;
1662         void *sync_obj;
1663         void *sync_obj_arg;
1664         int ret = 0;
1665
1666         if (likely(bo->sync_obj == NULL))
1667                 return 0;
1668
1669         while (bo->sync_obj) {
1670
1671                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1672                         void *tmp_obj = bo->sync_obj;
1673                         bo->sync_obj = NULL;
1674                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1675                         spin_unlock(&bo->lock);
1676                         driver->sync_obj_unref(&tmp_obj);
1677                         spin_lock(&bo->lock);
1678                         continue;
1679                 }
1680
1681                 if (no_wait)
1682                         return -EBUSY;
1683
1684                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1685                 sync_obj_arg = bo->sync_obj_arg;
1686                 spin_unlock(&bo->lock);
1687                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1688                                             lazy, interruptible);
1689                 if (unlikely(ret != 0)) {
1690                         driver->sync_obj_unref(&sync_obj);
1691                         spin_lock(&bo->lock);
1692                         return ret;
1693                 }
1694                 spin_lock(&bo->lock);
1695                 if (likely(bo->sync_obj == sync_obj &&
1696                            bo->sync_obj_arg == sync_obj_arg)) {
1697                         void *tmp_obj = bo->sync_obj;
1698                         bo->sync_obj = NULL;
1699                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1700                                   &bo->priv_flags);
1701                         spin_unlock(&bo->lock);
1702                         driver->sync_obj_unref(&sync_obj);
1703                         driver->sync_obj_unref(&tmp_obj);
1704                         spin_lock(&bo->lock);
1705                 } else {
1706                         spin_unlock(&bo->lock);
1707                         driver->sync_obj_unref(&sync_obj);
1708                         spin_lock(&bo->lock);
1709                 }
1710         }
1711         return 0;
1712 }
1713 EXPORT_SYMBOL(ttm_bo_wait);
1714
1715 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1716 {
1717         int ret = 0;
1718
1719         /*
1720          * ttm_bo_reserve also takes the buffer off the LRU lists while held.
1721          */
1722
1723         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1724         if (unlikely(ret != 0))
1725                 return ret;
1726         spin_lock(&bo->lock);
1727         ret = ttm_bo_wait(bo, false, true, no_wait);
1728         spin_unlock(&bo->lock);
1729         if (likely(ret == 0))
1730                 atomic_inc(&bo->cpu_writers);
1731         ttm_bo_unreserve(bo);
1732         return ret;
1733 }
1734 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1735
1736 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1737 {
1738         if (atomic_dec_and_test(&bo->cpu_writers))
1739                 wake_up_all(&bo->event_queue);
1740 }
1741 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
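
/*
 * Example (editor's sketch): the grab/release pair above brackets
 * direct CPU writes to a buffer; "fill_buffer" stands in for whatever
 * kmap-and-write the caller actually does.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (likely(ret == 0)) {
 *		fill_buffer(bo);
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */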
1742
1743 /**
1744  * A buffer object shrink method that tries to swap out the first
1745  * buffer object on the bo_global::swap_lru list.
1746  */
1747
1748 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1749 {
1750         struct ttm_bo_global *glob =
1751             container_of(shrink, struct ttm_bo_global, shrink);
1752         struct ttm_buffer_object *bo;
1753         int ret = -EBUSY;
1754         int put_count;
1755         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1756
1757         spin_lock(&glob->lru_lock);
1758         while (ret == -EBUSY) {
1759                 if (unlikely(list_empty(&glob->swap_lru))) {
1760                         spin_unlock(&glob->lru_lock);
1761                         return -EBUSY;
1762                 }
1763
1764                 bo = list_first_entry(&glob->swap_lru,
1765                                       struct ttm_buffer_object, swap);
1766                 kref_get(&bo->list_kref);
1767
1768                 /*
1769                  * Reserve buffer. Since we unlock while sleeping, we need
1770                  * to re-check that nobody removed us from the swap-list while
1771                  * we slept.
1772                  */
1773
1774                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1775                 if (unlikely(ret == -EBUSY)) {
1776                         spin_unlock(&glob->lru_lock);
1777                         ttm_bo_wait_unreserved(bo, false);
1778                         kref_put(&bo->list_kref, ttm_bo_release_list);
1779                         spin_lock(&glob->lru_lock);
1780                 }
1781         }
1782
1783         BUG_ON(ret != 0);
1784         put_count = ttm_bo_del_from_lru(bo);
1785         spin_unlock(&glob->lru_lock);
1786
1787         while (put_count--)
1788                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1789
1790         /*
1791          * Wait for GPU, then move to system cached.
1792          */
1793
1794         spin_lock(&bo->lock);
1795         ret = ttm_bo_wait(bo, false, false, false);
1796         spin_unlock(&bo->lock);
1797
1798         if (unlikely(ret != 0))
1799                 goto out;
1800
1801         if ((bo->mem.placement & swap_placement) != swap_placement) {
1802                 struct ttm_mem_reg evict_mem;
1803
1804                 evict_mem = bo->mem;
1805                 evict_mem.mm_node = NULL;
1806                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1807                 evict_mem.mem_type = TTM_PL_SYSTEM;
1808
1809                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1810                                              false, false, false);
1811                 if (unlikely(ret != 0))
1812                         goto out;
1813         }
1814
1815         ttm_bo_unmap_virtual(bo);
1816
1817         /*
1818          * Swap out. Buffer will be swapped in again as soon as
1819          * anyone tries to access a ttm page.
1820          */
1821
1822         if (bo->bdev->driver->swap_notify)
1823                 bo->bdev->driver->swap_notify(bo);
1824
1825         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1826 out:
1827
1828         /*
1829          * Unreserve by hand, without putting the buffer back on the
1830          * LRU lists, so that a buffer that has just been swapped out
1831          * is not immediately swapped out again.
1832          */
1833
1834         atomic_set(&bo->reserved, 0);
1835         wake_up_all(&bo->event_queue);
1836         kref_put(&bo->list_kref, ttm_bo_release_list);
1837         return ret;
1838 }
1839
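/*
 * Swap out buffer objects until ttm_bo_swapout() reports that nothing
 * more can be swapped, for callers (e.g. a driver preparing to
 * hibernate) that want all buffer objects backed by swappable pages.
 */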
1840 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1841 {
1842         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1843                 ;
1844 }
1845 EXPORT_SYMBOL(ttm_bo_swapout_all);