drivers/gpu/drm/ttm/ttm_bo.c (pandora-kernel.git)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30
31 #include "ttm/ttm_module.h"
32 #include "ttm/ttm_bo_driver.h"
33 #include "ttm/ttm_placement.h"
34 #include <linux/jiffies.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
37 #include <linux/mm.h>
38 #include <linux/file.h>
39 #include <linux/module.h>
40
41 #define TTM_ASSERT_LOCKED(param)
42 #define TTM_DEBUG(fmt, arg...)
43 #define TTM_BO_HASH_ORDER 13
44
45 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
47
48 static inline uint32_t ttm_bo_type_flags(unsigned type)
49 {
50         return 1 << (type);
51 }
52
53 static void ttm_bo_release_list(struct kref *list_kref)
54 {
55         struct ttm_buffer_object *bo =
56             container_of(list_kref, struct ttm_buffer_object, list_kref);
57         struct ttm_bo_device *bdev = bo->bdev;
58
59         BUG_ON(atomic_read(&bo->list_kref.refcount));
60         BUG_ON(atomic_read(&bo->kref.refcount));
61         BUG_ON(atomic_read(&bo->cpu_writers));
62         BUG_ON(bo->sync_obj != NULL);
63         BUG_ON(bo->mem.mm_node != NULL);
64         BUG_ON(!list_empty(&bo->lru));
65         BUG_ON(!list_empty(&bo->ddestroy));
66
67         if (bo->ttm)
68                 ttm_tt_destroy(bo->ttm);
69         if (bo->destroy)
70                 bo->destroy(bo);
71         else {
72                 ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
73                 kfree(bo);
74         }
75 }
76
77 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
78 {
79
80         if (interruptible) {
81                 int ret = 0;
82
83                 ret = wait_event_interruptible(bo->event_queue,
84                                                atomic_read(&bo->reserved) == 0);
85                 if (unlikely(ret != 0))
86                         return -ERESTART;
87         } else {
88                 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
89         }
90         return 0;
91 }
92
93 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
94 {
95         struct ttm_bo_device *bdev = bo->bdev;
96         struct ttm_mem_type_manager *man;
97
98         BUG_ON(!atomic_read(&bo->reserved));
99
100         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
101
102                 BUG_ON(!list_empty(&bo->lru));
103
104                 man = &bdev->man[bo->mem.mem_type];
105                 list_add_tail(&bo->lru, &man->lru);
106                 kref_get(&bo->list_kref);
107
108                 if (bo->ttm != NULL) {
109                         list_add_tail(&bo->swap, &bdev->swap_lru);
110                         kref_get(&bo->list_kref);
111                 }
112         }
113 }
114
115 /**
116  * Call with the lru_lock held.
117  */
118
119 static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
120 {
121         int put_count = 0;
122
123         if (!list_empty(&bo->swap)) {
124                 list_del_init(&bo->swap);
125                 ++put_count;
126         }
127         if (!list_empty(&bo->lru)) {
128                 list_del_init(&bo->lru);
129                 ++put_count;
130         }
131
132         /*
133          * TODO: Add a driver hook to delete from
134          * driver-specific LRU's here.
135          */
136
137         return put_count;
138 }
139
140 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
141                           bool interruptible,
142                           bool no_wait, bool use_sequence, uint32_t sequence)
143 {
144         struct ttm_bo_device *bdev = bo->bdev;
145         int ret;
146
147         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
148                 if (use_sequence && bo->seq_valid &&
149                         (sequence - bo->val_seq < (1 << 31))) {
150                         return -EAGAIN;
151                 }
152
153                 if (no_wait)
154                         return -EBUSY;
155
156                 spin_unlock(&bdev->lru_lock);
157                 ret = ttm_bo_wait_unreserved(bo, interruptible);
158                 spin_lock(&bdev->lru_lock);
159
160                 if (unlikely(ret))
161                         return ret;
162         }
163
164         if (use_sequence) {
165                 bo->val_seq = sequence;
166                 bo->seq_valid = true;
167         } else {
168                 bo->seq_valid = false;
169         }
170
171         return 0;
172 }
173 EXPORT_SYMBOL(ttm_bo_reserve);
174
175 static void ttm_bo_ref_bug(struct kref *list_kref)
176 {
177         BUG();
178 }
179
180 int ttm_bo_reserve(struct ttm_buffer_object *bo,
181                    bool interruptible,
182                    bool no_wait, bool use_sequence, uint32_t sequence)
183 {
184         struct ttm_bo_device *bdev = bo->bdev;
185         int put_count = 0;
186         int ret;
187
188         spin_lock(&bdev->lru_lock);
189         ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
190                                     sequence);
191         if (likely(ret == 0))
192                 put_count = ttm_bo_del_from_lru(bo);
193         spin_unlock(&bdev->lru_lock);
194
195         while (put_count--)
196                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
197
198         return ret;
199 }
200
201 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
202 {
203         struct ttm_bo_device *bdev = bo->bdev;
204
205         spin_lock(&bdev->lru_lock);
206         ttm_bo_add_to_lru(bo);
207         atomic_set(&bo->reserved, 0);
208         wake_up_all(&bo->event_queue);
209         spin_unlock(&bdev->lru_lock);
210 }
211 EXPORT_SYMBOL(ttm_bo_unreserve);
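/*
 * Usage sketch (hypothetical driver code, not taken from this file): callers
 * bracket anything that may change bo->mem with a reserve / unreserve pair.
 * Reserving also takes the buffer off the LRU lists, so the eviction paths
 * in this file cannot pick it while the caller works on it; unreserving puts
 * it back and wakes waiters.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_buffer_object_validate(bo, placement_flags, true, false);
 *	ttm_bo_unreserve(bo);
 */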
212
213 /*
214  * Call bo->mutex locked.
215  */
216
217 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
218 {
219         struct ttm_bo_device *bdev = bo->bdev;
220         int ret = 0;
221         uint32_t page_flags = 0;
222
223         TTM_ASSERT_LOCKED(&bo->mutex);
224         bo->ttm = NULL;
225
226         if (bdev->need_dma32)
227                 page_flags |= TTM_PAGE_FLAG_DMA32;
228
229         switch (bo->type) {
230         case ttm_bo_type_device:
231                 if (zero_alloc)
232                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
233         case ttm_bo_type_kernel:
234                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
235                                         page_flags, bdev->dummy_read_page);
236                 if (unlikely(bo->ttm == NULL))
237                         ret = -ENOMEM;
238                 break;
239         case ttm_bo_type_user:
240                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
241                                         page_flags | TTM_PAGE_FLAG_USER,
242                                         bdev->dummy_read_page);
243                 if (unlikely(bo->ttm == NULL)) {
244                         ret = -ENOMEM;
245                         break;
246                 }
247                 ret = ttm_tt_set_user(bo->ttm, current,
248                                       bo->buffer_start, bo->num_pages);
249                 if (unlikely(ret != 0))
250                         ttm_tt_destroy(bo->ttm);
251                 break;
252         default:
253                 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
254                 ret = -EINVAL;
255                 break;
256         }
257
258         return ret;
259 }
260
261 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
262                                   struct ttm_mem_reg *mem,
263                                   bool evict, bool interruptible, bool no_wait)
264 {
265         struct ttm_bo_device *bdev = bo->bdev;
266         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
267         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
268         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
269         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
270         int ret = 0;
271
272         if (old_is_pci || new_is_pci ||
273             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
274                 ttm_bo_unmap_virtual(bo);
275
276         /*
277          * Create and bind a ttm if required.
278          */
279
280         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
281                 ret = ttm_bo_add_ttm(bo, false);
282                 if (ret)
283                         goto out_err;
284
285                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
286                 if (ret)
287                         goto out_err;
288
289                 if (mem->mem_type != TTM_PL_SYSTEM) {
290                         ret = ttm_tt_bind(bo->ttm, mem);
291                         if (ret)
292                                 goto out_err;
293                 }
294
295                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
296
297                         struct ttm_mem_reg *old_mem = &bo->mem;
298                         uint32_t save_flags = old_mem->placement;
299
300                         *old_mem = *mem;
301                         mem->mm_node = NULL;
302                         ttm_flag_masked(&save_flags, mem->placement,
303                                         TTM_PL_MASK_MEMTYPE);
304                         goto moved;
305                 }
306
307         }
308
309         if (bdev->driver->move_notify)
310                 bdev->driver->move_notify(bo, mem);
311
312         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
313             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
314                 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
315         else if (bdev->driver->move)
316                 ret = bdev->driver->move(bo, evict, interruptible,
317                                          no_wait, mem);
318         else
319                 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
320
321         if (ret)
322                 goto out_err;
323
324 moved:
325         if (bo->evicted) {
326                 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
327                 if (ret)
328                         printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
329                 bo->evicted = false;
330         }
331
332         if (bo->mem.mm_node) {
333                 spin_lock(&bo->lock);
334                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
335                     bdev->man[bo->mem.mem_type].gpu_offset;
336                 bo->cur_placement = bo->mem.placement;
337                 spin_unlock(&bo->lock);
338         }
339
340         return 0;
341
342 out_err:
343         new_man = &bdev->man[bo->mem.mem_type];
344         if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
345                 ttm_tt_unbind(bo->ttm);
346                 ttm_tt_destroy(bo->ttm);
347                 bo->ttm = NULL;
348         }
349
350         return ret;
351 }
352
353 /**
354  * If bo idle, remove from delayed- and lru lists, and unref.
355  * If not idle, and already on delayed list, do nothing.
356  * If not idle, and not on delayed list, put on delayed list,
357  *   up the list_kref and schedule a delayed list check.
358  */
359
360 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
361 {
362         struct ttm_bo_device *bdev = bo->bdev;
363         struct ttm_bo_driver *driver = bdev->driver;
364         int ret;
365
366         spin_lock(&bo->lock);
367         (void) ttm_bo_wait(bo, false, false, !remove_all);
368
369         if (!bo->sync_obj) {
370                 int put_count;
371
372                 spin_unlock(&bo->lock);
373
374                 spin_lock(&bdev->lru_lock);
375                 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
376                 BUG_ON(ret);
377                 if (bo->ttm)
378                         ttm_tt_unbind(bo->ttm);
379
380                 if (!list_empty(&bo->ddestroy)) {
381                         list_del_init(&bo->ddestroy);
382                         kref_put(&bo->list_kref, ttm_bo_ref_bug);
383                 }
384                 if (bo->mem.mm_node) {
385                         drm_mm_put_block(bo->mem.mm_node);
386                         bo->mem.mm_node = NULL;
387                 }
388                 put_count = ttm_bo_del_from_lru(bo);
389                 spin_unlock(&bdev->lru_lock);
390
391                 atomic_set(&bo->reserved, 0);
392
393                 while (put_count--)
394                         kref_put(&bo->list_kref, ttm_bo_release_list);
395
396                 return 0;
397         }
398
399         spin_lock(&bdev->lru_lock);
400         if (list_empty(&bo->ddestroy)) {
401                 void *sync_obj = bo->sync_obj;
402                 void *sync_obj_arg = bo->sync_obj_arg;
403
404                 kref_get(&bo->list_kref);
405                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
406                 spin_unlock(&bdev->lru_lock);
407                 spin_unlock(&bo->lock);
408
409                 if (sync_obj)
410                         driver->sync_obj_flush(sync_obj, sync_obj_arg);
411                 schedule_delayed_work(&bdev->wq,
412                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
413                 ret = 0;
414
415         } else {
416                 spin_unlock(&bdev->lru_lock);
417                 spin_unlock(&bo->lock);
418                 ret = -EBUSY;
419         }
420
421         return ret;
422 }
423
424 /**
425  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
426  * encountered buffers.
427  */
428
429 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
430 {
431         struct ttm_buffer_object *entry, *nentry;
432         struct list_head *list, *next;
433         int ret;
434
435         spin_lock(&bdev->lru_lock);
436         list_for_each_safe(list, next, &bdev->ddestroy) {
437                 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
438                 nentry = NULL;
439
440                 /*
441                  * Protect the next list entry from destruction while we
442                  * unlock the lru_lock.
443                  */
444
445                 if (next != &bdev->ddestroy) {
446                         nentry = list_entry(next, struct ttm_buffer_object,
447                                             ddestroy);
448                         kref_get(&nentry->list_kref);
449                 }
450                 kref_get(&entry->list_kref);
451
452                 spin_unlock(&bdev->lru_lock);
453                 ret = ttm_bo_cleanup_refs(entry, remove_all);
454                 kref_put(&entry->list_kref, ttm_bo_release_list);
455
456                 spin_lock(&bdev->lru_lock);
457                 if (nentry) {
458                         bool next_onlist = !list_empty(next);
459                         spin_unlock(&bdev->lru_lock);
460                         kref_put(&nentry->list_kref, ttm_bo_release_list);
461                         spin_lock(&bdev->lru_lock);
462                         /*
463                          * Someone might have raced us and removed the
464                          * next entry from the list. We don't bother restarting
465                          * list traversal.
466                          */
467
468                         if (!next_onlist)
469                                 break;
470                 }
471                 if (ret)
472                         break;
473         }
474         ret = !list_empty(&bdev->ddestroy);
475         spin_unlock(&bdev->lru_lock);
476
477         return ret;
478 }
479
480 static void ttm_bo_delayed_workqueue(struct work_struct *work)
481 {
482         struct ttm_bo_device *bdev =
483             container_of(work, struct ttm_bo_device, wq.work);
484
485         if (ttm_bo_delayed_delete(bdev, false)) {
486                 schedule_delayed_work(&bdev->wq,
487                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
488         }
489 }
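/*
 * The delayed-destroy work above requeues itself roughly every 10 ms
 * (HZ / 100, clamped to at least one jiffy) for as long as
 * ttm_bo_delayed_delete() reports that fenced buffers remain on
 * bdev->ddestroy; once the list drains it stops rescheduling until
 * ttm_bo_cleanup_refs() arms it again.
 */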
490
491 static void ttm_bo_release(struct kref *kref)
492 {
493         struct ttm_buffer_object *bo =
494             container_of(kref, struct ttm_buffer_object, kref);
495         struct ttm_bo_device *bdev = bo->bdev;
496
497         if (likely(bo->vm_node != NULL)) {
498                 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
499                 drm_mm_put_block(bo->vm_node);
500                 bo->vm_node = NULL;
501         }
502         write_unlock(&bdev->vm_lock);
503         ttm_bo_cleanup_refs(bo, false);
504         kref_put(&bo->list_kref, ttm_bo_release_list);
505         write_lock(&bdev->vm_lock);
506 }
507
508 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
509 {
510         struct ttm_buffer_object *bo = *p_bo;
511         struct ttm_bo_device *bdev = bo->bdev;
512
513         *p_bo = NULL;
514         write_lock(&bdev->vm_lock);
515         kref_put(&bo->kref, ttm_bo_release);
516         write_unlock(&bdev->vm_lock);
517 }
518 EXPORT_SYMBOL(ttm_bo_unref);
519
520 static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
521                         bool interruptible, bool no_wait)
522 {
523         int ret = 0;
524         struct ttm_bo_device *bdev = bo->bdev;
525         struct ttm_mem_reg evict_mem;
526         uint32_t proposed_placement;
527
528         if (bo->mem.mem_type != mem_type)
529                 goto out;
530
531         spin_lock(&bo->lock);
532         ret = ttm_bo_wait(bo, false, interruptible, no_wait);
533         spin_unlock(&bo->lock);
534
535         if (unlikely(ret != 0)) {
536                 if (ret != -ERESTART) {
537                         printk(KERN_ERR TTM_PFX
538                                "Failed to expire sync object before "
539                                "buffer eviction.\n");
540                 }
541                 goto out;
542         }
543
544         BUG_ON(!atomic_read(&bo->reserved));
545
546         evict_mem = bo->mem;
547         evict_mem.mm_node = NULL;
548
549         proposed_placement = bdev->driver->evict_flags(bo);
550
551         ret = ttm_bo_mem_space(bo, proposed_placement,
552                                &evict_mem, interruptible, no_wait);
553         if (unlikely(ret != 0 && ret != -ERESTART))
554                 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
555                                        &evict_mem, interruptible, no_wait);
556
557         if (ret) {
558                 if (ret != -ERESTART)
559                         printk(KERN_ERR TTM_PFX
560                                "Failed to find memory space for "
561                                "buffer 0x%p eviction.\n", bo);
562                 goto out;
563         }
564
565         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
566                                      no_wait);
567         if (ret) {
568                 if (ret != -ERESTART)
569                         printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
570                 goto out;
571         }
572
573         spin_lock(&bdev->lru_lock);
574         if (evict_mem.mm_node) {
575                 drm_mm_put_block(evict_mem.mm_node);
576                 evict_mem.mm_node = NULL;
577         }
578         spin_unlock(&bdev->lru_lock);
579         bo->evicted = true;
580 out:
581         return ret;
582 }
583
584 /**
585  * Repeatedly evict memory from the LRU for @mem_type until we create enough
586  * space, or we've evicted everything and there isn't enough space.
587  */
588 static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
589                                   struct ttm_mem_reg *mem,
590                                   uint32_t mem_type,
591                                   bool interruptible, bool no_wait)
592 {
593         struct drm_mm_node *node;
594         struct ttm_buffer_object *entry;
595         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
596         struct list_head *lru;
597         unsigned long num_pages = mem->num_pages;
598         int put_count = 0;
599         int ret;
600
601 retry_pre_get:
602         ret = drm_mm_pre_get(&man->manager);
603         if (unlikely(ret != 0))
604                 return ret;
605
606         spin_lock(&bdev->lru_lock);
607         do {
608                 node = drm_mm_search_free(&man->manager, num_pages,
609                                           mem->page_alignment, 1);
610                 if (node)
611                         break;
612
613                 lru = &man->lru;
614                 if (list_empty(lru))
615                         break;
616
617                 entry = list_first_entry(lru, struct ttm_buffer_object, lru);
618                 kref_get(&entry->list_kref);
619
620                 ret =
621                     ttm_bo_reserve_locked(entry, interruptible, no_wait,
622                                           false, 0);
623
624                 if (likely(ret == 0))
625                         put_count = ttm_bo_del_from_lru(entry);
626
627                 spin_unlock(&bdev->lru_lock);
628
629                 if (unlikely(ret != 0))
630                         return ret;
631
632                 while (put_count--)
633                         kref_put(&entry->list_kref, ttm_bo_ref_bug);
634
635                 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
636
637                 ttm_bo_unreserve(entry);
638
639                 kref_put(&entry->list_kref, ttm_bo_release_list);
640                 if (ret)
641                         return ret;
642
643                 spin_lock(&bdev->lru_lock);
644         } while (1);
645
646         if (!node) {
647                 spin_unlock(&bdev->lru_lock);
648                 return -ENOMEM;
649         }
650
651         node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
652         if (unlikely(!node)) {
653                 spin_unlock(&bdev->lru_lock);
654                 goto retry_pre_get;
655         }
656
657         spin_unlock(&bdev->lru_lock);
658         mem->mm_node = node;
659         mem->mem_type = mem_type;
660         return 0;
661 }
662
663 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
664                                       uint32_t cur_placement,
665                                       uint32_t proposed_placement)
666 {
667         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
668         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
669
670         /**
671          * Keep current caching if possible.
672          */
673
674         if ((cur_placement & caching) != 0)
675                 result |= (cur_placement & caching);
676         else if ((man->default_caching & caching) != 0)
677                 result |= man->default_caching;
678         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
679                 result |= TTM_PL_FLAG_CACHED;
680         else if ((TTM_PL_FLAG_WC & caching) != 0)
681                 result |= TTM_PL_FLAG_WC;
682         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
683                 result |= TTM_PL_FLAG_UNCACHED;
684
685         return result;
686 }
687
688
689 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
690                                  bool disallow_fixed,
691                                  uint32_t mem_type,
692                                  uint32_t proposed_placement,
693                                  uint32_t *masked_placement)
694 {
695         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
696
697         if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
698                 return false;
699
700         if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
701                 return false;
702
703         if ((proposed_placement & man->available_caching) == 0)
704                 return false;
705
706         cur_flags |= (proposed_placement & man->available_caching);
707
708         *masked_placement = cur_flags;
709         return true;
710 }
711
712 /**
713  * Creates space for memory region @mem according to its type.
714  *
715  * This function first searches for free space in compatible memory types in
716  * the priority order defined by the driver.  If free space isn't found, then
717  * ttm_bo_mem_force_space is attempted in priority order to evict and find
718  * space.
719  */
720 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
721                      uint32_t proposed_placement,
722                      struct ttm_mem_reg *mem,
723                      bool interruptible, bool no_wait)
724 {
725         struct ttm_bo_device *bdev = bo->bdev;
726         struct ttm_mem_type_manager *man;
727
728         uint32_t num_prios = bdev->driver->num_mem_type_prio;
729         const uint32_t *prios = bdev->driver->mem_type_prio;
730         uint32_t i;
731         uint32_t mem_type = TTM_PL_SYSTEM;
732         uint32_t cur_flags = 0;
733         bool type_found = false;
734         bool type_ok = false;
735         bool has_eagain = false;
736         struct drm_mm_node *node = NULL;
737         int ret;
738
739         mem->mm_node = NULL;
740         for (i = 0; i < num_prios; ++i) {
741                 mem_type = prios[i];
742                 man = &bdev->man[mem_type];
743
744                 type_ok = ttm_bo_mt_compatible(man,
745                                                bo->type == ttm_bo_type_user,
746                                                mem_type, proposed_placement,
747                                                &cur_flags);
748
749                 if (!type_ok)
750                         continue;
751
752                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
753                                                   cur_flags);
754
755                 if (mem_type == TTM_PL_SYSTEM)
756                         break;
757
758                 if (man->has_type && man->use_type) {
759                         type_found = true;
760                         do {
761                                 ret = drm_mm_pre_get(&man->manager);
762                                 if (unlikely(ret))
763                                         return ret;
764
765                                 spin_lock(&bdev->lru_lock);
766                                 node = drm_mm_search_free(&man->manager,
767                                                           mem->num_pages,
768                                                           mem->page_alignment,
769                                                           1);
770                                 if (unlikely(!node)) {
771                                         spin_unlock(&bdev->lru_lock);
772                                         break;
773                                 }
774                                 node = drm_mm_get_block_atomic(node,
775                                                                mem->num_pages,
776                                                                mem->
777                                                                page_alignment);
778                                 spin_unlock(&bdev->lru_lock);
779                         } while (!node);
780                 }
781                 if (node)
782                         break;
783         }
784
785         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
786                 mem->mm_node = node;
787                 mem->mem_type = mem_type;
788                 mem->placement = cur_flags;
789                 return 0;
790         }
791
792         if (!type_found)
793                 return -EINVAL;
794
795         num_prios = bdev->driver->num_mem_busy_prio;
796         prios = bdev->driver->mem_busy_prio;
797
798         for (i = 0; i < num_prios; ++i) {
799                 mem_type = prios[i];
800                 man = &bdev->man[mem_type];
801
802                 if (!man->has_type)
803                         continue;
804
805                 if (!ttm_bo_mt_compatible(man,
806                                           bo->type == ttm_bo_type_user,
807                                           mem_type,
808                                           proposed_placement, &cur_flags))
809                         continue;
810
811                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
812                                                   cur_flags);
813
814                 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
815                                              interruptible, no_wait);
816
817                 if (ret == 0 && mem->mm_node) {
818                         mem->placement = cur_flags;
819                         return 0;
820                 }
821
822                 if (ret == -ERESTART)
823                         has_eagain = true;
824         }
825
826         ret = (has_eagain) ? -ERESTART : -ENOMEM;
827         return ret;
828 }
829 EXPORT_SYMBOL(ttm_bo_mem_space);
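/*
 * Placement flags sketch (hypothetical values): @proposed_placement is a
 * bitwise OR of memory-type flags and caching flags, for example
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
 *			     TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
 *
 * ttm_bo_mem_space() walks the driver's mem_type_prio array looking for a
 * compatible type with free space, and only then retries the mem_busy_prio
 * array through ttm_bo_mem_force_space(), i.e. by evicting other buffers.
 */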
830
831 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
832 {
833         int ret = 0;
834
835         if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
836                 return -EBUSY;
837
838         ret = wait_event_interruptible(bo->event_queue,
839                                        atomic_read(&bo->cpu_writers) == 0);
840
841         if (ret == -ERESTARTSYS)
842                 ret = -ERESTART;
843
844         return ret;
845 }
846
847 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
848                        uint32_t proposed_placement,
849                        bool interruptible, bool no_wait)
850 {
851         struct ttm_bo_device *bdev = bo->bdev;
852         int ret = 0;
853         struct ttm_mem_reg mem;
854
855         BUG_ON(!atomic_read(&bo->reserved));
856
857         /*
858          * FIXME: It's possible to pipeline buffer moves.
859          * Have the driver move function wait for idle when necessary,
860          * instead of doing it here.
861          */
862
863         spin_lock(&bo->lock);
864         ret = ttm_bo_wait(bo, false, interruptible, no_wait);
865         spin_unlock(&bo->lock);
866
867         if (ret)
868                 return ret;
869
870         mem.num_pages = bo->num_pages;
871         mem.size = mem.num_pages << PAGE_SHIFT;
872         mem.page_alignment = bo->mem.page_alignment;
873
874         /*
875          * Determine where to move the buffer.
876          */
877
878         ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
879                                interruptible, no_wait);
880         if (ret)
881                 goto out_unlock;
882
883         ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
884
885 out_unlock:
886         if (ret && mem.mm_node) {
887                 spin_lock(&bdev->lru_lock);
888                 drm_mm_put_block(mem.mm_node);
889                 spin_unlock(&bdev->lru_lock);
890         }
891         return ret;
892 }
893
894 static int ttm_bo_mem_compat(uint32_t proposed_placement,
895                              struct ttm_mem_reg *mem)
896 {
897         if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
898                 return 0;
899         if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
900                 return 0;
901
902         return 1;
903 }
904
905 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
906                                uint32_t proposed_placement,
907                                bool interruptible, bool no_wait)
908 {
909         int ret;
910
911         BUG_ON(!atomic_read(&bo->reserved));
912         bo->proposed_placement = proposed_placement;
913
914         TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
915                   (unsigned long)proposed_placement,
916                   (unsigned long)bo->mem.placement);
917
918         /*
919          * Check whether we need to move buffer.
920          */
921
922         if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
923                 ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
924                                          interruptible, no_wait);
925                 if (ret) {
926                         if (ret != -ERESTART)
927                                 printk(KERN_ERR TTM_PFX
928                                        "Failed moving buffer. "
929                                        "Proposed placement 0x%08x\n",
930                                        bo->proposed_placement);
931                         if (ret == -ENOMEM)
932                                 printk(KERN_ERR TTM_PFX
933                                        "Out of aperture space or "
934                                        "DRM memory quota.\n");
935                         return ret;
936                 }
937         }
938
939         /*
940          * We might need to add a TTM.
941          */
942
943         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
944                 ret = ttm_bo_add_ttm(bo, true);
945                 if (ret)
946                         return ret;
947         }
948         /*
949          * Validation has succeeded, move the access and other
950          * non-mapping-related flag bits from the proposed flags to
951          * the active flags
952          */
953
954         ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
955                         ~TTM_PL_MASK_MEMTYPE);
956
957         return 0;
958 }
959 EXPORT_SYMBOL(ttm_buffer_object_validate);
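/*
 * Flow note: ttm_buffer_object_validate() moves the buffer only when
 * ttm_bo_mem_compat() finds the current placement incompatible with the
 * proposed one; the move path is ttm_bo_move_buffer() -> ttm_bo_mem_space()
 * -> ttm_bo_handle_move_mem().  The caller must hold the reservation, and
 * on success the non-memtype placement bits are copied into bo->mem.
 */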
960
961 int
962 ttm_bo_check_placement(struct ttm_buffer_object *bo,
963                        uint32_t set_flags, uint32_t clr_flags)
964 {
965         uint32_t new_mask = set_flags | clr_flags;
966
967         if ((bo->type == ttm_bo_type_user) &&
968             (clr_flags & TTM_PL_FLAG_CACHED)) {
969                 printk(KERN_ERR TTM_PFX
970                        "User buffers require cache-coherent memory.\n");
971                 return -EINVAL;
972         }
973
974         if (!capable(CAP_SYS_ADMIN)) {
975                 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
976                         printk(KERN_ERR TTM_PFX "Need to be root to modify"
977                                " NO_EVICT status.\n");
978                         return -EINVAL;
979                 }
980
981                 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
982                     (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
983                         printk(KERN_ERR TTM_PFX
984                                "Incompatible memory specification"
985                                " for NO_EVICT buffer.\n");
986                         return -EINVAL;
987                 }
988         }
989         return 0;
990 }
991
992 int ttm_buffer_object_init(struct ttm_bo_device *bdev,
993                            struct ttm_buffer_object *bo,
994                            unsigned long size,
995                            enum ttm_bo_type type,
996                            uint32_t flags,
997                            uint32_t page_alignment,
998                            unsigned long buffer_start,
999                            bool interruptible,
1000                            struct file *persistant_swap_storage,
1001                            size_t acc_size,
1002                            void (*destroy) (struct ttm_buffer_object *))
1003 {
1004         int ret = 0;
1005         unsigned long num_pages;
1006
1007         size += buffer_start & ~PAGE_MASK;
1008         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1009         if (num_pages == 0) {
1010                 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1011                 return -EINVAL;
1012         }
1013         bo->destroy = destroy;
1014
1015         spin_lock_init(&bo->lock);
1016         kref_init(&bo->kref);
1017         kref_init(&bo->list_kref);
1018         atomic_set(&bo->cpu_writers, 0);
1019         atomic_set(&bo->reserved, 1);
1020         init_waitqueue_head(&bo->event_queue);
1021         INIT_LIST_HEAD(&bo->lru);
1022         INIT_LIST_HEAD(&bo->ddestroy);
1023         INIT_LIST_HEAD(&bo->swap);
1024         bo->bdev = bdev;
1025         bo->type = type;
1026         bo->num_pages = num_pages;
1027         bo->mem.mem_type = TTM_PL_SYSTEM;
1028         bo->mem.num_pages = bo->num_pages;
1029         bo->mem.mm_node = NULL;
1030         bo->mem.page_alignment = page_alignment;
1031         bo->buffer_start = buffer_start & PAGE_MASK;
1032         bo->priv_flags = 0;
1033         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1034         bo->seq_valid = false;
1035         bo->persistant_swap_storage = persistant_swap_storage;
1036         bo->acc_size = acc_size;
1037
1038         ret = ttm_bo_check_placement(bo, flags, 0ULL);
1039         if (unlikely(ret != 0))
1040                 goto out_err;
1041
1042         /*
1043          * If no caching attributes are set, accept any form of caching.
1044          */
1045
1046         if ((flags & TTM_PL_MASK_CACHING) == 0)
1047                 flags |= TTM_PL_MASK_CACHING;
1048
1049         /*
1050          * For ttm_bo_type_device buffers, allocate
1051          * address space from the device.
1052          */
1053
1054         if (bo->type == ttm_bo_type_device) {
1055                 ret = ttm_bo_setup_vm(bo);
1056                 if (ret)
1057                         goto out_err;
1058         }
1059
1060         ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
1061         if (ret)
1062                 goto out_err;
1063
1064         ttm_bo_unreserve(bo);
1065         return 0;
1066
1067 out_err:
1068         ttm_bo_unreserve(bo);
1069         ttm_bo_unref(&bo);
1070
1071         return ret;
1072 }
1073 EXPORT_SYMBOL(ttm_buffer_object_init);
1074
1075 static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
1076                                  unsigned long num_pages)
1077 {
1078         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1079             PAGE_MASK;
1080
1081         return bdev->ttm_bo_size + 2 * page_array_size;
1082 }
1083
1084 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1085                              unsigned long size,
1086                              enum ttm_bo_type type,
1087                              uint32_t flags,
1088                              uint32_t page_alignment,
1089                              unsigned long buffer_start,
1090                              bool interruptible,
1091                              struct file *persistant_swap_storage,
1092                              struct ttm_buffer_object **p_bo)
1093 {
1094         struct ttm_buffer_object *bo;
1095         int ret;
1096         struct ttm_mem_global *mem_glob = bdev->mem_glob;
1097
1098         size_t acc_size =
1099             ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1100         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
1101         if (unlikely(ret != 0))
1102                 return ret;
1103
1104         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1105
1106         if (unlikely(bo == NULL)) {
1107                 ttm_mem_global_free(mem_glob, acc_size, false);
1108                 return -ENOMEM;
1109         }
1110
1111         ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1112                                      page_alignment, buffer_start,
1113                                      interruptible,
1114                                      persistant_swap_storage, acc_size, NULL);
1115         if (likely(ret == 0))
1116                 *p_bo = bo;
1117
1118         return ret;
1119 }
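/*
 * Creation sketch (hypothetical driver code, error handling trimmed): a
 * mappable, CPU-cached buffer in system memory could be created with
 *
 *	struct ttm_buffer_object *bo;
 *	ret = ttm_buffer_object_create(bdev, size, ttm_bo_type_device,
 *				       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *
 * The object is returned validated against the given flags and unreserved;
 * it is destroyed by dropping the last reference with ttm_bo_unref(&bo).
 */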
1120
1121 static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1122                              uint32_t mem_type, bool allow_errors)
1123 {
1124         int ret;
1125
1126         spin_lock(&bo->lock);
1127         ret = ttm_bo_wait(bo, false, false, false);
1128         spin_unlock(&bo->lock);
1129
1130         if (ret && allow_errors)
1131                 goto out;
1132
1133         if (bo->mem.mem_type == mem_type)
1134                 ret = ttm_bo_evict(bo, mem_type, false, false);
1135
1136         if (ret) {
1137                 if (allow_errors) {
1138                         goto out;
1139                 } else {
1140                         ret = 0;
1141                         printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1142                 }
1143         }
1144
1145 out:
1146         return ret;
1147 }
1148
1149 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1150                                    struct list_head *head,
1151                                    unsigned mem_type, bool allow_errors)
1152 {
1153         struct ttm_buffer_object *entry;
1154         int ret;
1155         int put_count;
1156
1157         /*
1158          * Can't use standard list traversal since we're unlocking.
1159          */
1160
1161         spin_lock(&bdev->lru_lock);
1162
1163         while (!list_empty(head)) {
1164                 entry = list_first_entry(head, struct ttm_buffer_object, lru);
1165                 kref_get(&entry->list_kref);
1166                 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
1167                 put_count = ttm_bo_del_from_lru(entry);
1168                 spin_unlock(&bdev->lru_lock);
1169                 while (put_count--)
1170                         kref_put(&entry->list_kref, ttm_bo_ref_bug);
1171                 BUG_ON(ret);
1172                 ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
1173                 ttm_bo_unreserve(entry);
1174                 kref_put(&entry->list_kref, ttm_bo_release_list);
1175                 spin_lock(&bdev->lru_lock);
1176         }
1177
1178         spin_unlock(&bdev->lru_lock);
1179
1180         return 0;
1181 }
1182
1183 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1184 {
1185         struct ttm_mem_type_manager *man;
1186         int ret = -EINVAL;
1187
1188         if (mem_type >= TTM_NUM_MEM_TYPES) {
1189                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1190                 return ret;
1191         }
1192         man = &bdev->man[mem_type];
1193
1194         if (!man->has_type) {
1195                 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1196                        "memory manager type %u\n", mem_type);
1197                 return ret;
1198         }
1199
1200         man->use_type = false;
1201         man->has_type = false;
1202
1203         ret = 0;
1204         if (mem_type > 0) {
1205                 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
1206
1207                 spin_lock(&bdev->lru_lock);
1208                 if (drm_mm_clean(&man->manager))
1209                         drm_mm_takedown(&man->manager);
1210                 else
1211                         ret = -EBUSY;
1212
1213                 spin_unlock(&bdev->lru_lock);
1214         }
1215
1216         return ret;
1217 }
1218 EXPORT_SYMBOL(ttm_bo_clean_mm);
1219
1220 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1221 {
1222         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1223
1224         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1225                 printk(KERN_ERR TTM_PFX
1226                        "Illegal memory manager memory type %u.\n",
1227                        mem_type);
1228                 return -EINVAL;
1229         }
1230
1231         if (!man->has_type) {
1232                 printk(KERN_ERR TTM_PFX
1233                        "Memory type %u has not been initialized.\n",
1234                        mem_type);
1235                 return 0;
1236         }
1237
1238         return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
1239 }
1240 EXPORT_SYMBOL(ttm_bo_evict_mm);
1241
1242 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1243                    unsigned long p_offset, unsigned long p_size)
1244 {
1245         int ret = -EINVAL;
1246         struct ttm_mem_type_manager *man;
1247
1248         if (type >= TTM_NUM_MEM_TYPES) {
1249                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1250                 return ret;
1251         }
1252
1253         man = &bdev->man[type];
1254         if (man->has_type) {
1255                 printk(KERN_ERR TTM_PFX
1256                        "Memory manager already initialized for type %d\n",
1257                        type);
1258                 return ret;
1259         }
1260
1261         ret = bdev->driver->init_mem_type(bdev, type, man);
1262         if (ret)
1263                 return ret;
1264
1265         ret = 0;
1266         if (type != TTM_PL_SYSTEM) {
1267                 if (!p_size) {
1268                         printk(KERN_ERR TTM_PFX
1269                                "Zero size memory manager type %d\n",
1270                                type);
1271                         return -EINVAL;
1272                 }
1273                 ret = drm_mm_init(&man->manager, p_offset, p_size);
1274                 if (ret)
1275                         return ret;
1276         }
1277         man->has_type = true;
1278         man->use_type = true;
1279         man->size = p_size;
1280
1281         INIT_LIST_HEAD(&man->lru);
1282
1283         return 0;
1284 }
1285 EXPORT_SYMBOL(ttm_bo_init_mm);
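/*
 * Example (hypothetical driver init, sizes in pages): after
 * ttm_bo_device_init() has set up TTM_PL_SYSTEM, each GPU-visible memory
 * type is registered separately, e.g.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_size >> PAGE_SHIFT);
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, 0, gart_size >> PAGE_SHIFT);
 *
 * vram_size and gart_size are placeholders; the offsets and sizes feed the
 * per-type drm_mm range manager used by ttm_bo_mem_space().
 */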
1286
1287 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1288 {
1289         int ret = 0;
1290         unsigned i = TTM_NUM_MEM_TYPES;
1291         struct ttm_mem_type_manager *man;
1292
1293         while (i--) {
1294                 man = &bdev->man[i];
1295                 if (man->has_type) {
1296                         man->use_type = false;
1297                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1298                                 ret = -EBUSY;
1299                                 printk(KERN_ERR TTM_PFX
1300                                        "DRM memory manager type %d "
1301                                        "is not clean.\n", i);
1302                         }
1303                         man->has_type = false;
1304                 }
1305         }
1306
1307         if (!cancel_delayed_work(&bdev->wq))
1308                 flush_scheduled_work();
1309
1310         while (ttm_bo_delayed_delete(bdev, true))
1311                 ;
1312
1313         spin_lock(&bdev->lru_lock);
1314         if (list_empty(&bdev->ddestroy))
1315                 TTM_DEBUG("Delayed destroy list was clean\n");
1316
1317         if (list_empty(&bdev->man[0].lru))
1318                 TTM_DEBUG("Swap list was clean\n");
1319         spin_unlock(&bdev->lru_lock);
1320
1321         ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
1322         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1323         write_lock(&bdev->vm_lock);
1324         drm_mm_takedown(&bdev->addr_space_mm);
1325         write_unlock(&bdev->vm_lock);
1326
1327         __free_page(bdev->dummy_read_page);
1328         return ret;
1329 }
1330 EXPORT_SYMBOL(ttm_bo_device_release);
1331
1332 /*
1333  * This function is intended to be called on drm driver load.
1334  * If you decide to call it from firstopen, you must protect the call
1335  * from a potentially racing ttm_bo_driver_finish in lastclose.
1336  * (This may happen on X server restart).
1337  */
1338
1339 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1340                        struct ttm_mem_global *mem_glob,
1341                        struct ttm_bo_driver *driver, uint64_t file_page_offset,
1342                        bool need_dma32)
1343 {
1344         int ret = -EINVAL;
1345
1346         bdev->dummy_read_page = NULL;
1347         rwlock_init(&bdev->vm_lock);
1348         spin_lock_init(&bdev->lru_lock);
1349
1350         bdev->driver = driver;
1351         bdev->mem_glob = mem_glob;
1352
1353         memset(bdev->man, 0, sizeof(bdev->man));
1354
1355         bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1356         if (unlikely(bdev->dummy_read_page == NULL)) {
1357                 ret = -ENOMEM;
1358                 goto out_err0;
1359         }
1360
1361         /*
1362          * Initialize the system memory buffer type.
1363          * Other types need to be driver / IOCTL initialized.
1364          */
1365         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
1366         if (unlikely(ret != 0))
1367                 goto out_err1;
1368
1369         bdev->addr_space_rb = RB_ROOT;
1370         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1371         if (unlikely(ret != 0))
1372                 goto out_err2;
1373
1374         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1375         bdev->nice_mode = true;
1376         INIT_LIST_HEAD(&bdev->ddestroy);
1377         INIT_LIST_HEAD(&bdev->swap_lru);
1378         bdev->dev_mapping = NULL;
1379         bdev->need_dma32 = need_dma32;
1380         ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
1381         ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
1382         if (unlikely(ret != 0)) {
1383                 printk(KERN_ERR TTM_PFX
1384                        "Could not register buffer object swapout.\n");
1385                 goto out_err2;
1386         }
1387
1388         bdev->ttm_bo_extra_size =
1389                 ttm_round_pot(sizeof(struct ttm_tt)) +
1390                 ttm_round_pot(sizeof(struct ttm_backend));
1391
1392         bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
1393                 ttm_round_pot(sizeof(struct ttm_buffer_object));
1394
1395         return 0;
1396 out_err2:
1397         ttm_bo_clean_mm(bdev, 0);
1398 out_err1:
1399         __free_page(bdev->dummy_read_page);
1400 out_err0:
1401         return ret;
1402 }
1403 EXPORT_SYMBOL(ttm_bo_device_init);
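/*
 * Driver-load sketch (hypothetical names): the embedding driver fills in a
 * struct ttm_bo_driver with its callbacks (backend creation, init_mem_type,
 * evict_flags, move, the sync_obj_* fence hooks, ...) and calls
 *
 *	ret = ttm_bo_device_init(&drv->bdev, mem_glob, &drv_bo_driver,
 *				 file_page_offset, need_dma32);
 *
 * then registers its memory types with ttm_bo_init_mm() and sets
 * bdev->dev_mapping so that ttm_bo_unmap_virtual() can unmap CPU mappings
 * when buffers move.
 */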
1404
1405 /*
1406  * buffer object vm functions.
1407  */
1408
1409 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1410 {
1411         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1412
1413         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1414                 if (mem->mem_type == TTM_PL_SYSTEM)
1415                         return false;
1416
1417                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1418                         return false;
1419
1420                 if (mem->placement & TTM_PL_FLAG_CACHED)
1421                         return false;
1422         }
1423         return true;
1424 }
1425
1426 int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1427                       struct ttm_mem_reg *mem,
1428                       unsigned long *bus_base,
1429                       unsigned long *bus_offset, unsigned long *bus_size)
1430 {
1431         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1432
1433         *bus_size = 0;
1434         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1435                 return -EINVAL;
1436
1437         if (ttm_mem_reg_is_pci(bdev, mem)) {
1438                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1439                 *bus_size = mem->num_pages << PAGE_SHIFT;
1440                 *bus_base = man->io_offset;
1441         }
1442
1443         return 0;
1444 }
1445
1446 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1447 {
1448         struct ttm_bo_device *bdev = bo->bdev;
1449         loff_t offset = (loff_t) bo->addr_space_offset;
1450         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1451
1452         if (!bdev->dev_mapping)
1453                 return;
1454
1455         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1456 }
1457 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1458
1459 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1460 {
1461         struct ttm_bo_device *bdev = bo->bdev;
1462         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1463         struct rb_node *parent = NULL;
1464         struct ttm_buffer_object *cur_bo;
1465         unsigned long offset = bo->vm_node->start;
1466         unsigned long cur_offset;
1467
1468         while (*cur) {
1469                 parent = *cur;
1470                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1471                 cur_offset = cur_bo->vm_node->start;
1472                 if (offset < cur_offset)
1473                         cur = &parent->rb_left;
1474                 else if (offset > cur_offset)
1475                         cur = &parent->rb_right;
1476                 else
1477                         BUG();
1478         }
1479
1480         rb_link_node(&bo->vm_rb, parent, cur);
1481         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1482 }
1483
1484 /**
1485  * ttm_bo_setup_vm:
1486  *
1487  * @bo: the buffer to allocate address space for
1488  *
1489  * Allocate address space in the drm device so that applications
1490  * can mmap the buffer and access the contents. This only
1491  * applies to ttm_bo_type_device objects as others are not
1492  * placed in the drm device address space.
1493  */
1494
1495 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1496 {
1497         struct ttm_bo_device *bdev = bo->bdev;
1498         int ret;
1499
1500 retry_pre_get:
1501         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1502         if (unlikely(ret != 0))
1503                 return ret;
1504
1505         write_lock(&bdev->vm_lock);
1506         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1507                                          bo->mem.num_pages, 0, 0);
1508
1509         if (unlikely(bo->vm_node == NULL)) {
1510                 ret = -ENOMEM;
1511                 goto out_unlock;
1512         }
1513
1514         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1515                                               bo->mem.num_pages, 0);
1516
1517         if (unlikely(bo->vm_node == NULL)) {
1518                 write_unlock(&bdev->vm_lock);
1519                 goto retry_pre_get;
1520         }
1521
1522         ttm_bo_vm_insert_rb(bo);
1523         write_unlock(&bdev->vm_lock);
1524         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1525
1526         return 0;
1527 out_unlock:
1528         write_unlock(&bdev->vm_lock);
1529         return ret;
1530 }
1531
1532 int ttm_bo_wait(struct ttm_buffer_object *bo,
1533                 bool lazy, bool interruptible, bool no_wait)
1534 {
1535         struct ttm_bo_driver *driver = bo->bdev->driver;
1536         void *sync_obj;
1537         void *sync_obj_arg;
1538         int ret = 0;
1539
1540         if (likely(bo->sync_obj == NULL))
1541                 return 0;
1542
1543         while (bo->sync_obj) {
1544
1545                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1546                         void *tmp_obj = bo->sync_obj;
1547                         bo->sync_obj = NULL;
1548                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1549                         spin_unlock(&bo->lock);
1550                         driver->sync_obj_unref(&tmp_obj);
1551                         spin_lock(&bo->lock);
1552                         continue;
1553                 }
1554
1555                 if (no_wait)
1556                         return -EBUSY;
1557
1558                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1559                 sync_obj_arg = bo->sync_obj_arg;
1560                 spin_unlock(&bo->lock);
1561                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1562                                             lazy, interruptible);
1563                 if (unlikely(ret != 0)) {
1564                         driver->sync_obj_unref(&sync_obj);
1565                         spin_lock(&bo->lock);
1566                         return ret;
1567                 }
1568                 spin_lock(&bo->lock);
1569                 if (likely(bo->sync_obj == sync_obj &&
1570                            bo->sync_obj_arg == sync_obj_arg)) {
1571                         void *tmp_obj = bo->sync_obj;
1572                         bo->sync_obj = NULL;
1573                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1574                                   &bo->priv_flags);
1575                         spin_unlock(&bo->lock);
1576                         driver->sync_obj_unref(&sync_obj);
1577                         driver->sync_obj_unref(&tmp_obj);
1578                         spin_lock(&bo->lock);
1579                 } else {
1580                         spin_unlock(&bo->lock);
1581                         driver->sync_obj_unref(&sync_obj);
1582                         spin_lock(&bo->lock);
1583                 }
1584         }
1585         return 0;
1586 }
1587 EXPORT_SYMBOL(ttm_bo_wait);
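
/*
 * A minimal caller sketch: ttm_bo_wait() expects bo->lock to be held and may
 * drop and re-take it internally. The helper name is hypothetical; the same
 * pattern is used by ttm_bo_synccpu_write_grab() below.
 */
static int example_bo_is_idle(struct ttm_buffer_object *bo)
{
	int ret;

	spin_lock(&bo->lock);
	/* lazy = false, interruptible = false, no_wait = true: just probe */
	ret = ttm_bo_wait(bo, false, false, true);
	spin_unlock(&bo->lock);

	return ret;	/* 0 if idle, -EBUSY if the GPU still owns it */
}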
1588
1589 void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1590 {
1591         atomic_set(&bo->reserved, 0);
1592         wake_up_all(&bo->event_queue);
1593 }
1594
1595 int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1596                              bool no_wait)
1597 {
1598         int ret;
1599
1600         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1601                 if (no_wait)
1602                         return -EBUSY;
1603                 else if (interruptible) {
1604                         ret = wait_event_interruptible
1605                             (bo->event_queue, atomic_read(&bo->reserved) == 0);
1606                         if (unlikely(ret != 0))
1607                                 return -ERESTART;
1608                 } else {
1609                         wait_event(bo->event_queue,
1610                                    atomic_read(&bo->reserved) == 0);
1611                 }
1612         }
1613         return 0;
1614 }
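
/*
 * A minimal pairing sketch for the reservation primitives above. Unlike
 * ttm_bo_reserve(), blocking the reservation this way does not update the
 * LRU lists (see the comment in ttm_bo_synccpu_write_grab() below), so this
 * only illustrates the locking pattern.
 */
static int example_block_touch_unblock(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_block_reservation(bo, true, false);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTART if interrupted by a signal */

	/* ... access members that require the buffer to be reserved ... */

	ttm_bo_unblock_reservation(bo);
	return 0;
}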
1615
1616 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1617 {
1618         int ret = 0;
1619
1620         /*
1621          * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1622          * makes sure the lru lists are updated.
1623          */
1624
1625         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1626         if (unlikely(ret != 0))
1627                 return ret;
1628         spin_lock(&bo->lock);
1629         ret = ttm_bo_wait(bo, false, true, no_wait);
1630         spin_unlock(&bo->lock);
1631         if (likely(ret == 0))
1632                 atomic_inc(&bo->cpu_writers);
1633         ttm_bo_unreserve(bo);
1634         return ret;
1635 }
1636
1637 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1638 {
1639         if (atomic_dec_and_test(&bo->cpu_writers))
1640                 wake_up_all(&bo->event_queue);
1641 }
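
/*
 * A sketch of how a driver might use the synccpu calls for a CPU write,
 * assuming the ttm_bo_kmap()/ttm_bo_kunmap() helpers declared in
 * ttm_bo_api.h. The helper name and the single-dword write are illustrative
 * only, and the is_iomem distinction is ignored for brevity.
 */
static int example_cpu_write_dword(struct ttm_buffer_object *bo,
				   unsigned long page, u32 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	u32 *virt;
	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, false);	/* wait for the GPU */
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_kmap(bo, page, 1, &map);
	if (likely(ret == 0)) {
		virt = ttm_kmap_obj_virtual(&map, &is_iomem);
		virt[0] = value;
		ttm_bo_kunmap(&map);
	}

	ttm_bo_synccpu_write_release(bo);
	return ret;
}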
1642
1643 /**
1644  * A buffer object shrink method that tries to swap out the first
1645  * buffer object on the ttm_bo_device::swap_lru list.
1646  */
1647
1648 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1649 {
1650         struct ttm_bo_device *bdev =
1651             container_of(shrink, struct ttm_bo_device, shrink);
1652         struct ttm_buffer_object *bo;
1653         int ret = -EBUSY;
1654         int put_count;
1655         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1656
1657         spin_lock(&bdev->lru_lock);
1658         while (ret == -EBUSY) {
1659                 if (unlikely(list_empty(&bdev->swap_lru))) {
1660                         spin_unlock(&bdev->lru_lock);
1661                         return -EBUSY;
1662                 }
1663
1664                 bo = list_first_entry(&bdev->swap_lru,
1665                                       struct ttm_buffer_object, swap);
1666                 kref_get(&bo->list_kref);
1667
1668                 /*
1669                  * Reserve buffer. Since we unlock while sleeping, we need
1670                  * to re-check that nobody removed us from the swap-list while
1671                  * we slept.
1672                  */
1673
1674                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1675                 if (unlikely(ret == -EBUSY)) {
1676                         spin_unlock(&bdev->lru_lock);
1677                         ttm_bo_wait_unreserved(bo, false);
1678                         kref_put(&bo->list_kref, ttm_bo_release_list);
1679                         spin_lock(&bdev->lru_lock);
1680                 }
1681         }
1682
1683         BUG_ON(ret != 0);
1684         put_count = ttm_bo_del_from_lru(bo);
1685         spin_unlock(&bdev->lru_lock);
1686
1687         while (put_count--)
1688                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1689
1690         /*
1691          * Wait for the GPU, then move the buffer to cached system memory.
1692          */
1693
1694         spin_lock(&bo->lock);
1695         ret = ttm_bo_wait(bo, false, false, false);
1696         spin_unlock(&bo->lock);
1697
1698         if (unlikely(ret != 0))
1699                 goto out;
1700
1701         if ((bo->mem.placement & swap_placement) != swap_placement) {
1702                 struct ttm_mem_reg evict_mem;
1703
1704                 evict_mem = bo->mem;
1705                 evict_mem.mm_node = NULL;
1706                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1707                 evict_mem.mem_type = TTM_PL_SYSTEM;
1708
1709                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1710                                              false, false);
1711                 if (unlikely(ret != 0))
1712                         goto out;
1713         }
1714
1715         ttm_bo_unmap_virtual(bo);
1716
1717         /*
1718          * Swap out. The buffer will be swapped back in as soon as
1719          * anyone tries to access one of its ttm pages.
1720          */
1721
1722         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1723 out:
1724
1725         /*
1726          * Unreserve without putting the buffer back on the LRU lists, to
1727          * avoid immediately swapping out a buffer that has just been
1728          * swapped out.
1729          */
1730
1731         atomic_set(&bo->reserved, 0);
1732         wake_up_all(&bo->event_queue);
1733         kref_put(&bo->list_kref, ttm_bo_release_list);
1734         return ret;
1735 }
1736
1737 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1738 {
1739         while (ttm_bo_swapout(&bdev->shrink) == 0)
1740                 ;
1741 }
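
/*
 * For context, a sketch of how the shrink callback above could be wired into
 * the memory accounting code, assuming the ttm_mem_init_shrink() and
 * ttm_mem_register_shrink() helpers from ttm_memory.h; this is normally done
 * once at device initialization.
 */
static int example_register_swapout(struct ttm_bo_device *bdev)
{
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	return ttm_mem_register_shrink(bdev->mem_glob, &bdev->shrink);
}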