drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in each drm_mm_node so we know which bo owns a
 * specific node. There is no protection on that pointer, so to keep
 * things from going berserk you must hold the global lru lock while
 * accessing it, and make sure to reset the pointer to NULL whenever
 * you free a node.
 */
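
/*
 * Editor's illustration of the rule above (not part of the driver): any
 * use of the node -> bo back pointer, including clearing it before the
 * node is freed, must happen under the global lru lock. The "private"
 * back-pointer field is an assumption for illustration only.
 */
#if 0
static void example_free_node(struct ttm_bo_global *glob,
                              struct drm_mm_node *node)
{
        spin_lock(&glob->lru_lock);
        node->private = NULL;   /* reset before freeing, per the note */
        drm_mm_put_block(node);
        spin_unlock(&glob->lru_lock);
}
#endif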

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                man->default_caching);
        if (mem_type != TTM_PL_SYSTEM) {
                spin_lock(&bdev->glob->lru_lock);
                drm_mm_debug_table(&man->manager, TTM_PFX);
                spin_unlock(&bdev->glob->lru_lock);
        }
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
                bo, bo->mem.num_pages, bo->mem.size >> 10,
                bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                        i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                        (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
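
/*
 * Editor's sketch (not part of the driver) of the typical driver-side
 * use of the reserve/unreserve pair above: reserve interruptibly
 * without a sequence, work on the object, then unreserve. The helper
 * name is hypothetical.
 */
#if 0
static int example_with_bo_reserved(struct ttm_buffer_object *bo)
{
        int ret;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0))
                return ret;     /* e.g. -ERESTARTSYS from the wait */

        /* bo->mem and other protected members may be used here. */

        ttm_bo_unreserve(bo);
        return 0;
}
#endif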

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
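                /* fall through: device bos need the same ttm as kernel bos */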
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }

        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
                bo->evicted = false;
        }
        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * Call with bo::reserved held and with the lru lock held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks.
 * Will release the bo::reserved lock and the
 * lru lock on exit.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        if (bo->ttm) {

                /**
                 * Release the lru_lock, since we don't want to have
                 * an atomic requirement on ttm_tt[unbind|destroy].
                 */

                spin_unlock(&glob->lru_lock);
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
                spin_lock(&glob->lru_lock);
        }

        if (bo->mem.mm_node) {
                drm_mm_put_block(bo->mem.mm_node);
                bo->mem.mm_node = NULL;
        }

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}


/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
retry:
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);

                /**
                 * Someone else has the object reserved. Bail and retry.
                 */

                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        spin_lock(&bo->lock);
                        goto requeue;
                }

                /**
                 * We can re-check for a sync object without taking
                 * the bo::lock, since setting the sync object also
                 * requires bo::reserved. A busy object at this point
                 * may be caused by another thread starting an
                 * accelerated eviction.
                 */

                if (unlikely(bo->sync_obj)) {
                        atomic_set(&bo->reserved, 0);
                        wake_up_all(&bo->event_queue);
                        spin_unlock(&glob->lru_lock);
                        spin_lock(&bo->lock);
                        if (remove_all)
                                goto retry;
                        else
                                goto requeue;
                }

                put_count = ttm_bo_del_from_lru(bo);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }

                ttm_bo_cleanup_memtype_use(bo);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);

                return 0;
        }
requeue:
        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
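
/*
 * Editor's sketch (not part of the driver): how a caller typically
 * brackets a section where the delayed-destroy work must not run, for
 * example around suspend. The return value of the lock call feeds the
 * resched argument of the unlock call; "example_quiesce" is a
 * hypothetical helper.
 */
#if 0
static void example_quiesce(struct ttm_bo_device *bdev)
{
        int resched = ttm_bo_lock_delayed_workqueue(bdev);

        /* ... the workqueue is guaranteed not to run here ... */

        ttm_bo_unlock_delayed_workqueue(bdev, resched);
}
#endif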

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved = false;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                                no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                spin_lock(&glob->lru_lock);
                if (evict_mem.mm_node) {
                        drm_mm_put_block(evict_mem.mm_node);
                        evict_mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                bool interruptible, bool no_wait_reserve,
                                bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

retry:
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);

        ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
                if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);

                kref_put(&bo->list_kref, ttm_bo_release_list);

                /**
                 * We *need* to retry after releasing the lru lock.
                 */

                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
                                struct ttm_mem_type_manager *man,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                struct drm_mm_node **node)
{
        struct ttm_bo_global *glob = bo->glob;
        unsigned long lpfn;
        int ret;

        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
        *node = NULL;
        do {
                ret = drm_mm_pre_get(&man->manager);
                if (unlikely(ret))
                        return ret;

                spin_lock(&glob->lru_lock);
                *node = drm_mm_search_free_in_range(&man->manager,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(*node == NULL)) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
                *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
                                                        mem->page_alignment,
                                                        placement->fpfn,
                                                        lpfn);
                spin_unlock(&glob->lru_lock);
        } while (*node == NULL);
        return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_reserve,
                                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct drm_mm_node *node;
        int ret;

        do {
                ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
                if (unlikely(ret != 0))
                        return ret;
                if (node)
                        break;
                spin_lock(&glob->lru_lock);
                if (list_empty(&man->lru)) {
                        spin_unlock(&glob->lru_lock);
                        break;
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                                no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (node == NULL)
                return -ENOMEM;
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        struct drm_mm_node *node = NULL;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = ttm_bo_man_get_node(bo, man, placement, mem,
                                                        &node);
                        if (unlikely(ret))
                                return ret;
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved = false;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;
        struct drm_mm_node *node = mem->mm_node;

        if (node && placement->lpfn != 0 &&
            (node->start < placement->fpfn ||
             node->start + node->size > placement->lpfn))
                return -1;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                        TTM_PL_MASK_CACHING) &&
                        (placement->placement[i] & mem->placement &
                        TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that the range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                        (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move the buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the compatible memory placement flags into the
                 * active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
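
/*
 * Editor's sketch (not part of the driver) of driver-side validation:
 * fill a ttm_placement with a single acceptable placement and let
 * ttm_bo_validate() move the (already reserved) buffer if needed. The
 * VRAM/cached flag choice is an illustrative assumption.
 */
#if 0
static int example_move_to_vram(struct ttm_buffer_object *bo)
{
        uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
        struct ttm_placement placement;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &flags;
        placement.num_busy_placement = 1;
        placement.busy_placement = &flags;

        return ttm_bo_validate(bo, &placement, true, false, false);
}
#endif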

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        int i;

        if (placement->fpfn || placement->lpfn) {
                if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
                        printk(KERN_ERR TTM_PFX "Page number range too small. "
                                "Need %lu pages, range is [%u, %u]\n",
                                bo->mem.num_pages, placement->fpfn,
                                placement->lpfn);
                        return -EINVAL;
                }
        }
        for (i = 0; i < placement->num_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        for (i = 0; i < placement->num_busy_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistant_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved = false;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
                        unsigned long buffer_start,
                        bool interruptible,
                        struct file *persistant_swap_storage,
                        struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        int ret;

        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                                buffer_start, interruptible,
                                persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
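
/*
 * Editor's sketch (not part of the driver): creating a small
 * kernel-internal buffer with the convenience wrapper above. "bdev"
 * and "placement" are assumed to be set up by the driver; the one-page
 * size, zero alignment and NULL swap storage are arbitrary examples.
 */
#if 0
static int example_create_bo(struct ttm_bo_device *bdev,
                             struct ttm_placement *placement,
                             struct ttm_buffer_object **p_bo)
{
        return ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
                             placement, 0, 0, false, NULL, p_bo);
}
#endif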

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                        unsigned mem_type, bool allow_errors)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        int ret;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
                        } else {
                                printk(KERN_ERR TTM_PFX
                                        "Cleanup eviction failed\n");
                        }
                }
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);

                spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                        unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return -EINVAL;
                }
                ret = drm_mm_init(&man->manager, 0, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
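
/*
 * Editor's sketch (not part of the driver): a driver bringing up a
 * VRAM manager after ttm_bo_device_init(). p_size is in pages; the
 * 256 MiB figure is an arbitrary example.
 */
#if 0
static int example_init_vram(struct ttm_bo_device *bdev)
{
        return ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                              (256 * 1024 * 1024) >> PAGE_SHIFT);
}
#endif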

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        ret = kobject_init_and_add(
                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
1515
1516
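/**
 * ttm_bo_device_release:
 *
 * @bdev: The device to release.
 *
 * Tear down a buffer object device: disable and clean all memory type
 * managers, remove the device from the global device list, drain the
 * delayed-destroy list and take down the address space manager.
 *
 * Returns 0 on success, or -EBUSY if a non-system memory manager
 * could not be cleaned.
 */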
1517 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1518 {
1519         int ret = 0;
1520         unsigned i = TTM_NUM_MEM_TYPES;
1521         struct ttm_mem_type_manager *man;
1522         struct ttm_bo_global *glob = bdev->glob;
1523
1524         while (i--) {
1525                 man = &bdev->man[i];
1526                 if (man->has_type) {
1527                         man->use_type = false;
1528                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1529                                 ret = -EBUSY;
1530                                 printk(KERN_ERR TTM_PFX
1531                                        "DRM memory manager type %d "
1532                                        "is not clean.\n", i);
1533                         }
1534                         man->has_type = false;
1535                 }
1536         }
1537
1538         mutex_lock(&glob->device_list_mutex);
1539         list_del(&bdev->device_list);
1540         mutex_unlock(&glob->device_list_mutex);
1541
1542         if (!cancel_delayed_work(&bdev->wq))
1543                 flush_scheduled_work();
1544
1545         while (ttm_bo_delayed_delete(bdev, true))
1546                 ;
1547
1548         spin_lock(&glob->lru_lock);
1549         if (list_empty(&bdev->ddestroy))
1550                 TTM_DEBUG("Delayed destroy list was clean\n");
1551
1552         if (list_empty(&bdev->man[0].lru))
1553                 TTM_DEBUG("Swap list was clean\n");
1554         spin_unlock(&glob->lru_lock);
1555
1556         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1557         write_lock(&bdev->vm_lock);
1558         drm_mm_takedown(&bdev->addr_space_mm);
1559         write_unlock(&bdev->vm_lock);
1560
1561         return ret;
1562 }
1563 EXPORT_SYMBOL(ttm_bo_device_release);
1564
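/**
 * ttm_bo_device_init:
 *
 * @bdev: The device to initialize.
 * @glob: Per-subsystem global state to attach the device to.
 * @driver: Driver callbacks for this device.
 * @file_page_offset: Page offset at which the mmap address space starts.
 * @need_dma32: Restrict page allocations to the DMA32 zone.
 *
 * Initialize a buffer object device, including the mandatory system
 * memory type and the mmap address space manager, and add it to the
 * global device list.
 *
 * Returns 0 on success, or a negative error code on failure.
 */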
1565 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1566                        struct ttm_bo_global *glob,
1567                        struct ttm_bo_driver *driver,
1568                        uint64_t file_page_offset,
1569                        bool need_dma32)
1570 {
1571         int ret = -EINVAL;
1572
1573         rwlock_init(&bdev->vm_lock);
1574         bdev->driver = driver;
1575
1576         memset(bdev->man, 0, sizeof(bdev->man));
1577
1578         /*
1579          * Initialize the system memory buffer type.
1580          * Other types must be set up by the driver, typically via ioctls.
1581          */
1582         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1583         if (unlikely(ret != 0))
1584                 goto out_no_sys;
1585
1586         bdev->addr_space_rb = RB_ROOT;
1587         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1588         if (unlikely(ret != 0))
1589                 goto out_no_addr_mm;
1590
1591         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1592         bdev->nice_mode = true;
1593         INIT_LIST_HEAD(&bdev->ddestroy);
1594         bdev->dev_mapping = NULL;
1595         bdev->glob = glob;
1596         bdev->need_dma32 = need_dma32;
1597
1598         mutex_lock(&glob->device_list_mutex);
1599         list_add_tail(&bdev->device_list, &glob->device_list);
1600         mutex_unlock(&glob->device_list_mutex);
1601
1602         return 0;
1603 out_no_addr_mm:
1604         ttm_bo_clean_mm(bdev, 0);
1605 out_no_sys:
1606         return ret;
1607 }
1608 EXPORT_SYMBOL(ttm_bo_device_init);
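
/*
 * Illustrative call only: "dev_priv", "driver_bo_driver" and
 * DRM_FILE_PAGE_OFFSET are driver-side names, not part of this file:
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, glob, &driver_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET, dev_priv->need_dma32);
 */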
1609
1610 /*
1611  * buffer object vm functions.
1612  */
1613
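/**
 * ttm_mem_reg_is_pci:
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to query.
 *
 * Returns true if CPU access to @mem has to go through an aperture
 * mapping: the memory type is fixed, or the region is bound uncached
 * through non-CMA hardware such as AGP. Returns false if the
 * underlying pages can be accessed directly.
 */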
1614 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1615 {
1616         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1617
1618         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1619                 if (mem->mem_type == TTM_PL_SYSTEM)
1620                         return false;
1621
1622                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1623                         return false;
1624
1625                 if (mem->placement & TTM_PL_FLAG_CACHED)
1626                         return false;
1627         }
1628         return true;
1629 }
1630
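/**
 * ttm_bo_unmap_virtual:
 *
 * @bo: The buffer object.
 *
 * Kill all CPU mappings of @bo in the device address space and free
 * any io resources set up for its current placement. Subsequent
 * faults will create fresh mappings.
 */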
1631 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1632 {
1633         struct ttm_bo_device *bdev = bo->bdev;
1634         loff_t offset = (loff_t) bo->addr_space_offset;
1635         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1636
1637         if (!bdev->dev_mapping)
1638                 return;
1639         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1640         ttm_mem_io_free(bdev, &bo->mem);
1641 }
1642 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1643
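/**
 * ttm_bo_vm_insert_rb:
 *
 * @bo: The buffer object to insert.
 *
 * Insert @bo into the device's address space red-black tree, keyed on
 * the start of its vm_node, so that it can be looked up by mmap offset
 * at fault time. Called with bdev::vm_lock held for writing.
 */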
1644 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1645 {
1646         struct ttm_bo_device *bdev = bo->bdev;
1647         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1648         struct rb_node *parent = NULL;
1649         struct ttm_buffer_object *cur_bo;
1650         unsigned long offset = bo->vm_node->start;
1651         unsigned long cur_offset;
1652
1653         while (*cur) {
1654                 parent = *cur;
1655                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1656                 cur_offset = cur_bo->vm_node->start;
1657                 if (offset < cur_offset)
1658                         cur = &parent->rb_left;
1659                 else if (offset > cur_offset)
1660                         cur = &parent->rb_right;
1661                 else
1662                         BUG();
1663         }
1664
1665         rb_link_node(&bo->vm_rb, parent, cur);
1666         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1667 }
1668
1669 /**
1670  * ttm_bo_setup_vm:
1671  *
1672  * @bo: the buffer to allocate address space for
1673  *
1674  * Allocate address space in the drm device so that applications
1675  * can mmap the buffer and access the contents. This only
1676  * applies to ttm_bo_type_device objects as others are not
1677  * placed in the drm device address space.
1678  */
1679
1680 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1681 {
1682         struct ttm_bo_device *bdev = bo->bdev;
1683         int ret;
1684
1685 retry_pre_get:
1686         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1687         if (unlikely(ret != 0))
1688                 return ret;
1689
1690         write_lock(&bdev->vm_lock);
1691         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1692                                          bo->mem.num_pages, 0, 0);
1693
1694         if (unlikely(bo->vm_node == NULL)) {
1695                 ret = -ENOMEM;
1696                 goto out_unlock;
1697         }
1698
1699         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1700                                               bo->mem.num_pages, 0);
1701
1702         if (unlikely(bo->vm_node == NULL)) {
1703                 write_unlock(&bdev->vm_lock);
1704                 goto retry_pre_get;
1705         }
1706
1707         ttm_bo_vm_insert_rb(bo);
1708         write_unlock(&bdev->vm_lock);
1709         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1710
1711         return 0;
1712 out_unlock:
1713         write_unlock(&bdev->vm_lock);
1714         return ret;
1715 }
1716
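/**
 * ttm_bo_wait:
 *
 * @bo: The buffer object.
 * @lazy: Hint passed through to the driver's sync_obj_wait().
 * @interruptible: Make the wait interruptible by signals.
 * @no_wait: Return -EBUSY instead of blocking if the bo is still busy.
 *
 * Wait for the sync object attached to @bo to signal, dropping and
 * re-taking bo::lock around the actual driver wait. Called with
 * bo::lock held.
 *
 * Returns 0 when idle, -EBUSY if @no_wait was set and the bo was busy,
 * or the error returned by the driver's sync_obj_wait().
 */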
1717 int ttm_bo_wait(struct ttm_buffer_object *bo,
1718                 bool lazy, bool interruptible, bool no_wait)
1719 {
1720         struct ttm_bo_driver *driver = bo->bdev->driver;
1721         void *sync_obj;
1722         void *sync_obj_arg;
1723         int ret = 0;
1724
1725         if (likely(bo->sync_obj == NULL))
1726                 return 0;
1727
1728         while (bo->sync_obj) {
1729
1730                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1731                         void *tmp_obj = bo->sync_obj;
1732                         bo->sync_obj = NULL;
1733                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1734                         spin_unlock(&bo->lock);
1735                         driver->sync_obj_unref(&tmp_obj);
1736                         spin_lock(&bo->lock);
1737                         continue;
1738                 }
1739
1740                 if (no_wait)
1741                         return -EBUSY;
1742
1743                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1744                 sync_obj_arg = bo->sync_obj_arg;
1745                 spin_unlock(&bo->lock);
1746                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1747                                             lazy, interruptible);
1748                 if (unlikely(ret != 0)) {
1749                         driver->sync_obj_unref(&sync_obj);
1750                         spin_lock(&bo->lock);
1751                         return ret;
1752                 }
1753                 spin_lock(&bo->lock);
1754                 if (likely(bo->sync_obj == sync_obj &&
1755                            bo->sync_obj_arg == sync_obj_arg)) {
1756                         void *tmp_obj = bo->sync_obj;
1757                         bo->sync_obj = NULL;
1758                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1759                                   &bo->priv_flags);
1760                         spin_unlock(&bo->lock);
1761                         driver->sync_obj_unref(&sync_obj);
1762                         driver->sync_obj_unref(&tmp_obj);
1763                         spin_lock(&bo->lock);
1764                 } else {
1765                         spin_unlock(&bo->lock);
1766                         driver->sync_obj_unref(&sync_obj);
1767                         spin_lock(&bo->lock);
1768                 }
1769         }
1770         return 0;
1771 }
1772 EXPORT_SYMBOL(ttm_bo_wait);
1773
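/**
 * ttm_bo_synccpu_write_grab:
 *
 * @bo: The buffer object.
 * @no_wait: Fail with -EBUSY rather than blocking on reservation or
 * on the GPU.
 *
 * Wait for the GPU to finish with @bo and then bump bo::cpu_writers
 * to mark a CPU write in progress. The reservation keeps the LRU
 * lists consistent while doing so.
 *
 * Returns 0 on success, or a negative error code on failure.
 */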
1774 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1775 {
1776         int ret = 0;
1777
1778         /*
1779          * Using ttm_bo_reserve makes sure the lru lists are updated.
1780          */
1781
1782         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1783         if (unlikely(ret != 0))
1784                 return ret;
1785         spin_lock(&bo->lock);
1786         ret = ttm_bo_wait(bo, false, true, no_wait);
1787         spin_unlock(&bo->lock);
1788         if (likely(ret == 0))
1789                 atomic_inc(&bo->cpu_writers);
1790         ttm_bo_unreserve(bo);
1791         return ret;
1792 }
1793 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1794
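/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo: The buffer object.
 *
 * Drop a CPU-write reference taken with ttm_bo_synccpu_write_grab()
 * and wake up anyone waiting for the writer count to reach zero.
 */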
1795 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1796 {
1797         if (atomic_dec_and_test(&bo->cpu_writers))
1798                 wake_up_all(&bo->event_queue);
1799 }
1800 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
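
/*
 * Illustrative pairing only; how the CPU mapping is obtained is
 * driver-specific:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... write through a CPU mapping of the bo ...
 *	ttm_bo_synccpu_write_release(bo);
 */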
1801
1802 /**
1803  * A buffer object shrink method that tries to swap out the first
1804  * buffer object on the bo_global::swap_lru list.
1805  */
1806
1807 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1808 {
1809         struct ttm_bo_global *glob =
1810             container_of(shrink, struct ttm_bo_global, shrink);
1811         struct ttm_buffer_object *bo;
1812         int ret = -EBUSY;
1813         int put_count;
1814         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1815
1816         spin_lock(&glob->lru_lock);
1817         while (ret == -EBUSY) {
1818                 if (unlikely(list_empty(&glob->swap_lru))) {
1819                         spin_unlock(&glob->lru_lock);
1820                         return -EBUSY;
1821                 }
1822
1823                 bo = list_first_entry(&glob->swap_lru,
1824                                       struct ttm_buffer_object, swap);
1825                 kref_get(&bo->list_kref);
1826
1827                 /**
1828                  * Reserve buffer. Since we unlock while sleeping, we need
1829                  * to re-check that nobody removed us from the swap-list while
1830                  * we slept.
1831                  */
1832
1833                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1834                 if (unlikely(ret == -EBUSY)) {
1835                         spin_unlock(&glob->lru_lock);
1836                         ttm_bo_wait_unreserved(bo, false);
1837                         kref_put(&bo->list_kref, ttm_bo_release_list);
1838                         spin_lock(&glob->lru_lock);
1839                 }
1840         }
1841
1842         BUG_ON(ret != 0);
1843         put_count = ttm_bo_del_from_lru(bo);
1844         spin_unlock(&glob->lru_lock);
1845
1846         while (put_count--)
1847                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1848
1849         /**
1850          * Wait for GPU, then move to system cached.
1851          */
1852
1853         spin_lock(&bo->lock);
1854         ret = ttm_bo_wait(bo, false, false, false);
1855         spin_unlock(&bo->lock);
1856
1857         if (unlikely(ret != 0))
1858                 goto out;
1859
1860         if ((bo->mem.placement & swap_placement) != swap_placement) {
1861                 struct ttm_mem_reg evict_mem;
1862
1863                 evict_mem = bo->mem;
1864                 evict_mem.mm_node = NULL;
1865                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1866                 evict_mem.mem_type = TTM_PL_SYSTEM;
1867
1868                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1869                                              false, false, false);
1870                 if (unlikely(ret != 0))
1871                         goto out;
1872         }
1873
1874         ttm_bo_unmap_virtual(bo);
1875
1876         /**
1877          * Swap out. Buffer will be swapped in again as soon as
1878          * anyone tries to access a ttm page.
1879          */
1880
1881         if (bo->bdev->driver->swap_notify)
1882                 bo->bdev->driver->swap_notify(bo);
1883
1884         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1885 out:
1886
1887         /**
1888          * Unreserve without putting the bo back on the LRU lists,
1889          * to avoid immediately swapping out a buffer that was just
1890          * swapped out.
1891          */
1892
1893         atomic_set(&bo->reserved, 0);
1894         wake_up_all(&bo->event_queue);
1895         kref_put(&bo->list_kref, ttm_bo_release_list);
1896         return ret;
1897 }
1898
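/**
 * ttm_bo_swapout_all:
 *
 * @bdev: A device whose global state holds the swap LRU to drain.
 *
 * Repeatedly invoke the swapout shrinker until no more buffer objects
 * can be swapped out.
 */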
1899 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1900 {
1901         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1902                 ;
1903 }
1904 EXPORT_SYMBOL(ttm_bo_swapout_all);