drivers/gpu/drm/ttm/ttm_bo_vm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

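/*
 * Number of PTEs to set up per fault: the faulting page itself plus up
 * to 15 following pages, prefaulted speculatively.
 */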
#define TTM_BO_VM_NUM_PREFAULT 16

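/*
 * Look up the buffer object whose address-space node fully covers
 * [page_start, page_start + num_pages) in the device's rb-tree of
 * vm nodes. Returns NULL if no such object exists. The caller must
 * hold bdev->vm_lock.
 */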
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

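/*
 * Fault handler for TTM mappings: resolves the faulting address to a
 * page within the buffer object and inserts it with vm_insert_mixed(),
 * speculatively prefaulting a number of following pages as well.
 */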
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        set_need_resched();
                        /* fall through */
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        spin_lock(&bo->lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bo->lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bo->lock);

        ret = ttm_mem_io_reserve(bdev, &bo->mem);
        if (ret) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

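        /*
         * Translate the faulting address to a page offset within the
         * buffer object: vma->vm_pgoff is the offset the vma was mapped
         * at, and vm_node->start is the object's base offset in the
         * device address space.
         */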
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * Strictly speaking, we're not allowed to modify vma->vm_page_prot
         * here, since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and consider
         * those bits protected by the bo->mutex, as we should be the only
         * writers. There shouldn't really be any readers of these bits
         * except within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * the first page.
         */

        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem) {
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset)
                               >> PAGE_SHIFT) + page_offset;
                } else {
                        page = ttm_tt_get_page(ttm, page_offset);
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);
                /*
                 * Somebody beat us to this PTE, we are prefaulting into
                 * an already-populated PTE, or a prefaulting error occurred.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }

out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

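/*
 * Each vma holds a reference on the buffer object through
 * vm_private_data; open takes an extra reference for the new vma and
 * close drops it again.
 */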
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};

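/*
 * Set up a userspace mapping of a buffer object: look the object up by
 * the device address-space offset in vma->vm_pgoff, let the driver
 * verify access rights, and hand the vma over to ttm_bo_vm_ops.
 */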
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printk(KERN_ERR TTM_PFX
                       "Could not find buffer object to map.\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

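/*
 * Like ttm_bo_mmap(), but for a caller that already holds the buffer
 * object (e.g. an fbdev driver), so no address-space lookup is needed.
 */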
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

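/*
 * Read from or write to a buffer object through a temporary kernel
 * mapping: the object is looked up from the file offset, reserved and
 * kmapped, and at most the bytes remaining in the object past *f_pos
 * are copied to or from the user buffer.
 */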
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

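        /*
         * Map every page the requested transfer may touch, from the page
         * containing *f_pos through the page containing its last byte.
         */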
        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

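/*
 * Same copy path as ttm_bo_io(), but for a buffer object supplied by
 * the caller (e.g. fbdev), with *f_pos relative to the object's start.
 */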
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return ret;

        *f_pos += io_size;

        return io_size;
}