/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>
#include <asm/uaccess.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* this is to find and return the vmalloc-ed fb pages */
static struct page *fb_deferred_io_nopage(struct vm_area_struct *vma,
					unsigned long vaddr, int *type)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;
	/* info->screen_base is in System RAM */
	void *screen_base = (void __force *) info->screen_base;

	offset = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= info->fix.smem_len)
		return NOPAGE_SIGBUS;

	page = vmalloc_to_page(screen_base + offset);
	if (!page)
		return NOPAGE_OOM;

	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/*
	 * Run it immediately.  schedule_delayed_work() only reports
	 * whether the work was newly queued; that is not an error code,
	 * so do not leak it to userspace as the fsync return value.
	 */
	schedule_delayed_work(&info->deferred_work, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
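
/*
 * Example (a hedged userspace sketch, not part of this file): a client
 * that has mmap()ed the framebuffer can force the deferred IO to run
 * right away by calling fsync() on the device fd, which reaches
 * fb_deferred_io_fsync() above.  The device path and the length
 * variable are illustrative only.
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	unsigned char *fb = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 *
 *	memset(fb, 0xff, len);	-- dirty pages queue up via page_mkwrite
 *	fsync(fd);		-- flushes them through the deferred IO path
 */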

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct page *page)
{
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/*
	 * This is the callback we get when userspace first tries to write
	 * to a page.  We schedule the delayed work, which will eventually
	 * mkclean the touched pages and execute the deferred framebuffer
	 * IO.  Then, if userspace touches a page again, we repeat the
	 * same scheme.
	 */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);
	list_add(&page->lru, &fbdefio->pagelist);
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.nopage		= fb_deferred_io_nopage,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_DONTEXPAND);
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
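
/*
 * Example (a minimal sketch, not from this file) of the deferred_io
 * callback a driver supplies through struct fb_deferred_io and which
 * fb_deferred_io_work() invokes above.  The names mydrv_deferred_io and
 * mydrv_write_page() are hypothetical; using page->index to locate each
 * dirty page's offset within the framebuffer is the common pattern.
 *
 *	static void mydrv_deferred_io(struct fb_info *info,
 *				      struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		list_for_each_entry(page, pagelist, lru) {
 *			unsigned long offset = page->index << PAGE_SHIFT;
 *
 *			-- push one PAGE_SIZE chunk out to the device
 *			mydrv_write_page(info, offset, PAGE_SIZE);
 *		}
 *	}
 */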

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 second */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
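
/*
 * Example (a hedged driver-side sketch, not part of this file): a driver
 * fills in a struct fb_deferred_io, points info->fbdefio at it, and calls
 * fb_deferred_io_init() before register_framebuffer().  The identifiers
 * mydrv_defio and mydrv_deferred_io are hypothetical.
 *
 *	static struct fb_deferred_io mydrv_defio = {
 *		.delay		= HZ,			-- in jiffies
 *		.deferred_io	= mydrv_deferred_io,
 *	};
 *
 *	-- in the driver's probe function:
 *	info->fbdefio = &mydrv_defio;
 *	fb_deferred_io_init(info);
 *	retval = register_framebuffer(info);
 */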

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");