/* DSS2: VRAM: use debugfs, not procfs
 * arch/arm/plat-omap/vram.c (pandora-kernel.git) */
1 /*
2  * linux/arch/arm/plat-omap/vram.c
3  *
4  * Copyright (C) 2008 Nokia Corporation
5  * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6  *
7  * Some code and ideas taken from drivers/video/omap/ driver
8  * by Imre Deak.
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms of the GNU General Public License version 2 as published by
12  * the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22
23 /*#define DEBUG*/
24
25 #include <linux/vmalloc.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/list.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/seq_file.h>
31 #include <linux/bootmem.h>
32 #include <linux/omapfb.h>
33 #include <linux/completion.h>
34 #include <linux/debugfs.h>
35
36 #include <asm/setup.h>
37
38 #include <mach/sram.h>
39 #include <mach/vram.h>
40 #include <mach/dma.h>
41
42 #ifdef DEBUG
43 #define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
44 #else
45 #define DBG(format, ...)
46 #endif
47
48 #define OMAP2_SRAM_START                0x40200000
49 /* Maximum size, in reality this is smaller if SRAM is partially locked. */
50 #define OMAP2_SRAM_SIZE                 0xa0000         /* 640k */
51
52 #define REG_MAP_SIZE(_page_cnt) \
53         ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
54 #define REG_MAP_PTR(_rg, _page_nr) \
55         (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
56 #define REG_MAP_MASK(_page_nr) \
57         (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
58
59 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
60
61 /* postponed regions are used to temporarily store region information at boot
62  * time when we cannot yet allocate the region list */
63 #define MAX_POSTPONED_REGIONS 10
64
65 static bool vram_initialized;
66 static int postponed_cnt __initdata;
67 static struct {
68         unsigned long paddr;
69         size_t size;
70 } postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
71
72 struct vram_alloc {
73         struct list_head list;
74         unsigned long paddr;
75         unsigned pages;
76 };
77
78 struct vram_region {
79         struct list_head list;
80         struct list_head alloc_list;
81         unsigned long paddr;
82         unsigned pages;
83 };
84
85 static DEFINE_MUTEX(region_mutex);
86 static LIST_HEAD(region_list);
87
88 static inline int region_mem_type(unsigned long paddr)
89 {
90         if (paddr >= OMAP2_SRAM_START &&
91             paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
92                 return OMAPFB_MEMTYPE_SRAM;
93         else
94                 return OMAPFB_MEMTYPE_SDRAM;
95 }
96
97 static struct vram_region *omap_vram_create_region(unsigned long paddr,
98                 unsigned pages)
99 {
100         struct vram_region *rm;
101
102         rm = kzalloc(sizeof(*rm), GFP_KERNEL);
103
104         if (rm) {
105                 INIT_LIST_HEAD(&rm->alloc_list);
106                 rm->paddr = paddr;
107                 rm->pages = pages;
108         }
109
110         return rm;
111 }
112
#if 0
/* Unused: unlink a region from region_list and release it.  Kept for
 * future region-removal support; compiled out via #if 0. */
static void omap_vram_free_region(struct vram_region *vr)
{
	list_del(&vr->list);
	kfree(vr);
}
#endif
120
/*
 * Create an allocation record of @pages pages at @paddr inside region @vr
 * and insert it into the region's allocation list, which is kept sorted
 * by ascending physical address.  Returns the new record, or NULL on OOM.
 * Caller must hold region_mutex.
 */
static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
		unsigned long paddr, unsigned pages)
{
	struct vram_alloc *va;
	struct vram_alloc *new;

	new = kzalloc(sizeof(*va), GFP_KERNEL);

	if (!new)
		return NULL;

	new->paddr = paddr;
	new->pages = pages;

	/* Find the first existing allocation above the new one.  If the
	 * loop runs to completion, 'va' ends up as the (bogus) container
	 * of the list head itself, so the list_add_tail() below appends at
	 * the end of the list — this relies on list_for_each_entry()'s
	 * post-loop cursor value. */
	list_for_each_entry(va, &vr->alloc_list, list) {
		if (va->paddr > new->paddr)
			break;
	}

	/* Insert immediately before 'va', i.e. in sorted position. */
	list_add_tail(&new->list, &va->list);

	return new;
}
144
/* Unlink an allocation record from its region's list and free it.
 * Caller must hold region_mutex. */
static void omap_vram_free_allocation(struct vram_alloc *va)
{
	list_del(&va->list);
	kfree(va);
}
150
151 int omap_vram_add_region(unsigned long paddr, size_t size)
152 {
153         struct vram_region *rm;
154         unsigned pages;
155
156         if (vram_initialized) {
157                 DBG("adding region paddr %08lx size %d\n",
158                                 paddr, size);
159
160                 size &= PAGE_MASK;
161                 pages = size >> PAGE_SHIFT;
162
163                 rm = omap_vram_create_region(paddr, pages);
164                 if (rm == NULL)
165                         return -ENOMEM;
166
167                 list_add(&rm->list, &region_list);
168         } else {
169                 if (postponed_cnt == MAX_POSTPONED_REGIONS)
170                         return -ENOMEM;
171
172                 postponed_regions[postponed_cnt].paddr = paddr;
173                 postponed_regions[postponed_cnt].size = size;
174
175                 ++postponed_cnt;
176         }
177         return 0;
178 }
179
180 int omap_vram_free(unsigned long paddr, size_t size)
181 {
182         struct vram_region *rm;
183         struct vram_alloc *alloc;
184         unsigned start, end;
185
186         DBG("free mem paddr %08lx size %d\n", paddr, size);
187
188         size = PAGE_ALIGN(size);
189
190         mutex_lock(&region_mutex);
191
192         list_for_each_entry(rm, &region_list, list) {
193                 list_for_each_entry(alloc, &rm->alloc_list, list) {
194                         start = alloc->paddr;
195                         end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
196
197                         if (start >= paddr && end < paddr + size)
198                                 goto found;
199                 }
200         }
201
202         mutex_unlock(&region_mutex);
203         return -EINVAL;
204
205 found:
206         omap_vram_free_allocation(alloc);
207
208         mutex_unlock(&region_mutex);
209         return 0;
210 }
211 EXPORT_SYMBOL(omap_vram_free);
212
/*
 * Try to mark the exact physical range [paddr, paddr + pages*PAGE_SIZE)
 * as allocated.  The range must lie inside a single managed region of
 * the same memory type and must not overlap any existing allocation.
 * Caller must hold region_mutex.  Returns 0 on success, -ENOMEM if the
 * range is unavailable or bookkeeping allocation fails.
 */
static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	size_t size;

	size = pages << PAGE_SHIFT;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		if (region_mem_type(rm->paddr) != region_mem_type(paddr))
			continue;

		/* The requested range must fall entirely inside this
		 * region ('end' is the last byte, inclusive). */
		start = rm->paddr;
		end = start + (rm->pages << PAGE_SHIFT) - 1;
		if (start > paddr || end < paddr + size - 1)
			continue;

		DBG("block ok, checking allocs\n");

		/* Walk the sorted allocation list; 'start' tracks the
		 * first free byte of each gap, 'end' its last byte. */
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr - 1;

			if (start <= paddr && end >= paddr + size - 1)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		/* Also check the tail gap after the last allocation. */
		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;

		if (!(start <= paddr && end >= paddr + size - 1))
			continue;
found:
		DBG("FOUND area start %lx, end %lx\n", start, end);

		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
			return -ENOMEM;

		return 0;
	}

	return -ENOMEM;
}
260
261 int omap_vram_reserve(unsigned long paddr, size_t size)
262 {
263         unsigned pages;
264         int r;
265
266         DBG("reserve mem paddr %08lx size %d\n", paddr, size);
267
268         size = PAGE_ALIGN(size);
269         pages = size >> PAGE_SHIFT;
270
271         mutex_lock(&region_mutex);
272
273         r = _omap_vram_reserve(paddr, pages);
274
275         mutex_unlock(&region_mutex);
276
277         return r;
278 }
279 EXPORT_SYMBOL(omap_vram_reserve);
280
281 static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
282 {
283         struct completion *compl = data;
284         complete(compl);
285 }
286
/*
 * Zero-fill @pages pages at physical address @paddr using a system DMA
 * channel in constant-fill mode.  Blocks until the transfer completes or
 * a 1 s timeout expires.  Returns 0 on success, -EBUSY if no DMA channel
 * is available, -EIO on timeout.
 */
static int _omap_vram_clear(u32 paddr, unsigned pages)
{
	struct completion compl;
	unsigned elem_count;
	unsigned frame_count;
	int r;
	int lch;

	init_completion(&compl);

	r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
			_omap_vram_dma_cb,
			&compl, &lch);
	if (r) {
		pr_err("VRAM: request_dma failed for memory clear\n");
		return -EBUSY;
	}

	/* One frame of 32-bit elements covering the whole area. */
	elem_count = pages * PAGE_SIZE / 4;
	frame_count = 1;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
			elem_count, frame_count,
			OMAP_DMA_SYNC_ELEMENT,
			0, 0);

	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
			paddr, 0, 0);

	/* Constant fill with 0: no source buffer needed. */
	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);

	omap_start_dma(lch);

	if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
		omap_stop_dma(lch);
		pr_err("VRAM: dma timeout while clearing memory\n");
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	/* Release the channel on both success and timeout paths. */
	omap_free_dma(lch);

	return r;
}
333
/*
 * First-fit allocator: find a free gap of at least @pages pages in any
 * region of memory type @mtype, record the allocation, DMA-clear the
 * memory, and return its physical address through @paddr.  Caller must
 * hold region_mutex.  Returns 0 on success, -ENOMEM if no gap is large
 * enough or bookkeeping allocation fails.
 */
static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		if (region_mem_type(rm->paddr) != mtype)
			continue;

		start = rm->paddr;

		/* Scan the gaps between existing (sorted) allocations. */
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr;

			if (end - start >= pages << PAGE_SHIFT)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		/* Tail gap: from the last allocation to the region end. */
		end = rm->paddr + (rm->pages << PAGE_SHIFT);
found:
		if (end - start < pages << PAGE_SHIFT)
			continue;

		DBG("FOUND %lx, end %lx\n", start, end);

		alloc = omap_vram_create_allocation(rm, start, pages);
		if (alloc == NULL)
			return -ENOMEM;

		*paddr = start;

		/* Best-effort clear: the return value is ignored, so a
		 * DMA failure does not fail the allocation. */
		_omap_vram_clear(start, pages);

		return 0;
	}

	return -ENOMEM;
}
378
379 int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
380 {
381         unsigned pages;
382         int r;
383
384         BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
385
386         DBG("alloc mem type %d size %d\n", mtype, size);
387
388         size = PAGE_ALIGN(size);
389         pages = size >> PAGE_SHIFT;
390
391         mutex_lock(&region_mutex);
392
393         r = _omap_vram_alloc(mtype, pages, paddr);
394
395         mutex_unlock(&region_mutex);
396
397         return r;
398 }
399 EXPORT_SYMBOL(omap_vram_alloc);
400
401 #if defined(CONFIG_DEBUG_FS)
402 static int vram_debug_show(struct seq_file *s, void *unused)
403 {
404         struct vram_region *vr;
405         struct vram_alloc *va;
406         unsigned size;
407
408         mutex_lock(&region_mutex);
409
410         list_for_each_entry(vr, &region_list, list) {
411                 size = vr->pages << PAGE_SHIFT;
412                 seq_printf(s, "%08lx-%08lx (%d bytes)\n",
413                                 vr->paddr, vr->paddr + size - 1,
414                                 size);
415
416                 list_for_each_entry(va, &vr->alloc_list, list) {
417                         size = va->pages << PAGE_SHIFT;
418                         seq_printf(s, "    %08lx-%08lx (%d bytes)\n",
419                                         va->paddr, va->paddr + size - 1,
420                                         size);
421                 }
422         }
423
424         mutex_unlock(&region_mutex);
425
426         return 0;
427 }
428
/* Bind the seq_file show routine when the debugfs file is opened. */
static int vram_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vram_debug_show, inode->i_private);
}
433
/* File operations for the debugfs "vram" entry (single-shot seq_file). */
static const struct file_operations vram_debug_fops = {
	.open		= vram_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
440
441 static int __init omap_vram_create_debugfs(void)
442 {
443         struct dentry *d;
444
445         d = debugfs_create_file("vram", S_IRUGO, NULL,
446                         NULL, &vram_debug_fops);
447         if (IS_ERR(d))
448                 return PTR_ERR(d);
449
450         return 0;
451 }
452 #endif
453
454 static __init int omap_vram_init(void)
455 {
456         int i;
457
458         vram_initialized = 1;
459
460         for (i = 0; i < postponed_cnt; i++)
461                 omap_vram_add_region(postponed_regions[i].paddr,
462                                 postponed_regions[i].size);
463
464 #ifdef CONFIG_DEBUG_FS
465         if (omap_vram_create_debugfs())
466                 pr_err("VRAM: Failed to create debugfs file\n");
467 #endif
468
469         return 0;
470 }
471
472 arch_initcall(omap_vram_init);
473
474 /* boottime vram alloc stuff */
475
476 /* set from board file */
477 static u32 omapfb_sram_vram_start __initdata;
478 static u32 omapfb_sram_vram_size __initdata;
479
480 /* set from board file */
481 static u32 omapfb_sdram_vram_start __initdata;
482 static u32 omapfb_sdram_vram_size __initdata;
483
484 /* set from kernel cmdline */
485 static u32 omapfb_def_sdram_vram_size __initdata;
486 static u32 omapfb_def_sdram_vram_start __initdata;
487
/*
 * Parse the "vram=<size>[,<physaddr>]" kernel command line option.
 * The size accepts memparse suffixes (k/m/g); the optional start
 * address is parsed as hexadecimal.
 */
static void __init omapfb_early_vram(char **p)
{
	omapfb_def_sdram_vram_size = memparse(*p, p);
	if (**p == ',')
		omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16);
}
__early_param("vram=", omapfb_early_vram);
495
/*
 * Called from map_io. We need to call this early enough so that we
 * can reserve the fixed SDRAM regions before the VM could get hold of
 * them.  Size/start come, in priority order, from the kernel command
 * line ("vram="), the board file, or the Kconfig default.
 */
void __init omapfb_reserve_sdram(void)
{
	struct bootmem_data	*bdata;
	unsigned long		sdram_start, sdram_size;
	u32 paddr;
	u32 size = 0;

	/* cmdline arg overrides the board file definition */
	if (omapfb_def_sdram_vram_size) {
		size = omapfb_def_sdram_vram_size;
		paddr = omapfb_def_sdram_vram_start;
	}

	if (!size) {
		size = omapfb_sdram_vram_size;
		paddr = omapfb_sdram_vram_start;
	}

#ifdef CONFIG_OMAP2_DSS_VRAM_SIZE
	/* Kconfig default: used only if neither cmdline nor board file
	 * gave a size; always dynamically placed (paddr == 0). */
	if (!size) {
		size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024;
		paddr = 0;
	}
#endif

	if (!size)
		return;

	size = PAGE_ALIGN(size);

	/* Determine node 0's physical SDRAM extent from bootmem. */
	bdata = NODE_DATA(0)->bdata;
	sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
	sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;

	if (paddr) {
		/* Fixed placement: must be page aligned and fully inside
		 * SDRAM. */
		if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
				paddr + size > sdram_start + sdram_size) {
			printk(KERN_ERR "Illegal SDRAM region for VRAM\n");
			return;
		}

		if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
			pr_err("FB: failed to reserve VRAM\n");
			return;
		}
	} else {
		/* Dynamic placement: let bootmem choose the pages. */
		if (size > sdram_size) {
			printk(KERN_ERR "Illegal SDRAM size for VRAM\n");
			return;
		}

		paddr = virt_to_phys(alloc_bootmem_pages(size));
		BUG_ON(paddr & ~PAGE_MASK);
	}

	omap_vram_add_region(paddr, size);

	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
}
559
/*
 * Called at SRAM init time, before anything is pushed to the SRAM stack.
 * Because of the stack scheme, we will allocate everything from the
 * start of the lowest address region to the end of SRAM. This will also
 * include padding for page alignment and possible holes between regions.
 *
 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
 * this point, since the driver built as a module would have problems
 * with freeing / reallocating the regions.
 *
 * @sram_pstart:  physical start of SRAM
 * @sram_vstart:  virtual start of SRAM (not referenced in this function)
 * @sram_size:    total SRAM size in bytes
 * @pstart_avail: physical start of the still-unclaimed SRAM area
 * @size_avail:   size of that unclaimed area in bytes
 *
 * Returns the number of bytes reserved off the top of the available
 * area (0 if nothing was reserved).
 */
unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
				  unsigned long sram_vstart,
				  unsigned long sram_size,
				  unsigned long pstart_avail,
				  unsigned long size_avail)
{
	unsigned long			pend_avail;
	unsigned long			reserved;
	u32 paddr;
	u32 size;

	paddr = omapfb_sram_vram_start;
	size = omapfb_sram_vram_size;

	if (!size)
		return 0;

	reserved = 0;
	pend_avail = pstart_avail + size_avail;

	if (!paddr) {
		/* Dynamic allocation */
		if ((size_avail & PAGE_MASK) < size) {
			printk(KERN_ERR "Not enough SRAM for VRAM\n");
			return 0;
		}
		/* Place the area page-aligned at the top of the free
		 * space. */
		size_avail = (size_avail - size) & PAGE_MASK;
		paddr = pstart_avail + size_avail;
	}

	if (paddr < sram_pstart ||
			paddr + size > sram_pstart + sram_size) {
		printk(KERN_ERR "Illegal SRAM region for VRAM\n");
		return 0;
	}

	/* Reserve everything above the start of the region. */
	if (pend_avail - paddr > reserved)
		reserved = pend_avail - paddr;
	size_avail = pend_avail - reserved - pstart_avail;

	omap_vram_add_region(paddr, size);

	if (reserved)
		pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);

	return reserved;
}
618
619 void __init omap2_set_sdram_vram(u32 size, u32 start)
620 {
621         omapfb_sdram_vram_start = start;
622         omapfb_sdram_vram_size = size;
623 }
624
625 void __init omap2_set_sram_vram(u32 size, u32 start)
626 {
627         omapfb_sram_vram_start = start;
628         omapfb_sram_vram_size = size;
629 }
630
631 #endif
632