DSS2: VRAM: clear allocated area with DMA
[pandora-kernel.git] arch/arm/plat-omap/vram.c
1 /*
2  * linux/arch/arm/plat-omap/vram.c
3  *
4  * Copyright (C) 2008 Nokia Corporation
5  * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6  *
7  * Some code and ideas taken from drivers/video/omap/ driver
8  * by Imre Deak.
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms of the GNU General Public License version 2 as published by
12  * the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22
23 /*#define DEBUG*/
24
25 #include <linux/vmalloc.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/list.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/proc_fs.h>
31 #include <linux/seq_file.h>
32 #include <linux/bootmem.h>
33 #include <linux/omapfb.h>
34 #include <linux/completion.h>
35
36 #include <asm/setup.h>
37
38 #include <mach/sram.h>
39 #include <mach/vram.h>
40 #include <mach/dma.h>
41
42 #ifdef DEBUG
43 #define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
44 #else
45 #define DBG(format, ...)
46 #endif
47
48 #define OMAP2_SRAM_START                0x40200000
49 /* Maximum size, in reality this is smaller if SRAM is partially locked. */
50 #define OMAP2_SRAM_SIZE                 0xa0000         /* 640k */
51
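/*
 * Bitmap helpers: REG_MAP_SIZE computes how many bytes to allocate for a
 * map with one bit per page, REG_MAP_PTR points to the word holding a
 * given page's bit and REG_MAP_MASK is the bit within that word. They are
 * not referenced elsewhere in this file.
 */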
52 #define REG_MAP_SIZE(_page_cnt) \
53         ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
54 #define REG_MAP_PTR(_rg, _page_nr) \
55         (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
56 #define REG_MAP_MASK(_page_nr) \
57         (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
58
59 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
60
61 /* postponed regions are used to temporarily store region information at boot
62  * time when we cannot yet allocate the region list */
63 #define MAX_POSTPONED_REGIONS 10
64
65 static bool vram_initialized;
66 static int postponed_cnt __initdata;
67 static struct {
68         unsigned long paddr;
69         size_t size;
70 } postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
71
72 struct vram_alloc {
73         struct list_head list;
74         unsigned long paddr;
75         unsigned pages;
76 };
77
78 struct vram_region {
79         struct list_head list;
80         struct list_head alloc_list;
81         unsigned long paddr;
82         unsigned pages;
83 };
84
85 static DEFINE_MUTEX(region_mutex);
86 static LIST_HEAD(region_list);
87
88 static inline int region_mem_type(unsigned long paddr)
89 {
90         if (paddr >= OMAP2_SRAM_START &&
91             paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
92                 return OMAPFB_MEMTYPE_SRAM;
93         else
94                 return OMAPFB_MEMTYPE_SDRAM;
95 }
96
97 static struct vram_region *omap_vram_create_region(unsigned long paddr,
98                 unsigned pages)
99 {
100         struct vram_region *rm;
101
102         rm = kzalloc(sizeof(*rm), GFP_KERNEL);
103
104         if (rm) {
105                 INIT_LIST_HEAD(&rm->alloc_list);
106                 rm->paddr = paddr;
107                 rm->pages = pages;
108         }
109
110         return rm;
111 }
112
113 #if 0
114 static void omap_vram_free_region(struct vram_region *vr)
115 {
116         list_del(&vr->list);
117         kfree(vr);
118 }
119 #endif
120
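/*
 * Create an allocation record and insert it into the region's list, which
 * is kept sorted by ascending physical address so that the reserve/alloc
 * scans below can walk the free gaps in order.
 */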
121 static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
122                 unsigned long paddr, unsigned pages)
123 {
124         struct vram_alloc *va;
125         struct vram_alloc *new;
126
127         new = kzalloc(sizeof(*new), GFP_KERNEL);
128
129         if (!new)
130                 return NULL;
131
132         new->paddr = paddr;
133         new->pages = pages;
134
135         list_for_each_entry(va, &vr->alloc_list, list) {
136                 if (va->paddr > new->paddr)
137                         break;
138         }
139
140         list_add_tail(&new->list, &va->list);
141
142         return new;
143 }
144
145 static void omap_vram_free_allocation(struct vram_alloc *va)
146 {
147         list_del(&va->list);
148         kfree(va);
149 }
150
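/*
 * Register a memory area as VRAM. Before omap_vram_init() has run, the
 * region list cannot be allocated yet, so the area is only recorded in
 * postponed_regions[] and added for real at init time.
 */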
151 int omap_vram_add_region(unsigned long paddr, size_t size)
152 {
153         struct vram_region *rm;
154         unsigned pages;
155
156         if (vram_initialized) {
157                 DBG("adding region paddr %08lx size %d\n",
158                                 paddr, size);
159
160                 size &= PAGE_MASK;
161                 pages = size >> PAGE_SHIFT;
162
163                 rm = omap_vram_create_region(paddr, pages);
164                 if (rm == NULL)
165                         return -ENOMEM;
166
167                 list_add(&rm->list, &region_list);
168         } else {
169                 if (postponed_cnt == MAX_POSTPONED_REGIONS)
170                         return -ENOMEM;
171
172                 postponed_regions[postponed_cnt].paddr = paddr;
173                 postponed_regions[postponed_cnt].size = size;
174
175                 ++postponed_cnt;
176         }
177         return 0;
178 }
179
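/*
 * Free a previous allocation: look for an allocation record that lies
 * entirely inside [paddr, paddr + size) and drop it. The memory itself is
 * not cleared here; clearing happens on allocation.
 */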
180 int omap_vram_free(unsigned long paddr, size_t size)
181 {
182         struct vram_region *rm;
183         struct vram_alloc *alloc;
184         unsigned start, end;
185
186         DBG("free mem paddr %08lx size %d\n", paddr, size);
187
188         size = PAGE_ALIGN(size);
189
190         mutex_lock(&region_mutex);
191
192         list_for_each_entry(rm, &region_list, list) {
193                 list_for_each_entry(alloc, &rm->alloc_list, list) {
194                         start = alloc->paddr;
195                         end = alloc->paddr + (alloc->pages << PAGE_SHIFT);
196
197                         if (start >= paddr && end <= paddr + size)
198                                 goto found;
199                 }
200         }
201
202         mutex_unlock(&region_mutex);
203         return -EINVAL;
204
205 found:
206         omap_vram_free_allocation(alloc);
207
208         mutex_unlock(&region_mutex);
209         return 0;
210 }
211 EXPORT_SYMBOL(omap_vram_free);
212
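/*
 * Reserve a fixed physical range: find a region of the same memory type
 * that contains [paddr, paddr + size) and verify that the range falls into
 * a gap between existing allocations (the allocation list is sorted by
 * address) before recording it.
 */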
213 static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
214 {
215         struct vram_region *rm;
216         struct vram_alloc *alloc;
217         size_t size;
218
219         size = pages << PAGE_SHIFT;
220
221         list_for_each_entry(rm, &region_list, list) {
222                 unsigned long start, end;
223
224                 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
225
226                 if (region_mem_type(rm->paddr) != region_mem_type(paddr))
227                         continue;
228
229                 start = rm->paddr;
230                 end = start + (rm->pages << PAGE_SHIFT) - 1;
231                 if (start > paddr || end < paddr + size - 1)
232                         continue;
233
234                 DBG("block ok, checking allocs\n");
235
236                 list_for_each_entry(alloc, &rm->alloc_list, list) {
237                         end = alloc->paddr - 1;
238
239                         if (start <= paddr && end >= paddr + size - 1)
240                                 goto found;
241
242                         start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
243                 }
244
245                 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
246
247                 if (!(start <= paddr && end >= paddr + size - 1))
248                         continue;
249 found:
250                 DBG("FOUND area start %lx, end %lx\n", start, end);
251
252                 if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
253                         return -ENOMEM;
254
255                 return 0;
256         }
257
258         return -ENOMEM;
259 }
260
261 int omap_vram_reserve(unsigned long paddr, size_t size)
262 {
263         unsigned pages;
264         int r;
265
266         DBG("reserve mem paddr %08lx size %d\n", paddr, size);
267
268         size = PAGE_ALIGN(size);
269         pages = size >> PAGE_SHIFT;
270
271         mutex_lock(&region_mutex);
272
273         r = _omap_vram_reserve(paddr, pages);
274
275         mutex_unlock(&region_mutex);
276
277         return r;
278 }
279 EXPORT_SYMBOL(omap_vram_reserve);
280
281 static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
282 {
283         struct completion *compl = data;
284         complete(compl);
285 }
286
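/*
 * Clear a freshly allocated area by programming a system DMA channel in
 * constant-fill mode to write zeros over it (one frame of 32-bit
 * elements), then wait up to one second for the transfer-complete
 * callback.
 */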
287 static int _omap_vram_clear(u32 paddr, unsigned pages)
288 {
289         struct completion compl;
290         unsigned elem_count;
291         unsigned frame_count;
292         int r;
293         int lch;
294
295         init_completion(&compl);
296
297         r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
298                         _omap_vram_dma_cb,
299                         &compl, &lch);
300         if (r) {
301                 pr_err("VRAM: request_dma failed for memory clear\n");
302                 return -EBUSY;
303         }
304
305         elem_count = pages * PAGE_SIZE / 4;
306         frame_count = 1;
307
308         omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
309                         elem_count, frame_count,
310                         OMAP_DMA_SYNC_ELEMENT,
311                         0, 0);
312
313         omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
314                         paddr, 0, 0);
315
316         omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
317
318         omap_start_dma(lch);
319
320         if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
321                 omap_stop_dma(lch);
322                 pr_err("VRAM: dma timeout while clearing memory\n");
323                 r = -EIO;
324                 goto err;
325         }
326
327         r = 0;
328 err:
329         omap_free_dma(lch);
330
331         return r;
332 }
333
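/*
 * Dynamic allocation: walk the regions of the requested memory type and
 * take the first gap between existing allocations (or the free tail of the
 * region) that is large enough, then zero it with the DMA helper above.
 */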
334 static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
335 {
336         struct vram_region *rm;
337         struct vram_alloc *alloc;
338
339         list_for_each_entry(rm, &region_list, list) {
340                 unsigned long start, end;
341
342                 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
343
344                 if (region_mem_type(rm->paddr) != mtype)
345                         continue;
346
347                 start = rm->paddr;
348
349                 list_for_each_entry(alloc, &rm->alloc_list, list) {
350                         end = alloc->paddr;
351
352                         if (end - start >= pages << PAGE_SHIFT)
353                                 goto found;
354
355                         start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
356                 }
357
358                 end = rm->paddr + (rm->pages << PAGE_SHIFT);
359 found:
360                 if (end - start < pages << PAGE_SHIFT)
361                         continue;
362
363                 DBG("FOUND %lx, end %lx\n", start, end);
364
365                 alloc = omap_vram_create_allocation(rm, start, pages);
366                 if (alloc == NULL)
367                         return -ENOMEM;
368
369                 *paddr = start;
370
371                 _omap_vram_clear(start, pages);
372
373                 return 0;
374         }
375
376         return -ENOMEM;
377 }
378
379 int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
380 {
381         unsigned pages;
382         int r;
383
384         BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
385
386         DBG("alloc mem type %d size %d\n", mtype, size);
387
388         size = PAGE_ALIGN(size);
389         pages = size >> PAGE_SHIFT;
390
391         mutex_lock(&region_mutex);
392
393         r = _omap_vram_alloc(mtype, pages, paddr);
394
395         mutex_unlock(&region_mutex);
396
397         return r;
398 }
399 EXPORT_SYMBOL(omap_vram_alloc);
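/*
 * Example (illustrative sketch only, not part of this file): a display
 * driver could obtain and later release video memory roughly like this,
 * where fb_size is a hypothetical buffer size:
 *
 *	unsigned long paddr;
 *
 *	if (omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, fb_size, &paddr))
 *		return -ENOMEM;
 *	...
 *	omap_vram_free(paddr, fb_size);
 */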
400
401 #ifdef CONFIG_PROC_FS
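/*
 * /proc/omap-vram: a seq_file listing every VRAM region and, indented
 * under it, the current allocations with their physical ranges and sizes.
 */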
402 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
403 {
404         struct list_head *l = v;
405
406         (*pos)++;
407
408         if (list_is_last(l, &region_list))
409                 return NULL;
410
411         return l->next;
412 }
413
414 static void *r_start(struct seq_file *m, loff_t *pos)
415 {
416         loff_t p = *pos;
417         struct list_head *l = &region_list;
418
419         mutex_lock(&region_mutex);
420
421         do {
422                 l = l->next;
423                 if (l == &region_list)
424                         return NULL;
425         } while (p--);
426
427         return l;
428 }
429
430 static void r_stop(struct seq_file *m, void *v)
431 {
432         mutex_unlock(&region_mutex);
433 }
434
435 static int r_show(struct seq_file *m, void *v)
436 {
437         struct vram_region *vr;
438         struct vram_alloc *va;
439         unsigned size;
440
441         vr = list_entry(v, struct vram_region, list);
442
443         size = vr->pages << PAGE_SHIFT;
444
445         seq_printf(m, "%08lx-%08lx (%d bytes)\n",
446                         vr->paddr, vr->paddr + size - 1,
447                         size);
448
449         list_for_each_entry(va, &vr->alloc_list, list) {
450                 size = va->pages << PAGE_SHIFT;
451                 seq_printf(m, "    %08lx-%08lx (%d bytes)\n",
452                                 va->paddr, va->paddr + size - 1,
453                                 size);
454         }
455
456
457
458         return 0;
459 }
460
461 static const struct seq_operations resource_op = {
462         .start  = r_start,
463         .next   = r_next,
464         .stop   = r_stop,
465         .show   = r_show,
466 };
467
468 static int vram_open(struct inode *inode, struct file *file)
469 {
470         return seq_open(file, &resource_op);
471 }
472
473 static const struct file_operations proc_vram_operations = {
474         .open           = vram_open,
475         .read           = seq_read,
476         .llseek         = seq_lseek,
477         .release        = seq_release,
478 };
479
480 static int __init omap_vram_create_proc(void)
481 {
482         proc_create("omap-vram", 0, NULL, &proc_vram_operations);
483
484         return 0;
485 }
486 #endif
487
488 static __init int omap_vram_init(void)
489 {
490         int i, r;
491
492         vram_initialized = 1;
493
494         for (i = 0; i < postponed_cnt; i++)
495                 omap_vram_add_region(postponed_regions[i].paddr,
496                                 postponed_regions[i].size);
497
498 #ifdef CONFIG_PROC_FS
499         r = omap_vram_create_proc();
500         if (r)
501                 return -ENOMEM;
502 #endif
503
504         return 0;
505 }
506
507 arch_initcall(omap_vram_init);
508
509 /* boot-time VRAM allocation helpers */
510
511 /* set from board file */
512 static u32 omapfb_sram_vram_start __initdata;
513 static u32 omapfb_sram_vram_size __initdata;
514
515 /* set from board file */
516 static u32 omapfb_sdram_vram_start __initdata;
517 static u32 omapfb_sdram_vram_size __initdata;
518
519 /* set from kernel cmdline */
520 static u32 omapfb_def_sdram_vram_size __initdata;
521 static u32 omapfb_def_sdram_vram_start __initdata;
522
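/*
 * Parse the "vram=<size>[,<physical start>]" kernel command line option;
 * the size accepts the usual memparse suffixes and the optional start
 * address is read as hexadecimal.
 */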
523 static void __init omapfb_early_vram(char **p)
524 {
525         omapfb_def_sdram_vram_size = memparse(*p, p);
526         if (**p == ',')
527                 omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16);
528 }
529 __early_param("vram=", omapfb_early_vram);
530
531 /*
532  * Called from map_io. We need to call this early enough so that we
533  * can reserve the fixed SDRAM regions before the VM gets hold of them.
534  */
535 void __init omapfb_reserve_sdram(void)
536 {
537         struct bootmem_data     *bdata;
538         unsigned long           sdram_start, sdram_size;
539         u32 paddr;
540         u32 size = 0;
541
542         /* cmdline arg overrides the board file definition */
543         if (omapfb_def_sdram_vram_size) {
544                 size = omapfb_def_sdram_vram_size;
545                 paddr = omapfb_def_sdram_vram_start;
546         }
547
548         if (!size) {
549                 size = omapfb_sdram_vram_size;
550                 paddr = omapfb_sdram_vram_start;
551         }
552
553 #ifdef CONFIG_OMAP2_DSS_VRAM_SIZE
554         if (!size) {
555                 size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024;
556                 paddr = 0;
557         }
558 #endif
559
560         if (!size)
561                 return;
562
563         size = PAGE_ALIGN(size);
564
565         bdata = NODE_DATA(0)->bdata;
566         sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
567         sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
568
569         if (paddr) {
570                 if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
571                                 paddr + size > sdram_start + sdram_size) {
572                         printk(KERN_ERR "Illegal SDRAM region for VRAM\n");
573                         return;
574                 }
575
576                 if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
577                         pr_err("FB: failed to reserve VRAM\n");
578                         return;
579                 }
580         } else {
581                 if (size > sdram_size) {
582                         printk(KERN_ERR "Illegal SDRAM size for VRAM\n");
583                         return;
584                 }
585
586                 paddr = virt_to_phys(alloc_bootmem_pages(size));
587                 BUG_ON(paddr & ~PAGE_MASK);
588         }
589
590         omap_vram_add_region(paddr, size);
591
592         pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
593 }
594
595 /*
596  * Called at sram init time, before anything is pushed to the SRAM stack.
597  * Because of the stack scheme, we will allocate everything from the
598  * start of the lowest address region to the end of SRAM. This will also
599  * include padding for page alignment and possible holes between regions.
600  *
601  * As opposed to the SDRAM case, we'll also do any dynamic allocations at
602  * this point, since the driver built as a module would have problem with
603  * freeing / reallocating the regions.
604  */
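/*
 * Illustrative numbers only: with pstart_avail = 0x40200000, size_avail =
 * 0x10000 and a dynamic request of size = 0x8000, the block is carved from
 * the top of the available area (paddr = 0x40208000) and 0x8000 bytes are
 * reported back as reserved.
 */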
605 unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
606                                   unsigned long sram_vstart,
607                                   unsigned long sram_size,
608                                   unsigned long pstart_avail,
609                                   unsigned long size_avail)
610 {
611         unsigned long                   pend_avail;
612         unsigned long                   reserved;
613         u32 paddr;
614         u32 size;
615
616         paddr = omapfb_sram_vram_start;
617         size = omapfb_sram_vram_size;
618
619         if (!size)
620                 return 0;
621
622         reserved = 0;
623         pend_avail = pstart_avail + size_avail;
624
625         if (!paddr) {
626                 /* Dynamic allocation */
627                 if ((size_avail & PAGE_MASK) < size) {
628                         printk(KERN_ERR "Not enough SRAM for VRAM\n");
629                         return 0;
630                 }
631                 size_avail = (size_avail - size) & PAGE_MASK;
632                 paddr = pstart_avail + size_avail;
633         }
634
635         if (paddr < sram_pstart ||
636                         paddr + size > sram_pstart + sram_size) {
637                 printk(KERN_ERR "Illegal SRAM region for VRAM\n");
638                 return 0;
639         }
640
641         /* Reserve everything above the start of the region. */
642         if (pend_avail - paddr > reserved)
643                 reserved = pend_avail - paddr;
644         size_avail = pend_avail - reserved - pstart_avail;
645
646         omap_vram_add_region(paddr, size);
647
648         if (reserved)
649                 pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
650
651         return reserved;
652 }
653
654 void __init omap2_set_sdram_vram(u32 size, u32 start)
655 {
656         omapfb_sdram_vram_start = start;
657         omapfb_sdram_vram_size = size;
658 }
659
660 void __init omap2_set_sram_vram(u32 size, u32 start)
661 {
662         omapfb_sram_vram_start = start;
663         omapfb_sram_vram_size = size;
664 }
665
666 #endif
667