2 * linux/arch/arm/plat-omap/vram.c
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * Some code and ideas taken from drivers/video/omap/ driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
25 #include <linux/vmalloc.h>
26 #include <linux/kernel.h>
28 #include <linux/list.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/proc_fs.h>
31 #include <linux/seq_file.h>
32 #include <linux/bootmem.h>
33 #include <linux/omapfb.h>
35 #include <asm/setup.h>
37 #include <mach/sram.h>
38 #include <mach/vram.h>
/*
 * Debug logging helper.  The extraction showed two unconditional,
 * conflicting definitions of DBG (a macro redefinition); restore the
 * conventional guard so debug output compiles in only when DEBUG is set
 * and DBG() expands to nothing otherwise.
 */
#ifdef DEBUG
#define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif
/* Physical base address of the OMAP2 on-chip SRAM; used by
 * region_mem_type() to classify a paddr as SRAM vs SDRAM. */
46 #define OMAP2_SRAM_START 0x40200000
47 /* Maximum size, in reality this is smaller if SRAM is partially locked. */
48 #define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
/*
 * Page-usage bitmap helpers (one bit per page, stored in an array of
 * unsigned long):
 *  REG_MAP_SIZE(n):  bytes needed for an n-page bitmap, rounded up to a
 *                    whole number of unsigned longs.
 *  REG_MAP_PTR(rg, p):  address of the word holding page p's bit.
 *  REG_MAP_MASK(p):     bit mask for page p within its word.
 */
50 #define REG_MAP_SIZE(_page_cnt) \
51 ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
52 #define REG_MAP_PTR(_rg, _page_nr) \
53 (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
54 #define REG_MAP_MASK(_page_nr) \
55 (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
57 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
59 /* postponed regions are used to temporarily store region information at boot
60 * time when we cannot yet allocate the region list */
61 #define MAX_POSTPONED_REGIONS 10
/* Set in omap_vram_init(); until then omap_vram_add_region() cannot
 * kmalloc and must stash requests in postponed_regions[]. */
63 static bool vram_initialized;
64 static int postponed_cnt __initdata;
/* NOTE(review): the struct head of this array is not visible in this
 * extraction; the entries hold the {paddr, size} pairs replayed later. */
68 } postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
/* NOTE(review): fragments of struct vram_region / struct vram_alloc.
 * Each region carries an alloc_list of allocations, kept sorted by
 * ascending paddr (see omap_vram_create_allocation()). */
71 struct list_head list;
77 struct list_head list;
78 struct list_head alloc_list;
/* region_mutex protects region_list and every region's alloc_list. */
83 static DEFINE_MUTEX(region_mutex);
84 static LIST_HEAD(region_list);
/*
 * Classify a physical address: OMAPFB_MEMTYPE_SRAM if it lies inside the
 * fixed OMAP2 SRAM window, OMAPFB_MEMTYPE_SDRAM otherwise.
 */
86 static inline int region_mem_type(unsigned long paddr)
88 if (paddr >= OMAP2_SRAM_START &&
89 paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
90 return OMAPFB_MEMTYPE_SRAM;
92 return OMAPFB_MEMTYPE_SDRAM;
/*
 * Allocate a zeroed struct vram_region describing `pages' pages starting
 * at paddr, with an empty alloc_list.  Returns NULL if kzalloc fails
 * (NOTE(review): the NULL check and field assignments are not visible in
 * this extraction).
 */
95 static struct vram_region *omap_vram_create_region(unsigned long paddr,
98 struct vram_region *rm;
100 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
103 INIT_LIST_HEAD(&rm->alloc_list);
/* Release a region descriptor; body not visible in this extraction. */
112 static void omap_vram_free_region(struct vram_region *vr)
/*
 * Record an allocation of `pages' pages at paddr inside region vr.
 * The region's alloc_list is kept sorted by ascending paddr: the walk
 * stops at the first entry with a higher address and the new node is
 * inserted in front of it (list_add_tail on that entry).
 * Returns the new vram_alloc, or NULL if kzalloc fails.
 */
119 static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
120 unsigned long paddr, unsigned pages)
122 struct vram_alloc *va;
123 struct vram_alloc *new;
125 new = kzalloc(sizeof(*va), GFP_KERNEL);
133 list_for_each_entry(va, &vr->alloc_list, list) {
134 if (va->paddr > new->paddr)
138 list_add_tail(&new->list, &va->list);
/* Unlink and free one allocation record; body not visible here. */
143 static void omap_vram_free_allocation(struct vram_alloc *va)
/*
 * Register a memory area as VRAM.  Once the allocator is live
 * (vram_initialized) the region is created and linked onto region_list
 * immediately; during early boot, before kmalloc works, the {paddr, size}
 * pair is stashed in postponed_regions[] and replayed by omap_vram_init().
 * NOTE(review): "®ion_list" below is a mis-encoding of "&region_list".
 */
149 int omap_vram_add_region(unsigned long paddr, size_t size)
151 struct vram_region *rm;
154 if (vram_initialized) {
155 DBG("adding region paddr %08lx size %d\n",
159 pages = size >> PAGE_SHIFT;
161 rm = omap_vram_create_region(paddr, pages);
165 list_add(&rm->list, ®ion_list);
/* early-boot path: fail if the postponed table is already full */
167 if (postponed_cnt == MAX_POSTPONED_REGIONS)
170 postponed_regions[postponed_cnt].paddr = paddr;
171 postponed_regions[postponed_cnt].size = size;
178 int omap_vram_free(unsigned long paddr, size_t size)
180 struct vram_region *rm;
181 struct vram_alloc *alloc;
184 DBG("free mem paddr %08lx size %d\n", paddr, size);
186 size = PAGE_ALIGN(size);
188 mutex_lock(®ion_mutex);
190 list_for_each_entry(rm, ®ion_list, list) {
191 list_for_each_entry(alloc, &rm->alloc_list, list) {
192 start = alloc->paddr;
193 end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
195 if (start >= paddr && end < paddr + size)
200 mutex_unlock(®ion_mutex);
204 omap_vram_free_allocation(alloc);
206 mutex_unlock(®ion_mutex);
209 EXPORT_SYMBOL(omap_vram_free);
/*
 * Reserve the exact physical range [paddr, paddr + pages*PAGE_SIZE).
 * Caller holds region_mutex.
 *
 * For each region of the same memory type that fully contains the range,
 * scan the gaps between its address-sorted allocations; if one gap covers
 * the whole requested range, record the allocation there.
 * NOTE(review): "®ion_list" is a mis-encoding of "&region_list"; the
 * success/failure return statements are not visible in this extraction.
 */
211 static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
213 struct vram_region *rm;
214 struct vram_alloc *alloc;
217 size = pages << PAGE_SHIFT;
219 list_for_each_entry(rm, ®ion_list, list) {
220 unsigned long start, end;
222 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
/* skip regions of the wrong memory type (SRAM vs SDRAM) */
224 if (region_mem_type(rm->paddr) != region_mem_type(paddr))
228 end = start + (rm->pages << PAGE_SHIFT) - 1;
/* region must contain the whole requested range */
229 if (start > paddr || end < paddr + size - 1)
232 DBG("block ok, checking allocs\n");
/* gap before each existing allocation: [start, alloc->paddr - 1] */
234 list_for_each_entry(alloc, &rm->alloc_list, list) {
235 end = alloc->paddr - 1;
237 if (start <= paddr && end >= paddr + size - 1)
240 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
/* final gap: from the last allocation to the end of the region */
243 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
245 if (!(start <= paddr && end >= paddr + size - 1))
248 DBG("FOUND area start %lx, end %lx\n", start, end);
250 if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
/*
 * Public entry point: page-align the request, then reserve the fixed
 * range under region_mutex via _omap_vram_reserve().
 * NOTE(review): "®ion_mutex" is a mis-encoding of "&region_mutex".
 */
259 int omap_vram_reserve(unsigned long paddr, size_t size)
264 DBG("reserve mem paddr %08lx size %d\n", paddr, size);
266 size = PAGE_ALIGN(size);
267 pages = size >> PAGE_SHIFT;
269 mutex_lock(®ion_mutex);
271 r = _omap_vram_reserve(paddr, pages);
273 mutex_unlock(®ion_mutex);
277 EXPORT_SYMBOL(omap_vram_reserve);
/*
 * First-fit allocator: find a free gap of at least `pages' pages in any
 * region of memory type `mtype', record an allocation there and return
 * its physical address via *paddr.  Caller holds region_mutex.
 * Gaps are scanned in list order: before each existing (address-sorted)
 * allocation, then after the last one up to the end of the region.
 * NOTE(review): "®ion_list" is a mis-encoding of "&region_list".
 */
279 static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
281 struct vram_region *rm;
282 struct vram_alloc *alloc;
284 list_for_each_entry(rm, ®ion_list, list) {
285 unsigned long start, end;
287 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
289 if (region_mem_type(rm->paddr) != mtype)
294 list_for_each_entry(alloc, &rm->alloc_list, list) {
/* gap [start, end) before this allocation is big enough? */
297 if (end - start >= pages << PAGE_SHIFT)
300 start = alloc->paddr + (alloc->pages << PAGE_SHIFT)
303 end = rm->paddr + (rm->pages << PAGE_SHIFT);
/* tail gap after the last allocation too small: try next region */
305 if (end - start < pages << PAGE_SHIFT)
308 DBG("FOUND %lx, end %lx\n", start, end);
310 alloc = omap_vram_create_allocation(rm, start, pages);
/*
 * Public entry point: validate the memory type, page-align the size and
 * run _omap_vram_alloc() under region_mutex; the resulting physical
 * address is returned via *paddr.
 * NOTE(review): "®ion_mutex" is a mis-encoding of "&region_mutex".
 */
322 int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
327 BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
329 DBG("alloc mem type %d size %d\n", mtype, size);
331 size = PAGE_ALIGN(size);
332 pages = size >> PAGE_SHIFT;
334 mutex_lock(®ion_mutex);
336 r = _omap_vram_alloc(mtype, pages, paddr);
338 mutex_unlock(®ion_mutex);
342 EXPORT_SYMBOL(omap_vram_alloc);
344 #ifdef CONFIG_PROC_FS
/*
 * seq_file iterator callbacks for /proc/omap-vram.  region_mutex is taken
 * in r_start() and released in r_stop(), so the region list is stable
 * while the file is being generated.
 * NOTE(review): "®ion_list"/"®ion_mutex" are mis-encodings of
 * "&region_list"/"&region_mutex".
 */
345 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
347 struct list_head *l = v;
/* end of list: no next element */
351 if (list_is_last(l, ®ion_list))
357 static void *r_start(struct seq_file *m, loff_t *pos)
360 struct list_head *l = ®ion_list;
362 mutex_lock(®ion_mutex);
/* empty list: nothing to show */
366 if (l == ®ion_list)
373 static void r_stop(struct seq_file *m, void *v)
375 mutex_unlock(®ion_mutex);
/*
 * Emit one region as "start-end (bytes)" followed by an indented line per
 * allocation inside it.
 */
378 static int r_show(struct seq_file *m, void *v)
380 struct vram_region *vr;
381 struct vram_alloc *va;
384 vr = list_entry(v, struct vram_region, list);
386 size = vr->pages << PAGE_SHIFT;
388 seq_printf(m, "%08lx-%08lx (%d bytes)\n",
389 vr->paddr, vr->paddr + size - 1,
392 list_for_each_entry(va, &vr->alloc_list, list) {
393 size = va->pages << PAGE_SHIFT;
394 seq_printf(m, " %08lx-%08lx (%d bytes)\n",
395 va->paddr, va->paddr + size - 1,
/* seq_file plumbing: iterator table, open hook and file_operations used
 * to create the read-only /proc/omap-vram entry at init time. */
404 static const struct seq_operations resource_op = {
411 static int vram_open(struct inode *inode, struct file *file)
413 return seq_open(file, &resource_op);
416 static const struct file_operations proc_vram_operations = {
420 .release = seq_release,
423 static int __init omap_vram_create_proc(void)
425 proc_create("omap-vram", 0, NULL, &proc_vram_operations);
/*
 * arch_initcall: mark the allocator live, replay the regions that were
 * postponed during early boot (see omap_vram_add_region()), and create
 * the /proc/omap-vram entry.
 */
431 static __init int omap_vram_init(void)
435 vram_initialized = 1;
437 for (i = 0; i < postponed_cnt; i++)
438 omap_vram_add_region(postponed_regions[i].paddr,
439 postponed_regions[i].size);
441 #ifdef CONFIG_PROC_FS
442 r = omap_vram_create_proc();
450 arch_initcall(omap_vram_init);
452 /* boottime vram alloc stuff */
454 /* set from board file */
455 static u32 omapfb_sram_vram_start __initdata;
456 static u32 omapfb_sram_vram_size __initdata;
458 /* set from board file */
459 static u32 omapfb_sdram_vram_start __initdata;
460 static u32 omapfb_sdram_vram_size __initdata;
462 /* set from kernel cmdline */
463 static u32 omapfb_def_sdram_vram_size __initdata;
464 static u32 omapfb_def_sdram_vram_start __initdata;
/*
 * Parse the "vram=<size>[,<physstart>]" kernel command line option.
 * NOTE(review): the "(*p) + 1" skips the separator before the start
 * address; presumably a ',' — the guard checking for it is not visible
 * in this extraction.
 */
466 static void __init omapfb_early_vram(char **p)
468 omapfb_def_sdram_vram_size = memparse(*p, p);
470 omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16);
472 __early_param("vram=", omapfb_early_vram);
475 * Called from map_io. We need to call to this early enough so that we
476 * can reserve the fixed SDRAM regions before VM could get hold of them.
/*
 * Reserve the SDRAM VRAM area with the bootmem allocator and register it
 * with omap_vram_add_region().  The cmdline "vram=" setting overrides the
 * board-file values; if neither is set, the optional Kconfig default is
 * used.  A fixed paddr is reserved exactly (reserve_bootmem); paddr == 0
 * means "allocate anywhere" (alloc_bootmem_pages).
 */
478 void __init omapfb_reserve_sdram(void)
480 struct bootmem_data *bdata;
481 unsigned long sdram_start, sdram_size;
485 /* cmdline arg overrides the board file definition */
486 if (omapfb_def_sdram_vram_size) {
487 size = omapfb_def_sdram_vram_size;
488 paddr = omapfb_def_sdram_vram_start;
492 size = omapfb_sdram_vram_size;
493 paddr = omapfb_sdram_vram_start;
496 #ifdef CONFIG_OMAP2_DSS_VRAM_SIZE
498 size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024;
506 size = PAGE_ALIGN(size);
/* bounds of node 0's SDRAM, from the bootmem node data */
508 bdata = NODE_DATA(0)->bdata;
509 sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
510 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
/* fixed-address request: must be page aligned and inside SDRAM */
513 if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
514 paddr + size > sdram_start + sdram_size) {
515 printk(KERN_ERR "Illegal SDRAM region for VRAM\n");
519 if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
520 pr_err("FB: failed to reserve VRAM\n");
/* dynamic request: let bootmem pick the pages */
524 if (size > sdram_size) {
525 printk(KERN_ERR "Illegal SDRAM size for VRAM\n");
529 paddr = virt_to_phys(alloc_bootmem_pages(size));
530 BUG_ON(paddr & ~PAGE_MASK);
533 omap_vram_add_region(paddr, size);
535 pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
539 * Called at sram init time, before anything is pushed to the SRAM stack.
540 * Because of the stack scheme, we will allocate everything from the
541 * start of the lowest address region to the end of SRAM. This will also
542 * include padding for page alignment and possible holes between regions.
544 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
545 * this point, since the driver built as a module would have problem with
546 * freeing / reallocating the regions.
/*
 * Reserve SRAM for VRAM out of the available window
 * [pstart_avail, pstart_avail + size_avail) and register it.  A fixed
 * start comes from the board file (omapfb_sram_vram_start); otherwise the
 * area is carved dynamically from the top of the available window.
 * Returns the number of bytes reserved (removed from the SRAM pool).
 */
548 unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
549 unsigned long sram_vstart,
550 unsigned long sram_size,
551 unsigned long pstart_avail,
552 unsigned long size_avail)
554 unsigned long pend_avail;
555 unsigned long reserved;
559 paddr = omapfb_sram_vram_start;
560 size = omapfb_sram_vram_size;
566 pend_avail = pstart_avail + size_avail;
569 /* Dynamic allocation */
570 if ((size_avail & PAGE_MASK) < size) {
571 printk(KERN_ERR "Not enough SRAM for VRAM\n");
/* take the area from the (page-aligned) top of the available window */
574 size_avail = (size_avail - size) & PAGE_MASK;
575 paddr = pstart_avail + size_avail;
/* fixed request must lie within the physical SRAM */
578 if (paddr < sram_pstart ||
579 paddr + size > sram_pstart + sram_size) {
580 printk(KERN_ERR "Illegal SRAM region for VRAM\n");
584 /* Reserve everything above the start of the region. */
585 if (pend_avail - paddr > reserved)
586 reserved = pend_avail - paddr;
587 size_avail = pend_avail - reserved - pstart_avail;
589 omap_vram_add_region(paddr, size);
592 pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
/*
 * Board-file hook: record the SDRAM VRAM area (size in bytes, physical
 * start) to be reserved later by omapfb_reserve_sdram().
 */
597 void __init omap2_set_sdram_vram(u32 size, u32 start)
599 omapfb_sdram_vram_start = start;
600 omapfb_sdram_vram_size = size;
/*
 * Board-file hook: record the SRAM VRAM area (size in bytes, physical
 * start) to be reserved later by omapfb_reserve_sram().
 */
603 void __init omap2_set_sram_vram(u32 size, u32 start)
605 omapfb_sram_vram_start = start;
606 omapfb_sram_vram_size = size;