/*
 * linux/arch/arm/plat-omap/vram.c
 *
 * Copyright (C) 2008 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * Some code and ideas taken from drivers/video/omap/ driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
25 #include <linux/vmalloc.h>
26 #include <linux/kernel.h>
28 #include <linux/list.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/proc_fs.h>
31 #include <linux/seq_file.h>
32 #include <linux/bootmem.h>
33 #include <linux/omapfb.h>
35 #include <asm/setup.h>
37 #include <mach/sram.h>
38 #include <mach/vram.h>
41 #define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
43 #define DBG(format, ...)
46 #define OMAP2_SRAM_START 0x40200000
47 /* Maximum size, in reality this is smaller if SRAM is partially locked. */
48 #define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
50 #define REG_MAP_SIZE(_page_cnt) \
51 ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
52 #define REG_MAP_PTR(_rg, _page_nr) \
53 (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
54 #define REG_MAP_MASK(_page_nr) \
55 (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
57 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
59 /* postponed regions are used to temporarily store region information at boot
60 * time when we cannot yet allocate the region list */
61 #define MAX_POSTPONED_REGIONS 10
63 static int postponed_cnt __initdata;
67 } postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
70 struct list_head list;
76 struct list_head list;
77 struct list_head alloc_list;
82 static DEFINE_MUTEX(region_mutex);
83 static LIST_HEAD(region_list);
/*
 * region_mem_type() - classify a physical address by memory type.
 * Returns OMAPFB_MEMTYPE_SRAM when @paddr lies inside the fixed OMAP2
 * SRAM window [OMAP2_SRAM_START, OMAP2_SRAM_START + OMAP2_SRAM_SIZE),
 * OMAPFB_MEMTYPE_SDRAM otherwise.
 * NOTE(review): the function braces are absent -- this chunk has interior
 * lines dropped (the embedded original line numbers jump); code left as-is.
 */
85 static inline int region_mem_type(unsigned long paddr)
87 if (paddr >= OMAP2_SRAM_START &&
88 paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
89 return OMAPFB_MEMTYPE_SRAM;
91 return OMAPFB_MEMTYPE_SDRAM;
/*
 * omap_vram_create_region() - allocate a vram_region descriptor with an
 * empty per-region allocation list.
 * NOTE(review): fragment -- the second parameter, the NULL check after
 * kzalloc(), the paddr/pages field assignments and the return statement
 * are among the lines missing from this chunk.
 */
94 static struct vram_region *omap_vram_create_region(unsigned long paddr,
97 struct vram_region *rm;
99 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
102 INIT_LIST_HEAD(&rm->alloc_list);
/*
 * omap_vram_free_region() - release a vram_region descriptor.
 * NOTE(review): the entire body is missing from this chunk; presumably it
 * unlinks @vr from region_list and kfree()s it -- confirm against the
 * full file.
 */
111 static void omap_vram_free_region(struct vram_region *vr)
/*
 * omap_vram_create_allocation() - create a vram_alloc for @pages pages at
 * @paddr and insert it into @vr's alloc_list, kept sorted by ascending
 * paddr (insertion point is the first existing entry with a larger paddr).
 * NOTE(review): fragment -- the NULL check after kzalloc(), the paddr/pages
 * field initialization, the loop's break and the return statement are
 * missing from this chunk.
 */
118 static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
119 unsigned long paddr, unsigned pages)
121 struct vram_alloc *va;
122 struct vram_alloc *new;
124 new = kzalloc(sizeof(*va), GFP_KERNEL);
132 list_for_each_entry(va, &vr->alloc_list, list) {
133 if (va->paddr > new->paddr)
137 list_add_tail(&new->list, &va->list);
/*
 * omap_vram_free_allocation() - release one vram_alloc entry.
 * NOTE(review): body missing from this chunk; presumably list_del() plus
 * kfree() -- confirm against the full file.
 */
142 static void omap_vram_free_allocation(struct vram_alloc *va)
/*
 * omap_vram_add_region_postponed() - record a VRAM region at early boot,
 * before the allocator is up; omap_vram_init() later converts each entry
 * into a real vram_region. Rejects the request when the fixed-size
 * postponed_regions[] table (MAX_POSTPONED_REGIONS entries) is full.
 * NOTE(review): the size parameter, the error/success returns and the
 * postponed_cnt increment are among the lines missing from this chunk.
 */
148 static __init int omap_vram_add_region_postponed(unsigned long paddr,
151 if (postponed_cnt == MAX_POSTPONED_REGIONS)
154 postponed_regions[postponed_cnt].paddr = paddr;
155 postponed_regions[postponed_cnt].size = size;
162 /* add/remove_region can be exported if there's need to add/remove regions
/*
 * omap_vram_add_region() - create a vram_region covering [paddr, paddr+size)
 * and link it onto the global region_list.
 * NOTE(review): "®ion_list" below is HTML-entity mojibake for
 * "&region_list" ("&reg" collapsed to the (R) sign) -- must be repaired
 * before this compiles. Error handling / return lines are also missing
 * from this chunk.
 */
164 static int omap_vram_add_region(unsigned long paddr, size_t size)
166 struct vram_region *rm;
169 DBG("adding region paddr %08lx size %d\n",
173 pages = size >> PAGE_SHIFT;
175 rm = omap_vram_create_region(paddr, pages);
179 list_add(&rm->list, ®ion_list);
/*
 * omap_vram_free() - free a previously allocated/reserved VRAM range.
 * Walks every region's alloc_list under region_mutex looking for an
 * allocation fully contained in [paddr, paddr+size) and releases it.
 *
 * NOTE(review): BUG -- "alloc->pages >> PAGE_SHIFT" converts a page count
 * the wrong way; computing the end address in bytes needs
 * "alloc->pages << PAGE_SHIFT" (compare the "<<" used everywhere else in
 * this file). Upstream later fixed exactly this in omap_vram_free().
 * NOTE(review): "®ion_mutex"/"®ion_list" are mojibake for
 * "&region_mutex"/"&region_list". The "found:" label, return values and
 * several braces are missing from this chunk.
 */
184 int omap_vram_free(unsigned long paddr, size_t size)
186 struct vram_region *rm;
187 struct vram_alloc *alloc;
190 DBG("free mem paddr %08lx size %d\n", paddr, size);
192 size = PAGE_ALIGN(size);
194 mutex_lock(®ion_mutex);
196 list_for_each_entry(rm, ®ion_list, list) {
197 list_for_each_entry(alloc, &rm->alloc_list, list) {
198 start = alloc->paddr;
199 end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
201 if (start >= paddr && end < paddr + size)
206 mutex_unlock(®ion_mutex);
210 omap_vram_free_allocation(alloc);
212 mutex_unlock(®ion_mutex);
215 EXPORT_SYMBOL(omap_vram_free);
/*
 * _omap_vram_reserve() - reserve the exact physical range
 * [paddr, paddr + pages*PAGE_SIZE) if it lies inside a managed region of
 * the same memory type and does not overlap any existing allocation.
 * Scans each region's sorted alloc_list, tracking the free gap [start, end]
 * before each allocation and finally the tail gap after the last one; on a
 * fit it records the range via omap_vram_create_allocation().
 * Caller must hold region_mutex.
 * NOTE(review): "®ion_list" is mojibake for "&region_list". The
 * "continue" statements, "found:" label, braces and returns are among the
 * lines missing from this chunk, so the control flow shown here is partial.
 */
217 static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
219 struct vram_region *rm;
220 struct vram_alloc *alloc;
223 size = pages << PAGE_SHIFT;
225 list_for_each_entry(rm, ®ion_list, list) {
226 unsigned long start, end;
228 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
230 if (region_mem_type(rm->paddr) != region_mem_type(paddr))
234 end = start + (rm->pages << PAGE_SHIFT) - 1;
235 if (start > paddr || end < paddr + size - 1)
238 DBG("block ok, checking allocs\n");
240 list_for_each_entry(alloc, &rm->alloc_list, list) {
241 end = alloc->paddr - 1;
243 if (start <= paddr && end >= paddr + size - 1)
246 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
249 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
251 if (!(start <= paddr && end >= paddr + size - 1))
254 DBG("FOUND area start %lx, end %lx\n", start, end);
256 if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
/*
 * omap_vram_reserve() - public wrapper around _omap_vram_reserve():
 * page-aligns @size, converts it to pages, and performs the reservation
 * under region_mutex.
 * NOTE(review): "®ion_mutex" is mojibake for "&region_mutex"; the
 * local declarations of r/pages, braces and the final return are missing
 * from this chunk.
 */
265 int omap_vram_reserve(unsigned long paddr, size_t size)
270 DBG("reserve mem paddr %08lx size %d\n", paddr, size);
272 size = PAGE_ALIGN(size);
273 pages = size >> PAGE_SHIFT;
275 mutex_lock(®ion_mutex);
277 r = _omap_vram_reserve(paddr, pages);
279 mutex_unlock(®ion_mutex);
283 EXPORT_SYMBOL(omap_vram_reserve);
/*
 * _omap_vram_alloc() - first-fit allocation of @pages pages from any
 * managed region whose memory type matches @mtype. Walks each region's
 * sorted alloc_list looking for a gap of at least pages << PAGE_SHIFT
 * bytes (gaps between allocations, then the tail gap up to the region
 * end); records the winning range via omap_vram_create_allocation() and
 * presumably stores its address through @paddr (that line is missing
 * here). Caller must hold region_mutex.
 * NOTE(review): "®ion_list" is mojibake for "&region_list". The
 * initial "start = rm->paddr", end computation inside the inner loop,
 * "found:" label, braces and returns are missing from this chunk.
 */
285 static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
287 struct vram_region *rm;
288 struct vram_alloc *alloc;
290 list_for_each_entry(rm, ®ion_list, list) {
291 unsigned long start, end;
293 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
295 if (region_mem_type(rm->paddr) != mtype)
300 list_for_each_entry(alloc, &rm->alloc_list, list) {
303 if (end - start >= pages << PAGE_SHIFT)
306 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
309 end = rm->paddr + (rm->pages << PAGE_SHIFT);
311 if (end - start < pages << PAGE_SHIFT)
314 DBG("FOUND %lx, end %lx\n", start, end);
316 alloc = omap_vram_create_allocation(rm, start, pages);
/*
 * omap_vram_alloc() - public allocation entry point: validates @mtype and
 * @size (BUG_ON for out-of-range type or zero size), page-aligns the
 * request, and delegates to _omap_vram_alloc() under region_mutex. The
 * resulting physical address is returned through @paddr.
 * NOTE(review): "®ion_mutex" is mojibake for "&region_mutex"; local
 * declarations, braces and the final "return r;" are missing from this
 * chunk.
 */
328 int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
333 BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
335 DBG("alloc mem type %d size %d\n", mtype, size);
337 size = PAGE_ALIGN(size);
338 pages = size >> PAGE_SHIFT;
340 mutex_lock(®ion_mutex);
342 r = _omap_vram_alloc(mtype, pages, paddr);
344 mutex_unlock(®ion_mutex);
348 EXPORT_SYMBOL(omap_vram_alloc);
350 #ifdef CONFIG_PROC_FS
/*
 * r_next() - seq_file iterator step for /proc/omap-vram: advance to the
 * next region_list node, returning NULL at the end of the list.
 * NOTE(review): "®ion_list" is mojibake for "&region_list"; the *pos
 * increment, NULL return and l->next return are missing from this chunk.
 */
351 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
353 struct list_head *l = v;
357 if (list_is_last(l, ®ion_list))
/*
 * r_start() - seq_file iterator start: take region_mutex (released in
 * r_stop()) and walk forward to the *pos'th region node; an empty list
 * (cursor back at the head) means nothing to show.
 * NOTE(review): "®ion_list"/"®ion_mutex" are mojibake for
 * "&region_list"/"&region_mutex"; the advance loop and returns are missing
 * from this chunk.
 */
363 static void *r_start(struct seq_file *m, loff_t *pos)
366 struct list_head *l = ®ion_list;
368 mutex_lock(®ion_mutex);
372 if (l == ®ion_list)
/*
 * r_stop() - seq_file iterator stop: drop the lock taken in r_start().
 * NOTE(review): "®ion_mutex" is mojibake for "&region_mutex";
 * surrounding braces are missing from this chunk.
 */
379 static void r_stop(struct seq_file *m, void *v)
381 mutex_unlock(®ion_mutex);
/*
 * r_show() - seq_file show callback: print one region as
 * "start-end (N bytes)" followed by an indented line per allocation
 * inside it. @v is the region's list node (converted via list_entry()).
 * NOTE(review): the size/pages declarations, the third seq_printf
 * arguments, closing braces and "return 0;" are missing from this chunk;
 * also the "%d" specifiers paired with what is presumably an unsigned
 * size look suspect -- confirm types in the full file.
 */
384 static int r_show(struct seq_file *m, void *v)
386 struct vram_region *vr;
387 struct vram_alloc *va;
390 vr = list_entry(v, struct vram_region, list);
392 size = vr->pages << PAGE_SHIFT;
394 seq_printf(m, "%08lx-%08lx (%d bytes)\n",
395 vr->paddr, vr->paddr + size - 1,
398 list_for_each_entry(va, &vr->alloc_list, list) {
399 size = va->pages << PAGE_SHIFT;
400 seq_printf(m, " %08lx-%08lx (%d bytes)\n",
401 va->paddr, va->paddr + size - 1,
410 static const struct seq_operations resource_op = {
/*
 * vram_open() - /proc/omap-vram open handler: attach the resource_op
 * seq_file iterator (r_start/r_next/r_stop/r_show) to the file.
 * NOTE(review): function braces are missing from this chunk.
 */
417 static int vram_open(struct inode *inode, struct file *file)
419 return seq_open(file, &resource_op);
422 static const struct file_operations proc_vram_operations = {
426 .release = seq_release,
/*
 * omap_vram_create_proc() - register /proc/omap-vram (mode 0, root of
 * procfs) with proc_vram_operations.
 * NOTE(review): the proc_create() return value should be checked; the
 * error handling and return lines are missing from this chunk -- confirm
 * against the full file.
 */
429 static int __init omap_vram_create_proc(void)
431 proc_create("omap-vram", 0, NULL, &proc_vram_operations);
/*
 * omap_vram_init() - arch_initcall: replay every region recorded by
 * omap_vram_add_region_postponed() now that the allocator works, then
 * create the /proc/omap-vram entry when CONFIG_PROC_FS is set.
 * NOTE(review): local declarations of i/r, the matching #endif, error
 * handling and "return 0;" are missing from this chunk.
 */
437 static __init int omap_vram_init(void)
441 for (i = 0; i < postponed_cnt; i++)
442 omap_vram_add_region(postponed_regions[i].paddr,
443 postponed_regions[i].size);
445 #ifdef CONFIG_PROC_FS
446 r = omap_vram_create_proc();
454 arch_initcall(omap_vram_init);
456 /* boottime vram alloc stuff */
458 /* set from board file */
459 static u32 omapfb_sram_vram_start __initdata;
460 static u32 omapfb_sram_vram_size __initdata;
462 /* set from board file */
463 static u32 omapfb_sdram_vram_start __initdata;
464 static u32 omapfb_sdram_vram_size __initdata;
466 /* set from kernel cmdline */
467 static u32 omapfb_def_sdram_vram_size __initdata;
468 static u32 omapfb_def_sdram_vram_start __initdata;
/*
 * omapfb_early_vram() - early "vram=" cmdline parser: reads the size with
 * memparse() and, apparently when a ':'/','-separated start address
 * follows, parses it as hex (the guard before simple_strtoul() is missing
 * from this chunk).
 * NOTE(review): printk() lacks a KERN_ level, and "%d"/"0x%x" are used
 * for u32 values -- "%u" would be correct for the size; flagged, code
 * left as-is.
 */
470 static void __init omapfb_early_vram(char **p)
472 omapfb_def_sdram_vram_size = memparse(*p, p);
474 omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16);
476 printk("omapfb_early_vram, %d, 0x%x\n",
477 omapfb_def_sdram_vram_size,
478 omapfb_def_sdram_vram_start);
480 __early_param("vram=", omapfb_early_vram);
483 * Called from map_io. We need to call to this early enough so that we
484 * can reserve the fixed SDRAM regions before VM could get hold of them.
/*
 * omapfb_reserve_sdram() - called from map_io, early enough to carve VRAM
 * out of SDRAM before the VM claims it. Size/address come from, in
 * priority order: the "vram=" cmdline, the board file
 * (omap2_set_sdram_vram), or CONFIG_OMAP2_DSS_VRAM_SIZE. With an explicit
 * paddr the exact range is validated against node 0's bootmem span and
 * reserve_bootmem()ed; with size only, pages are taken from
 * alloc_bootmem_pages(). Either way the range is queued via
 * omap_vram_add_region_postponed() for omap_vram_init().
 * NOTE(review): paddr/size declarations, several braces/else branches,
 * early returns and #endif lines are missing from this chunk, so the
 * if/else structure shown is partial. bootmem_data/node_min_pfn ties this
 * to the legacy bootmem allocator (pre-memblock kernels).
 */
486 void __init omapfb_reserve_sdram(void)
488 struct bootmem_data *bdata;
489 unsigned long sdram_start, sdram_size;
493 /* cmdline arg overrides the board file definition */
494 if (omapfb_def_sdram_vram_size) {
495 size = omapfb_def_sdram_vram_size;
496 paddr = omapfb_def_sdram_vram_start;
500 size = omapfb_sdram_vram_size;
501 paddr = omapfb_sdram_vram_start;
504 #ifdef CONFIG_OMAP2_DSS_VRAM_SIZE
506 size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024;
514 size = PAGE_ALIGN(size);
516 bdata = NODE_DATA(0)->bdata;
517 sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
518 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
521 if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
522 paddr + size > sdram_start + sdram_size) {
523 printk(KERN_ERR "Illegal SDRAM region for VRAM\n");
527 reserve_bootmem(paddr, size, BOOTMEM_DEFAULT);
529 if (size > sdram_size) {
530 printk(KERN_ERR "Illegal SDRAM size for VRAM\n");
534 paddr = virt_to_phys(alloc_bootmem_pages(size));
535 BUG_ON(paddr & ~PAGE_MASK);
538 omap_vram_add_region_postponed(paddr, size);
540 pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
544 * Called at sram init time, before anything is pushed to the SRAM stack.
545 * Because of the stack scheme, we will allocate everything from the
546 * start of the lowest address region to the end of SRAM. This will also
547 * include padding for page alignment and possible holes between regions.
549 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
550 * this point, since the driver built as a module would have problem with
551 * freeing / reallocating the regions.
/*
 * omapfb_reserve_sram() - called at SRAM init, before anything is pushed
 * onto the SRAM stack. Takes either a fixed board-file region
 * (omapfb_sram_vram_start/size) or, when no fixed start exists, carves a
 * dynamic region from the top of the available SRAM window
 * [pstart_avail, pstart_avail + size_avail). Because SRAM is allocated
 * stack-style, everything from the region start to the end of the window
 * is claimed ("reserved"), including alignment padding. The region is
 * queued via omap_vram_add_region_postponed(); returns the number of
 * bytes reserved (return line missing from this chunk).
 * NOTE(review): paddr/size declarations, early-exit paths, the
 * "reserved = 0" init and several braces are missing here; @sram_vstart
 * is unused in the visible lines -- confirm against the full file.
 */
553 unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
554 unsigned long sram_vstart,
555 unsigned long sram_size,
556 unsigned long pstart_avail,
557 unsigned long size_avail)
559 unsigned long pend_avail;
560 unsigned long reserved;
564 paddr = omapfb_sram_vram_start;
565 size = omapfb_sram_vram_size;
571 pend_avail = pstart_avail + size_avail;
574 /* Dynamic allocation */
575 if ((size_avail & PAGE_MASK) < size) {
576 printk(KERN_ERR "Not enough SRAM for VRAM\n");
579 size_avail = (size_avail - size) & PAGE_MASK;
580 paddr = pstart_avail + size_avail;
583 if (paddr < sram_pstart ||
584 paddr + size > sram_pstart + sram_size) {
585 printk(KERN_ERR "Illegal SRAM region for VRAM\n");
589 /* Reserve everything above the start of the region. */
590 if (pend_avail - paddr > reserved)
591 reserved = pend_avail - paddr;
592 size_avail = pend_avail - reserved - pstart_avail;
594 omap_vram_add_region_postponed(paddr, size);
597 pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
/*
 * omap2_set_sdram_vram() - board-file hook: record the SDRAM VRAM area
 * (@size bytes at physical @start) for omapfb_reserve_sdram() to claim.
 * NOTE(review): function braces are missing from this chunk.
 */
602 void __init omap2_set_sdram_vram(u32 size, u32 start)
604 omapfb_sdram_vram_start = start;
605 omapfb_sdram_vram_size = size;
/*
 * omap2_set_sram_vram() - board-file hook: record the SRAM VRAM area
 * (@size bytes at physical @start) for omapfb_reserve_sram() to claim.
 * NOTE(review): function braces are missing from this chunk.
 */
608 void __init omap2_set_sram_vram(u32 size, u32 start)
610 omapfb_sram_vram_start = start;
611 omapfb_sram_vram_size = size;