2 * linux/arch/arm/plat-omap/vram.c
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * Some code and ideas taken from drivers/video/omap/ driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
25 #include <linux/vmalloc.h>
26 #include <linux/kernel.h>
28 #include <linux/list.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/seq_file.h>
31 #include <linux/bootmem.h>
32 #include <linux/omapfb.h>
33 #include <linux/completion.h>
34 #include <linux/debugfs.h>
36 #include <asm/setup.h>
38 #include <mach/sram.h>
39 #include <mach/vram.h>
/* Debug print helper: compiled to a printk only when DEBUG is defined. */
#ifdef DEBUG
#define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif

#define OMAP2_SRAM_START	0x40200000
/* Maximum size, in reality this is smaller if SRAM is partially locked. */
#define OMAP2_SRAM_SIZE		0xa0000		/* 640k */

/*
 * Page-bitmap helpers: one bit per page, stored in an array of
 * unsigned long.  NOTE(review): no "map" member is visible in the
 * structs below — these macros look unused in this chunk; verify
 * against the rest of the file before removing.
 */
#define REG_MAP_SIZE(_page_cnt) \
	((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
#define REG_MAP_PTR(_rg, _page_nr) \
	(((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
#define REG_MAP_MASK(_page_nr) \
	(1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
59 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
61 /* postponed regions are used to temporarily store region information at boot
62 * time when we cannot yet allocate the region list */
63 #define MAX_POSTPONED_REGIONS 10
65 static bool vram_initialized;
66 static int postponed_cnt __initdata;
70 } postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
73 struct list_head list;
79 struct list_head list;
80 struct list_head alloc_list;
85 static DEFINE_MUTEX(region_mutex);
86 static LIST_HEAD(region_list);
88 static inline int region_mem_type(unsigned long paddr)
90 if (paddr >= OMAP2_SRAM_START &&
91 paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
92 return OMAPFB_MEMTYPE_SRAM;
94 return OMAPFB_MEMTYPE_SDRAM;
97 static struct vram_region *omap_vram_create_region(unsigned long paddr,
100 struct vram_region *rm;
102 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
105 INIT_LIST_HEAD(&rm->alloc_list);
114 static void omap_vram_free_region(struct vram_region *vr)
121 static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
122 unsigned long paddr, unsigned pages)
124 struct vram_alloc *va;
125 struct vram_alloc *new;
127 new = kzalloc(sizeof(*va), GFP_KERNEL);
135 list_for_each_entry(va, &vr->alloc_list, list) {
136 if (va->paddr > new->paddr)
140 list_add_tail(&new->list, &va->list);
145 static void omap_vram_free_allocation(struct vram_alloc *va)
151 int omap_vram_add_region(unsigned long paddr, size_t size)
153 struct vram_region *rm;
156 if (vram_initialized) {
157 DBG("adding region paddr %08lx size %d\n",
161 pages = size >> PAGE_SHIFT;
163 rm = omap_vram_create_region(paddr, pages);
167 list_add(&rm->list, ®ion_list);
169 if (postponed_cnt == MAX_POSTPONED_REGIONS)
172 postponed_regions[postponed_cnt].paddr = paddr;
173 postponed_regions[postponed_cnt].size = size;
180 int omap_vram_free(unsigned long paddr, size_t size)
182 struct vram_region *rm;
183 struct vram_alloc *alloc;
186 DBG("free mem paddr %08lx size %d\n", paddr, size);
188 size = PAGE_ALIGN(size);
190 mutex_lock(®ion_mutex);
192 list_for_each_entry(rm, ®ion_list, list) {
193 list_for_each_entry(alloc, &rm->alloc_list, list) {
194 start = alloc->paddr;
195 end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
197 if (start >= paddr && end < paddr + size)
202 mutex_unlock(®ion_mutex);
206 omap_vram_free_allocation(alloc);
208 mutex_unlock(®ion_mutex);
211 EXPORT_SYMBOL(omap_vram_free);
213 static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
215 struct vram_region *rm;
216 struct vram_alloc *alloc;
219 size = pages << PAGE_SHIFT;
221 list_for_each_entry(rm, ®ion_list, list) {
222 unsigned long start, end;
224 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
226 if (region_mem_type(rm->paddr) != region_mem_type(paddr))
230 end = start + (rm->pages << PAGE_SHIFT) - 1;
231 if (start > paddr || end < paddr + size - 1)
234 DBG("block ok, checking allocs\n");
236 list_for_each_entry(alloc, &rm->alloc_list, list) {
237 end = alloc->paddr - 1;
239 if (start <= paddr && end >= paddr + size - 1)
242 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
245 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
247 if (!(start <= paddr && end >= paddr + size - 1))
250 DBG("FOUND area start %lx, end %lx\n", start, end);
252 if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
261 int omap_vram_reserve(unsigned long paddr, size_t size)
266 DBG("reserve mem paddr %08lx size %d\n", paddr, size);
268 size = PAGE_ALIGN(size);
269 pages = size >> PAGE_SHIFT;
271 mutex_lock(®ion_mutex);
273 r = _omap_vram_reserve(paddr, pages);
275 mutex_unlock(®ion_mutex);
279 EXPORT_SYMBOL(omap_vram_reserve);
281 static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
283 struct completion *compl = data;
287 static int _omap_vram_clear(u32 paddr, unsigned pages)
289 struct completion compl;
291 unsigned frame_count;
295 init_completion(&compl);
297 r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
301 pr_err("VRAM: request_dma failed for memory clear\n");
305 elem_count = pages * PAGE_SIZE / 4;
308 omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
309 elem_count, frame_count,
310 OMAP_DMA_SYNC_ELEMENT,
313 omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
316 omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
320 if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
322 pr_err("VRAM: dma timeout while clearing memory\n");
334 static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
336 struct vram_region *rm;
337 struct vram_alloc *alloc;
339 list_for_each_entry(rm, ®ion_list, list) {
340 unsigned long start, end;
342 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
344 if (region_mem_type(rm->paddr) != mtype)
349 list_for_each_entry(alloc, &rm->alloc_list, list) {
352 if (end - start >= pages << PAGE_SHIFT)
355 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
358 end = rm->paddr + (rm->pages << PAGE_SHIFT);
360 if (end - start < pages << PAGE_SHIFT)
363 DBG("FOUND %lx, end %lx\n", start, end);
365 alloc = omap_vram_create_allocation(rm, start, pages);
371 _omap_vram_clear(start, pages);
379 int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
384 BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
386 DBG("alloc mem type %d size %d\n", mtype, size);
388 size = PAGE_ALIGN(size);
389 pages = size >> PAGE_SHIFT;
391 mutex_lock(®ion_mutex);
393 r = _omap_vram_alloc(mtype, pages, paddr);
395 mutex_unlock(®ion_mutex);
399 EXPORT_SYMBOL(omap_vram_alloc);
401 #if defined(CONFIG_DEBUG_FS)
402 static int vram_debug_show(struct seq_file *s, void *unused)
404 struct vram_region *vr;
405 struct vram_alloc *va;
408 mutex_lock(®ion_mutex);
410 list_for_each_entry(vr, ®ion_list, list) {
411 size = vr->pages << PAGE_SHIFT;
412 seq_printf(s, "%08lx-%08lx (%d bytes)\n",
413 vr->paddr, vr->paddr + size - 1,
416 list_for_each_entry(va, &vr->alloc_list, list) {
417 size = va->pages << PAGE_SHIFT;
418 seq_printf(s, " %08lx-%08lx (%d bytes)\n",
419 va->paddr, va->paddr + size - 1,
424 mutex_unlock(®ion_mutex);
429 static int vram_debug_open(struct inode *inode, struct file *file)
431 return single_open(file, vram_debug_show, inode->i_private);
434 static const struct file_operations vram_debug_fops = {
435 .open = vram_debug_open,
438 .release = single_release,
441 static int __init omap_vram_create_debugfs(void)
445 d = debugfs_create_file("vram", S_IRUGO, NULL,
446 NULL, &vram_debug_fops);
454 static __init int omap_vram_init(void)
458 vram_initialized = 1;
460 for (i = 0; i < postponed_cnt; i++)
461 omap_vram_add_region(postponed_regions[i].paddr,
462 postponed_regions[i].size);
464 #ifdef CONFIG_DEBUG_FS
465 if (omap_vram_create_debugfs())
466 pr_err("VRAM: Failed to create debugfs file\n");
472 arch_initcall(omap_vram_init);
474 /* boottime vram alloc stuff */
476 /* set from board file */
477 static u32 omapfb_sram_vram_start __initdata;
478 static u32 omapfb_sram_vram_size __initdata;
480 /* set from board file */
481 static u32 omapfb_sdram_vram_start __initdata;
482 static u32 omapfb_sdram_vram_size __initdata;
484 /* set from kernel cmdline */
485 static u32 omapfb_def_sdram_vram_size __initdata;
486 static u32 omapfb_def_sdram_vram_start __initdata;
488 static void __init omapfb_early_vram(char **p)
490 omapfb_def_sdram_vram_size = memparse(*p, p);
492 omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16);
494 __early_param("vram=", omapfb_early_vram);
497 * Called from map_io. We need to call to this early enough so that we
498 * can reserve the fixed SDRAM regions before VM could get hold of them.
500 void __init omapfb_reserve_sdram(void)
502 struct bootmem_data *bdata;
503 unsigned long sdram_start, sdram_size;
507 /* cmdline arg overrides the board file definition */
508 if (omapfb_def_sdram_vram_size) {
509 size = omapfb_def_sdram_vram_size;
510 paddr = omapfb_def_sdram_vram_start;
514 size = omapfb_sdram_vram_size;
515 paddr = omapfb_sdram_vram_start;
518 #ifdef CONFIG_OMAP2_DSS_VRAM_SIZE
520 size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024;
528 size = PAGE_ALIGN(size);
530 bdata = NODE_DATA(0)->bdata;
531 sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
532 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
535 if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
536 paddr + size > sdram_start + sdram_size) {
537 printk(KERN_ERR "Illegal SDRAM region for VRAM\n");
541 if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
542 pr_err("FB: failed to reserve VRAM\n");
546 if (size > sdram_size) {
547 printk(KERN_ERR "Illegal SDRAM size for VRAM\n");
551 paddr = virt_to_phys(alloc_bootmem_pages(size));
552 BUG_ON(paddr & ~PAGE_MASK);
555 omap_vram_add_region(paddr, size);
557 pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
561 * Called at sram init time, before anything is pushed to the SRAM stack.
562 * Because of the stack scheme, we will allocate everything from the
563 * start of the lowest address region to the end of SRAM. This will also
564 * include padding for page alignment and possible holes between regions.
566 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
567 * this point, since the driver built as a module would have problem with
568 * freeing / reallocating the regions.
570 unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
571 unsigned long sram_vstart,
572 unsigned long sram_size,
573 unsigned long pstart_avail,
574 unsigned long size_avail)
576 unsigned long pend_avail;
577 unsigned long reserved;
581 paddr = omapfb_sram_vram_start;
582 size = omapfb_sram_vram_size;
588 pend_avail = pstart_avail + size_avail;
591 /* Dynamic allocation */
592 if ((size_avail & PAGE_MASK) < size) {
593 printk(KERN_ERR "Not enough SRAM for VRAM\n");
596 size_avail = (size_avail - size) & PAGE_MASK;
597 paddr = pstart_avail + size_avail;
600 if (paddr < sram_pstart ||
601 paddr + size > sram_pstart + sram_size) {
602 printk(KERN_ERR "Illegal SRAM region for VRAM\n");
606 /* Reserve everything above the start of the region. */
607 if (pend_avail - paddr > reserved)
608 reserved = pend_avail - paddr;
609 size_avail = pend_avail - reserved - pstart_avail;
611 omap_vram_add_region(paddr, size);
614 pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
619 void __init omap2_set_sdram_vram(u32 size, u32 start)
621 omapfb_sdram_vram_start = start;
622 omapfb_sdram_vram_size = size;
625 void __init omap2_set_sram_vram(u32 size, u32 start)
627 omapfb_sram_vram_start = start;
628 omapfb_sram_vram_size = size;