-From 6bec28d7c3d7cf97d644c610beadfef354fa596e Mon Sep 17 00:00:00 2001
+From 0eceac2ba3548ae41200403a8dae9907ab788fd0 Mon Sep 17 00:00:00 2001
From: Tomi Valkeinen <tomi.valkeinen@nokia.com>
-Date: Thu, 13 Nov 2008 15:38:15 +0200
+Date: Mon, 8 Dec 2008 13:43:36 +0200
Subject: [PATCH] DSS: New display subsystem driver for OMAP2/3
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
arch/arm/plat-omap/Makefile | 2 +
arch/arm/plat-omap/dss/Kconfig | 66 +
arch/arm/plat-omap/dss/Makefile | 6 +
- arch/arm/plat-omap/dss/dispc.c | 1722 ++++++++++++++++
- arch/arm/plat-omap/dss/display.c | 775 ++++++++
- arch/arm/plat-omap/dss/dpi.c | 323 +++
- arch/arm/plat-omap/dss/dsi.c | 3020 +++++++++++++++++++++++++++++
- arch/arm/plat-omap/dss/dss.c | 554 ++++++
- arch/arm/plat-omap/dss/dss.h | 254 +++
- arch/arm/plat-omap/dss/rfbi.c | 1234 ++++++++++++
- arch/arm/plat-omap/dss/sdi.c | 157 ++
- arch/arm/plat-omap/dss/venc.c | 515 +++++
- arch/arm/plat-omap/include/mach/display.h | 458 +++++
- 14 files changed, 9088 insertions(+), 0 deletions(-)
+ arch/arm/plat-omap/dss/dispc.c | 2021 +++++++++++++++++++
+ arch/arm/plat-omap/dss/display.c | 765 +++++++
+ arch/arm/plat-omap/dss/dpi.c | 320 +++
+ arch/arm/plat-omap/dss/dsi.c | 3135 +++++++++++++++++++++++++++++
+ arch/arm/plat-omap/dss/dss.c | 784 +++++++
+ arch/arm/plat-omap/dss/dss.h | 268 +++
+ arch/arm/plat-omap/dss/rfbi.c | 1225 +++++++++++
+ arch/arm/plat-omap/dss/sdi.c | 150 ++
+ arch/arm/plat-omap/dss/venc.c | 501 +++++
+ arch/arm/plat-omap/include/mach/display.h | 463 +++++
+ 14 files changed, 9708 insertions(+), 0 deletions(-)
create mode 100644 arch/arm/plat-omap/dss/Kconfig
create mode 100644 arch/arm/plat-omap/dss/Makefile
create mode 100644 arch/arm/plat-omap/dss/dispc.c
+omap-dss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
diff --git a/arch/arm/plat-omap/dss/dispc.c b/arch/arm/plat-omap/dss/dispc.c
new file mode 100644
-index 0000000..3738cf3
+index 0000000..33fbd0a
--- /dev/null
+++ b/arch/arm/plat-omap/dss/dispc.c
-@@ -0,0 +1,1722 @@
+@@ -0,0 +1,2021 @@
+/*
+ * linux/arch/arm/plat-omap/dss/dispc.c
+ *
+/* DISPC */
+#define DISPC_BASE 0x48050400
+
++#define DISPC_SZ_REGS SZ_1K
++
+struct dispc_reg { u16 idx; };
+
+#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
+#define DISPC_TIMING_V DISPC_REG(0x0068)
+#define DISPC_POL_FREQ DISPC_REG(0x006C)
+#define DISPC_DIVISOR DISPC_REG(0x0070)
++#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
+#define DISPC_SIZE_DIG DISPC_REG(0x0078)
+#define DISPC_SIZE_LCD DISPC_REG(0x007C)
+
-+#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
-+#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
-+#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
-+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0 DISPC_REG(0x0080)
+#define DISPC_GFX_BA1 DISPC_REG(0x0084)
+#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
+#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
+
++#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
++#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
++#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
++
++#define DISPC_CPR_COEF_R DISPC_REG(0x0220)
++#define DISPC_CPR_COEF_G DISPC_REG(0x0224)
++#define DISPC_CPR_COEF_B DISPC_REG(0x0228)
++
++#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
++
+/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
+#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx)
+
+#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
+/* coef index i = {0, 1, 2, 3, 4} */
+#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
++/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
++#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
++
++#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
++
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_OCP_ERR | \
+static struct {
+ void __iomem *base;
+
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
-+ struct clk *dss_54m_fck;
+ struct clk *dpll4_m4_ck;
-+} dispc;
+
-+static spinlock_t dss_lock;
++ spinlock_t irq_lock;
+
-+static inline void enable_clocks(int enable)
-+{
-+ if (enable) {
-+ clk_enable(dispc.dss_ick);
-+ clk_enable(dispc.dss1_fck);
-+ } else {
-+ clk_disable(dispc.dss1_fck);
-+ clk_disable(dispc.dss_ick);
-+ }
-+}
++ u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
++} dispc;
+
+static inline void dispc_write_reg(const struct dispc_reg idx, u32 val)
+{
+ return __raw_readl(dispc.base + idx.idx);
+}
+
++#define SR(reg) \
++ dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
++#define RR(reg) \
++ dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)])
++
++void dispc_save_context(void)
++{
++ SR(SYSCONFIG);
++ SR(IRQENABLE);
++ SR(CONTROL);
++ SR(CONFIG);
++ SR(DEFAULT_COLOR0);
++ SR(DEFAULT_COLOR1);
++ SR(TRANS_COLOR0);
++ SR(TRANS_COLOR1);
++ SR(LINE_NUMBER);
++ SR(TIMING_H);
++ SR(TIMING_V);
++ SR(POL_FREQ);
++ SR(DIVISOR);
++ SR(GLOBAL_ALPHA);
++ SR(SIZE_DIG);
++ SR(SIZE_LCD);
++
++ SR(GFX_BA0);
++ SR(GFX_BA1);
++ SR(GFX_POSITION);
++ SR(GFX_SIZE);
++ SR(GFX_ATTRIBUTES);
++ SR(GFX_FIFO_THRESHOLD);
++ SR(GFX_ROW_INC);
++ SR(GFX_PIXEL_INC);
++ SR(GFX_WINDOW_SKIP);
++ SR(GFX_TABLE_BA);
++
++ SR(DATA_CYCLE1);
++ SR(DATA_CYCLE2);
++ SR(DATA_CYCLE3);
++
++ SR(CPR_COEF_R);
++ SR(CPR_COEF_G);
++ SR(CPR_COEF_B);
++
++ SR(GFX_PRELOAD);
++
++ /* VID1 */
++ SR(VID_BA0(0));
++ SR(VID_BA1(0));
++ SR(VID_POSITION(0));
++ SR(VID_SIZE(0));
++ SR(VID_ATTRIBUTES(0));
++ SR(VID_FIFO_THRESHOLD(0));
++ SR(VID_ROW_INC(0));
++ SR(VID_PIXEL_INC(0));
++ SR(VID_FIR(0));
++ SR(VID_PICTURE_SIZE(0));
++ SR(VID_ACCU0(0));
++ SR(VID_ACCU1(0));
++
++ SR(VID_FIR_COEF_H(0, 0));
++ SR(VID_FIR_COEF_H(0, 1));
++ SR(VID_FIR_COEF_H(0, 2));
++ SR(VID_FIR_COEF_H(0, 3));
++ SR(VID_FIR_COEF_H(0, 4));
++ SR(VID_FIR_COEF_H(0, 5));
++ SR(VID_FIR_COEF_H(0, 6));
++ SR(VID_FIR_COEF_H(0, 7));
++
++ SR(VID_FIR_COEF_HV(0, 0));
++ SR(VID_FIR_COEF_HV(0, 1));
++ SR(VID_FIR_COEF_HV(0, 2));
++ SR(VID_FIR_COEF_HV(0, 3));
++ SR(VID_FIR_COEF_HV(0, 4));
++ SR(VID_FIR_COEF_HV(0, 5));
++ SR(VID_FIR_COEF_HV(0, 6));
++ SR(VID_FIR_COEF_HV(0, 7));
++
++ SR(VID_CONV_COEF(0, 0));
++ SR(VID_CONV_COEF(0, 1));
++ SR(VID_CONV_COEF(0, 2));
++ SR(VID_CONV_COEF(0, 3));
++ SR(VID_CONV_COEF(0, 4));
++
++ SR(VID_FIR_COEF_V(0, 0));
++ SR(VID_FIR_COEF_V(0, 1));
++ SR(VID_FIR_COEF_V(0, 2));
++ SR(VID_FIR_COEF_V(0, 3));
++ SR(VID_FIR_COEF_V(0, 4));
++ SR(VID_FIR_COEF_V(0, 5));
++ SR(VID_FIR_COEF_V(0, 6));
++ SR(VID_FIR_COEF_V(0, 7));
++
++ SR(VID_PRELOAD(0));
++
++ /* VID2 */
++ SR(VID_BA0(1));
++ SR(VID_BA1(1));
++ SR(VID_POSITION(1));
++ SR(VID_SIZE(1));
++ SR(VID_ATTRIBUTES(1));
++ SR(VID_FIFO_THRESHOLD(1));
++ SR(VID_ROW_INC(1));
++ SR(VID_PIXEL_INC(1));
++ SR(VID_FIR(1));
++ SR(VID_PICTURE_SIZE(1));
++ SR(VID_ACCU0(1));
++ SR(VID_ACCU1(1));
++
++ SR(VID_FIR_COEF_H(1, 0));
++ SR(VID_FIR_COEF_H(1, 1));
++ SR(VID_FIR_COEF_H(1, 2));
++ SR(VID_FIR_COEF_H(1, 3));
++ SR(VID_FIR_COEF_H(1, 4));
++ SR(VID_FIR_COEF_H(1, 5));
++ SR(VID_FIR_COEF_H(1, 6));
++ SR(VID_FIR_COEF_H(1, 7));
++
++ SR(VID_FIR_COEF_HV(1, 0));
++ SR(VID_FIR_COEF_HV(1, 1));
++ SR(VID_FIR_COEF_HV(1, 2));
++ SR(VID_FIR_COEF_HV(1, 3));
++ SR(VID_FIR_COEF_HV(1, 4));
++ SR(VID_FIR_COEF_HV(1, 5));
++ SR(VID_FIR_COEF_HV(1, 6));
++ SR(VID_FIR_COEF_HV(1, 7));
++
++ SR(VID_CONV_COEF(1, 0));
++ SR(VID_CONV_COEF(1, 1));
++ SR(VID_CONV_COEF(1, 2));
++ SR(VID_CONV_COEF(1, 3));
++ SR(VID_CONV_COEF(1, 4));
++
++ SR(VID_FIR_COEF_V(1, 0));
++ SR(VID_FIR_COEF_V(1, 1));
++ SR(VID_FIR_COEF_V(1, 2));
++ SR(VID_FIR_COEF_V(1, 3));
++ SR(VID_FIR_COEF_V(1, 4));
++ SR(VID_FIR_COEF_V(1, 5));
++ SR(VID_FIR_COEF_V(1, 6));
++ SR(VID_FIR_COEF_V(1, 7));
++
++ SR(VID_PRELOAD(1));
++}
++
++void dispc_restore_context(void)
++{
++ RR(SYSCONFIG);
++ RR(IRQENABLE);
++ //RR(CONTROL);
++ RR(CONFIG);
++ RR(DEFAULT_COLOR0);
++ RR(DEFAULT_COLOR1);
++ RR(TRANS_COLOR0);
++ RR(TRANS_COLOR1);
++ RR(LINE_NUMBER);
++ RR(TIMING_H);
++ RR(TIMING_V);
++ RR(POL_FREQ);
++ RR(DIVISOR);
++ RR(GLOBAL_ALPHA);
++ RR(SIZE_DIG);
++ RR(SIZE_LCD);
++
++ RR(GFX_BA0);
++ RR(GFX_BA1);
++ RR(GFX_POSITION);
++ RR(GFX_SIZE);
++ RR(GFX_ATTRIBUTES);
++ RR(GFX_FIFO_THRESHOLD);
++ RR(GFX_ROW_INC);
++ RR(GFX_PIXEL_INC);
++ RR(GFX_WINDOW_SKIP);
++ RR(GFX_TABLE_BA);
++
++ RR(DATA_CYCLE1);
++ RR(DATA_CYCLE2);
++ RR(DATA_CYCLE3);
++
++ RR(CPR_COEF_R);
++ RR(CPR_COEF_G);
++ RR(CPR_COEF_B);
++
++ RR(GFX_PRELOAD);
++
++ /* VID1 */
++ RR(VID_BA0(0));
++ RR(VID_BA1(0));
++ RR(VID_POSITION(0));
++ RR(VID_SIZE(0));
++ RR(VID_ATTRIBUTES(0));
++ RR(VID_FIFO_THRESHOLD(0));
++ RR(VID_ROW_INC(0));
++ RR(VID_PIXEL_INC(0));
++ RR(VID_FIR(0));
++ RR(VID_PICTURE_SIZE(0));
++ RR(VID_ACCU0(0));
++ RR(VID_ACCU1(0));
++
++ RR(VID_FIR_COEF_H(0, 0));
++ RR(VID_FIR_COEF_H(0, 1));
++ RR(VID_FIR_COEF_H(0, 2));
++ RR(VID_FIR_COEF_H(0, 3));
++ RR(VID_FIR_COEF_H(0, 4));
++ RR(VID_FIR_COEF_H(0, 5));
++ RR(VID_FIR_COEF_H(0, 6));
++ RR(VID_FIR_COEF_H(0, 7));
++
++ RR(VID_FIR_COEF_HV(0, 0));
++ RR(VID_FIR_COEF_HV(0, 1));
++ RR(VID_FIR_COEF_HV(0, 2));
++ RR(VID_FIR_COEF_HV(0, 3));
++ RR(VID_FIR_COEF_HV(0, 4));
++ RR(VID_FIR_COEF_HV(0, 5));
++ RR(VID_FIR_COEF_HV(0, 6));
++ RR(VID_FIR_COEF_HV(0, 7));
++
++ RR(VID_CONV_COEF(0, 0));
++ RR(VID_CONV_COEF(0, 1));
++ RR(VID_CONV_COEF(0, 2));
++ RR(VID_CONV_COEF(0, 3));
++ RR(VID_CONV_COEF(0, 4));
++
++ RR(VID_FIR_COEF_V(0, 0));
++ RR(VID_FIR_COEF_V(0, 1));
++ RR(VID_FIR_COEF_V(0, 2));
++ RR(VID_FIR_COEF_V(0, 3));
++ RR(VID_FIR_COEF_V(0, 4));
++ RR(VID_FIR_COEF_V(0, 5));
++ RR(VID_FIR_COEF_V(0, 6));
++ RR(VID_FIR_COEF_V(0, 7));
++
++ RR(VID_PRELOAD(0));
++
++ /* VID2 */
++ RR(VID_BA0(1));
++ RR(VID_BA1(1));
++ RR(VID_POSITION(1));
++ RR(VID_SIZE(1));
++ RR(VID_ATTRIBUTES(1));
++ RR(VID_FIFO_THRESHOLD(1));
++ RR(VID_ROW_INC(1));
++ RR(VID_PIXEL_INC(1));
++ RR(VID_FIR(1));
++ RR(VID_PICTURE_SIZE(1));
++ RR(VID_ACCU0(1));
++ RR(VID_ACCU1(1));
++
++ RR(VID_FIR_COEF_H(1, 0));
++ RR(VID_FIR_COEF_H(1, 1));
++ RR(VID_FIR_COEF_H(1, 2));
++ RR(VID_FIR_COEF_H(1, 3));
++ RR(VID_FIR_COEF_H(1, 4));
++ RR(VID_FIR_COEF_H(1, 5));
++ RR(VID_FIR_COEF_H(1, 6));
++ RR(VID_FIR_COEF_H(1, 7));
++
++ RR(VID_FIR_COEF_HV(1, 0));
++ RR(VID_FIR_COEF_HV(1, 1));
++ RR(VID_FIR_COEF_HV(1, 2));
++ RR(VID_FIR_COEF_HV(1, 3));
++ RR(VID_FIR_COEF_HV(1, 4));
++ RR(VID_FIR_COEF_HV(1, 5));
++ RR(VID_FIR_COEF_HV(1, 6));
++ RR(VID_FIR_COEF_HV(1, 7));
++
++ RR(VID_CONV_COEF(1, 0));
++ RR(VID_CONV_COEF(1, 1));
++ RR(VID_CONV_COEF(1, 2));
++ RR(VID_CONV_COEF(1, 3));
++ RR(VID_CONV_COEF(1, 4));
++
++ RR(VID_FIR_COEF_V(1, 0));
++ RR(VID_FIR_COEF_V(1, 1));
++ RR(VID_FIR_COEF_V(1, 2));
++ RR(VID_FIR_COEF_V(1, 3));
++ RR(VID_FIR_COEF_V(1, 4));
++ RR(VID_FIR_COEF_V(1, 5));
++ RR(VID_FIR_COEF_V(1, 6));
++ RR(VID_FIR_COEF_V(1, 7));
++
++ RR(VID_PRELOAD(1));
++
++ /* enable last, because LCD & DIGIT enable are here */
++ RR(CONTROL);
++}
++
++#undef SR
++#undef RR
++
++static inline void enable_clocks(int enable)
++{
++ if (enable)
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
++ else
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
++}
++
+void dispc_go(enum omap_channel channel)
+{
+ int bit;
+}
+
+
-+static inline void get_dss_clocks(void)
-+{
-+ dispc.dss_ick = get_dss_ick();
-+ dispc.dss1_fck = get_dss1_fck();
-+ dispc.dss_54m_fck = get_tv_fck();
-+}
-+
+void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+{
+ int mode;
+ unsigned long r = 0;
+
+ if (dss_get_dispc_clk_source() == 0)
-+ r = clk_get_rate(dispc.dss1_fck);
++ r = dss_clk_get_rate(DSS_CLK_FCK1);
+ else
+#ifdef CONFIG_OMAP2_DSS_DSI
+ r = dsi_get_dsi1_pll_rate();
+ if (isr == NULL)
+ return -EINVAL;
+
-+ spin_lock_irqsave(&dss_lock, flags);
++ spin_lock_irqsave(&dispc.irq_lock, flags);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ if (registered_isr[i].isr == isr) {
+ break;
+ }
+
-+ spin_unlock_irqrestore(&dss_lock, flags);
++ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ return ret;
+}
+ u32 new_mask = DISPC_IRQ_MASK_ERROR;
+ int ret = -EINVAL;
+
-+ spin_lock_irqsave(&dss_lock, flags);
++ spin_lock_irqsave(&dispc.irq_lock, flags);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ if (registered_isr[i].isr != isr)
+ break;
+ }
+
-+ spin_unlock_irqrestore(&dss_lock, flags);
++ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ return ret;
+}
+ l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
+ l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
+ l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
-+ l = FLD_MOD(l, 1, 1, 1); /* AUTOIDLE */
++ l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
+ dispc_write_reg(DISPC_SYSCONFIG, l);
+
+ /* FUNCGATED */
+
+ /* Set logic clock to fck, pixel clock to fck/2 for now */
+ dispc_set_lcd_divisor(1, 2);
++
++ dispc_setup_plane_fifo(OMAP_DSS_GFX, 0);
++ dispc_setup_plane_fifo(OMAP_DSS_VIDEO1, 0);
++ dispc_setup_plane_fifo(OMAP_DSS_VIDEO2, 0);
+}
+
+int dispc_init(void)
+{
+ u32 rev;
+
-+ spin_lock_init(&dss_lock);
++ spin_lock_init(&dispc.irq_lock);
+
-+ dispc.base = ioremap(DISPC_BASE, SZ_1K);
++ dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
+ if (!dispc.base) {
+ DSSERR("can't ioremap DISPC\n");
+ return -ENOMEM;
+ }
+
-+ get_dss_clocks();
+ dispc.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
-+ if (IS_ERR(dispc.dpll4_m4_ck))
++ if (IS_ERR(dispc.dpll4_m4_ck)) {
+ DSSERR("Failed to get dpll4_m4_ck\n");
++ return -ENODEV;
++ }
+
+ enable_clocks(1);
+
+
+ _omap_dispc_initialize_irq();
+
++ dispc_save_context();
++
+ rev = dispc_read_reg(DISPC_REVISION);
+ printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
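The SR()/RR() macro pairs introduced above (in dispc.c here, and again in dsi.c and dss.c below) cache each register in a u32 array indexed by the register's byte offset divided by sizeof(u32), so the whole block can be re-programmed after the power domain loses context. A minimal stand-alone sketch of the same pattern, using made-up register offsets and a fake register file in place of the driver's MMIO accessors, could look like this:

#include <stdint.h>

#define REG_FOO 0x0040	/* hypothetical register offsets */
#define REG_BAR 0x0044
#define BLOCK_SZ 1024	/* size of the register block in bytes */

static uint32_t regs[BLOCK_SZ / sizeof(uint32_t)];	/* stands in for MMIO */
static uint32_t ctx[BLOCK_SZ / sizeof(uint32_t)];	/* saved context */

static uint32_t read_reg(unsigned idx) { return regs[idx / sizeof(uint32_t)]; }
static void write_reg(unsigned idx, uint32_t v) { regs[idx / sizeof(uint32_t)] = v; }

/* same idea as the driver's SR()/RR(): context slot = offset / sizeof(u32) */
#define SR(reg) (ctx[(reg) / sizeof(uint32_t)] = read_reg(reg))
#define RR(reg) write_reg((reg), ctx[(reg) / sizeof(uint32_t)])

static void save_context(void)    { SR(REG_FOO); SR(REG_BAR); }
static void restore_context(void) { RR(REG_FOO); RR(REG_BAR); }

One slot per register wastes a little RAM for offsets that are never saved, but it keeps the save/restore lists trivially readable, which is why the driver sizes the array by DISPC_SZ_REGS rather than by the number of saved registers.
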
diff --git a/arch/arm/plat-omap/dss/display.c b/arch/arm/plat-omap/dss/display.c
new file mode 100644
-index 0000000..4d7238f
+index 0000000..b7f7aff
--- /dev/null
+++ b/arch/arm/plat-omap/dss/display.c
-@@ -0,0 +1,775 @@
+@@ -0,0 +1,765 @@
+/*
+ * linux/arch/arm/plat-omap/dss/display.c
+ *
+static ssize_t show_clk(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
-+ struct clk *clocks[5];
-+ int i;
+ ssize_t l, size = PAGE_SIZE;
+
-+ clocks[0] = get_dss_ick();
-+ clocks[1] = get_dss1_fck();
-+ clocks[2] = get_dss2_fck();
-+ clocks[3] = get_tv_fck();
-+ clocks[4] = get_96m_fck();
-+
+ l = 0;
+
-+ l += snprintf(buf + l, size - l, "- dss -\n");
-+
-+ for (i = 0; i < 5; i++) {
-+ l += snprintf(buf + l, size - l, "%-15s\t%lu\t%d\n",
-+ clocks[i]->name,
-+ clk_get_rate(clocks[i]),
-+ clk_get_usecount(clocks[i]));
-+ }
++ l += dss_print_clocks(buf + l, size - l);
+
+ l += dispc_print_clocks(buf + l, size - l);
+#ifdef CONFIG_OMAP2_DSS_DSI
+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+{
+ int i;
-+ int r;
++ int r = 0;
+
+ DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+
+ return 0;
+ }
+
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
++
+ for (i = 0; i < mgr->num_overlays; i++) {
+ int ilace = 0;
+ int outw, outh;
+
+ if (r) {
+ DSSERR("dispc_setup_plane failed\n");
-+ return r;
++ goto exit;
+ }
+
+ dispc_enable_plane(ovl->id, 1);
+
+ dispc_go(mgr->id);
+
-+ return 0;
++exit:
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
++
++ return r;
+}
+
+static struct omap_overlay dispc_overlays[] = {
+EXPORT_SYMBOL(omap_dss_unregister_panel);
diff --git a/arch/arm/plat-omap/dss/dpi.c b/arch/arm/plat-omap/dss/dpi.c
new file mode 100644
-index 0000000..2261288
+index 0000000..e3ad44e
--- /dev/null
+++ b/arch/arm/plat-omap/dss/dpi.c
-@@ -0,0 +1,323 @@
+@@ -0,0 +1,320 @@
+/*
+ * linux/arch/arm/plat-omap/dss/dpi.c
+ *
+
+
+static struct {
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
+ int update_enabled;
+} dpi;
+
+ if (r)
+ return r;
+
-+ clk_enable(dpi.dss_ick);
-+ clk_enable(dpi.dss1_fck);
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-+ dsi_pll_init(0, 1);
++ dss_clk_enable(DSS_CLK_FCK2);
++ r = dsi_pll_init(0, 1);
++ if (r)
++ return r;
+#endif
+ is_tft = (display->panel->config & OMAP_DSS_LCD_TFT) != 0;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ dss_select_clk_source(0, 0);
+ dsi_pll_uninit();
++ dss_clk_disable(DSS_CLK_FCK2);
+#endif
+
-+ clk_disable(dpi.dss_ick);
-+ clk_disable(dpi.dss1_fck);
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ display->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+ dispc_enable_lcd_out(0);
+
-+ clk_disable(dpi.dss_ick);
-+ clk_disable(dpi.dss1_fck);
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ display->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+
+ dispc_enable_lcd_out(1);
+
-+ clk_enable(dpi.dss_ick);
-+ clk_enable(dpi.dss1_fck);
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ if (display->panel->resume)
+ display->panel->resume(display);
+ return -EINVAL;
+ }
+
++ if (timings->pixel_clock == 0)
++ return -EINVAL;
+
+ is_tft = (display->panel->config & OMAP_DSS_LCD_TFT) != 0;
+
+
+int dpi_init(void)
+{
-+ dpi.dss_ick = get_dss_ick();
-+ dpi.dss1_fck = get_dss1_fck();
-+
+ return 0;
+}
+
+
diff --git a/arch/arm/plat-omap/dss/dsi.c b/arch/arm/plat-omap/dss/dsi.c
new file mode 100644
-index 0000000..9f31ac3
+index 0000000..7f7db32
--- /dev/null
+++ b/arch/arm/plat-omap/dss/dsi.c
-@@ -0,0 +1,3020 @@
+@@ -0,0 +1,3135 @@
+/*
+ * linux/arch/arm/plat-omap/dss/dsi.c
+ *
+
+#include <mach/board.h>
+#include <mach/display.h>
++#include <mach/clock.h>
+
+#include "dss.h"
+
+
+#define DSI_REG(idx) ((const struct dsi_reg) { idx })
+
++#define DSI_SZ_REGS SZ_1K
+/* DSI Protocol Engine */
+
+#define DSI_REVISION DSI_REG(0x0000)
+
+/* DSIPHY_SCP */
+
-+#define DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
-+#define DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
-+#define DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
-+#define DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
++#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
++#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
++#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
++#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
+
+/* DSI_PLL_CTRL_SCP */
+
+{
+ void __iomem *base;
+
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
-+ struct clk *dss2_fck;
-+
+ unsigned long dsi1_pll_fclk; /* Hz */
+ unsigned long dsi2_pll_fclk; /* Hz */
+ unsigned long dsiphy; /* Hz */
+ unsigned long ddr_clk; /* Hz */
+
++ u32 ctx[DSI_SZ_REGS / sizeof(u32)];
++
+ struct {
+ enum fifo_size fifo_size;
+ int dest_per; /* destination peripheral 0-3 */
+
+ struct mutex lock;
+
++ unsigned pll_locked;
++
+ struct completion bta_completion;
+
+ spinlock_t update_lock;
+ int update_ongoing;
+ int update_syncers;
+ struct completion update_completion;
-+ struct work_struct framedone_work;
++ struct delayed_work framedone_work;
+
-+ enum omap_dss_update_mode update_mode;
++ enum omap_dss_update_mode user_update_mode; /* what the user wants */
++ enum omap_dss_update_mode update_mode; /* current mode */
+ int use_te;
+ int framedone_scheduled; /* helps to catch strange framedone bugs */
+
+#endif
+} dsi;
+
-+
+static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
+{
+ __raw_writel(val, dsi.base + idx.idx);
+ return __raw_readl(dsi.base + idx.idx);
+}
+
++
++#define SR(reg) \
++ dsi.ctx[(DSI_##reg).idx / sizeof(u32)] = dsi_read_reg(DSI_##reg)
++#define RR(reg) \
++ dsi_write_reg(DSI_##reg, dsi.ctx[(DSI_##reg).idx / sizeof(u32)])
++
++void dsi_save_context(void)
++{
++ SR(SYSCONFIG);
++ SR(IRQENABLE);
++ SR(CTRL);
++ SR(COMPLEXIO_CFG1);
++ SR(COMPLEXIO_IRQ_ENABLE);
++ SR(CLK_CTRL);
++ SR(TIMING1);
++ SR(TIMING2);
++ SR(VM_TIMING1);
++ SR(VM_TIMING2);
++ SR(VM_TIMING3);
++ SR(CLK_TIMING);
++ SR(TX_FIFO_VC_SIZE);
++ SR(RX_FIFO_VC_SIZE);
++ SR(COMPLEXIO_CFG2);
++ SR(VM_TIMING4);
++ SR(VM_TIMING5);
++ SR(VM_TIMING6);
++ SR(VM_TIMING7);
++ SR(STOPCLK_TIMING);
++
++ SR(VC_CTRL(0));
++ SR(VC_TE(0));
++ SR(VC_IRQENABLE(0));
++
++ SR(VC_CTRL(1));
++ SR(VC_TE(1));
++ SR(VC_IRQENABLE(1));
++
++ SR(VC_CTRL(2));
++ SR(VC_TE(2));
++ SR(VC_IRQENABLE(2));
++
++ SR(VC_CTRL(3));
++ SR(VC_TE(3));
++ SR(VC_IRQENABLE(3));
++
++ SR(DSIPHY_CFG0);
++ SR(DSIPHY_CFG1);
++ SR(DSIPHY_CFG2);
++ SR(DSIPHY_CFG5);
++
++ SR(PLL_CONTROL);
++ SR(PLL_CONFIGURATION1);
++ SR(PLL_CONFIGURATION2);
++}
++
++void dsi_restore_context(void)
++{
++ RR(SYSCONFIG);
++ RR(IRQENABLE);
++ RR(CTRL);
++ RR(COMPLEXIO_CFG1);
++ RR(COMPLEXIO_IRQ_ENABLE);
++ RR(CLK_CTRL);
++ RR(TIMING1);
++ RR(TIMING2);
++ RR(VM_TIMING1);
++ RR(VM_TIMING2);
++ RR(VM_TIMING3);
++ RR(CLK_TIMING);
++ RR(TX_FIFO_VC_SIZE);
++ RR(RX_FIFO_VC_SIZE);
++ RR(COMPLEXIO_CFG2);
++ RR(VM_TIMING4);
++ RR(VM_TIMING5);
++ RR(VM_TIMING6);
++ RR(VM_TIMING7);
++ RR(STOPCLK_TIMING);
++
++ RR(VC_CTRL(0));
++ RR(VC_IRQENABLE(0));
++
++ RR(VC_CTRL(1));
++ RR(VC_IRQENABLE(1));
++
++ RR(VC_CTRL(2));
++ RR(VC_IRQENABLE(2));
++
++ RR(VC_CTRL(3));
++ RR(VC_IRQENABLE(3));
++
++ RR(DSIPHY_CFG0);
++ RR(DSIPHY_CFG1);
++ RR(DSIPHY_CFG2);
++ RR(DSIPHY_CFG5);
++
++ RR(PLL_CONTROL);
++ RR(PLL_CONFIGURATION1);
++ RR(PLL_CONFIGURATION2);
++}
++
++#undef SR
++#undef RR
++
+static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
+ int value)
+{
-+ int t = 1000;
++ int t = 100000;
+
+ while (REG_GET(idx, bitnum, bitnum) != value) {
+ if (--t == 0)
+/* DSI func clock. this could also be DSI2_PLL_FCLK */
+static inline void enable_clocks(int enable)
+{
-+ if (enable) {
-+ clk_enable(dsi.dss_ick);
-+ clk_enable(dsi.dss1_fck);
-+ } else {
-+ clk_disable(dsi.dss1_fck);
-+ clk_disable(dsi.dss_ick);
-+ }
++ if (enable)
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
++ else
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+/* source clock for DSI PLL. this could also be PCLKFREE */
+static inline void dsi_enable_pll_clock(int enable)
+{
+ if (enable)
-+ clk_enable(dsi.dss2_fck);
++ dss_clk_enable(DSS_CLK_FCK2);
+ else
-+ clk_disable(dsi.dss2_fck);
++ dss_clk_disable(DSS_CLK_FCK2);
++
++ if (enable && dsi.pll_locked) {
++ if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
++ DSSERR("cannot lock PLL when enabling clocks\n");
++ }
+}
+
+#if 1
+ /* A dummy read using the SCP interface to any DSIPHY register is
+ * required after DSIPHY reset to complete the reset of the DSI complex
+ * I/O. */
-+ l = dsi_read_reg(DSIPHY_CFG5);
++ l = dsi_read_reg(DSI_DSIPHY_CFG5);
+
+ printk(KERN_DEBUG "DSI resets: ");
+
+ l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+ printk("CIO (%d) ", FLD_GET(l, 29, 29));
+
-+ l = dsi_read_reg(DSIPHY_CFG5);
++ l = dsi_read_reg(DSI_DSIPHY_CFG5);
+ printk("PHY (%x, %d, %d, %d)\n",
+ FLD_GET(l, 28, 26),
+ FLD_GET(l, 29, 29),
+ /* A dummy read using the SCP interface to any DSIPHY register is
+ * required after DSIPHY reset to complete the reset of the DSI complex
+ * I/O. */
-+ dsi_read_reg(DSIPHY_CFG5);
++ dsi_read_reg(DSI_DSIPHY_CFG5);
+
+ _dsi_print_reset_status();
+
+
+ if (dss_get_dsi_clk_source() == 0) {
+ /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
-+ r = clk_get_rate(dsi.dss1_fck);
++ r = dss_clk_get_rate(DSS_CLK_FCK1);
+ } else {
+ /* DSI FCLK source is DSI2_PLL_FCLK */
+ r = dsi.dsi2_pll_fclk;
+ }
+
+ if (n == (1 << 13) - 1) {
-+ DSSERR("DSI: Failed to find LP_CLK_DIVISOR\n");
++ DSSERR("Failed to find LP_CLK_DIVISOR\n");
+ return -EINVAL;
+ }
+
+ while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
+ udelay(1);
+ if (t++ > 1000) {
-+ DSSERR("DSI: Failed to set DSI PLL power mode to %d\n",
++ DSSERR("Failed to set DSI PLL power mode to %d\n",
+ state);
+ return -ENODEV;
+ }
+ memset(&best, 0, sizeof(best));
+
+ memset(&cur, 0, sizeof(cur));
-+ cur.clkin = clk_get_rate(dsi.dss2_fck);
++ cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ cur.use_dss2_fck = 1;
+ cur.highfreq = 0;
+
+ memset(&cur, 0, sizeof(cur));
+ cur.use_dss2_fck = use_dss2_fck;
+ if (use_dss2_fck) {
-+ cur.clkin = clk_get_rate(dsi.dss2_fck);
++ cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ cur.highfreq = 0;
+ } else {
+ cur.clkin = dispc_pclk_rate();
+ DSSDBG("dsi_pll_program\n");
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
+ dsi.dsiphy = cinfo->dsiphy;
+ dsi.ddr_clk = dsi.dsiphy / 4;
+ }
+
+ if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
-+ DSSERR("DSI: cannot lock PLL\n");
++ DSSERR("cannot lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+
++ dsi.pll_locked = 1;
++
+ l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+ l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
+ l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
+ DSSDBG("PLL config done\n");
+err:
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ return r;
+}
+ return r;
+
+ r = dispc_set_clock_div(&cinfo);
-+ if (r)
++ if (r) {
++ DSSERR("Failed to set basic clocks\n");
+ return r;
++ }
+
+ /* PLL does not come out of reset without this... */
+ dispc_pck_free_enable(1);
+
+ if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
-+ DSSERR("DSI: PLL not coming out of reset.\n");
++ DSSERR("PLL not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
+ }
+ goto err;
+
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ DSSDBG("PLL init done\n");
+
+
+void dsi_pll_uninit(void)
+{
++ dsi.pll_locked = 0;
+ dsi_pll_power(DSI_PLL_POWER_OFF);
-+ dsi_enable_pll_clock(0);
+ DSSDBG("PLL uninit done\n");
+}
+
+ while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
+ udelay(1);
+ if (t++ > 1000) {
-+ DSSERR("DSI: failed to set complexio power state to "
++ DSSERR("failed to set complexio power state to "
+ "%d\n", state);
+ return -ENODEV;
+ }
+
+ /* program timings */
+
-+ r = dsi_read_reg(DSIPHY_CFG0);
++ r = dsi_read_reg(DSI_DSIPHY_CFG0);
+ r = FLD_MOD(r, ths_prepare, 31, 24);
+ r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
+ r = FLD_MOD(r, ths_trail, 15, 8);
+ r = FLD_MOD(r, ths_exit, 7, 0);
-+ dsi_write_reg(DSIPHY_CFG0, r);
++ dsi_write_reg(DSI_DSIPHY_CFG0, r);
+
-+ r = dsi_read_reg(DSIPHY_CFG1);
++ r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ r = FLD_MOD(r, tlpx_half, 22, 16);
+ r = FLD_MOD(r, tclk_trail, 15, 8);
+ r = FLD_MOD(r, tclk_zero, 7, 0);
-+ dsi_write_reg(DSIPHY_CFG1, r);
++ dsi_write_reg(DSI_DSIPHY_CFG1, r);
+
-+ r = dsi_read_reg(DSIPHY_CFG2);
++ r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ r = FLD_MOD(r, tclk_prepare, 7, 0);
-+ dsi_write_reg(DSIPHY_CFG2, r);
++ dsi_write_reg(DSI_DSIPHY_CFG2, r);
+}
+
+
+ /* CIO_CLK_ICG, enable L3 clk to CIO */
+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
+
-+ if (wait_for_bit_change(DSIPHY_CFG5, 30, 1) != 1) {
-+ DSSERR("DSI: ComplexIO PHY not coming out of reset.\n");
++ if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
++ DSSERR("ComplexIO PHY not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
+ }
+ goto err;
+
+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
-+ DSSERR("DSI: ComplexIO not coming out of reset.\n");
++ DSSERR("ComplexIO not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
-+ DSSERR("DSI: ComplexIO LDO power down.\n");
++ DSSERR("ComplexIO LDO power down.\n");
+ r = -ENODEV;
+ goto err;
+ }
+ int size = dsi.vc[i].fifo_size;
+
+ if (add + size > 4) {
-+ DSSERR("DSI: Illegal FIFO configuration\n");
++ DSSERR("Illegal FIFO configuration\n");
+ BUG();
+ }
+
+ int size = dsi.vc[i].fifo_size;
+
+ if (add + size > 4) {
-+ DSSERR("DSI: Illegal FIFO configuration\n");
++ DSSERR("Illegal FIFO configuration\n");
+ BUG();
+ }
+
+
+ /* len + header */
+ if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
-+ DSSERR("DSI: unable to send long packet: packet too long.\n");
++ DSSERR("unable to send long packet: packet too long.\n");
+ return -EINVAL;
+ }
+
+ int ddr_clk_pre, ddr_clk_post;
+ u32 r;
+
-+ r = dsi_read_reg(DSIPHY_CFG1);
++ r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ tlpx_half = FLD_GET(r, 22, 16);
+ tclk_trail = FLD_GET(r, 15, 8);
+ tclk_zero = FLD_GET(r, 7, 0);
+
-+ r = dsi_read_reg(DSIPHY_CFG2);
++ r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ tclk_prepare = FLD_GET(r, 7, 0);
+
+ /* min 8*UI */
-+ tclk_pre = 4;
++ tclk_pre = 20;
+ /* min 60ns + 52*UI */
+ tclk_post = ns2ddr(60) + 26;
+
+ return -EINVAL;
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
+ scr_width = ovl->info.screen_width;
+ data = ovl->info.vaddr;
+ end_measuring("L4");
+
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ return 0;
+}
+}
+#endif
+
-+static int dsi_wait_for_framedone(void)
++static int dsi_wait_for_framedone(int stop_update)
+{
+ unsigned long flags;
+
+ if (dsi.update_ongoing) {
+ long wait = msecs_to_jiffies(1000);
+ dsi.update_syncers++;
++ if (stop_update)
++ dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ spin_unlock_irqrestore(&dsi.update_lock, flags);
+ wait = wait_for_completion_timeout(&dsi.update_completion,
+ wait);
+ w = dsi.update_region.w;
+ h = dsi.update_region.h;
+
-+ DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
-+ x, y, w, h);
++ if (dsi.user_update_mode != OMAP_DSS_UPDATE_AUTO)
++ DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
++ x, y, w, h);
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
+ /* TODO: one packet could be longer, I think? Max is the line buffer */
+ line_packet_len = w * bytespp + 1; /* 1 byte for DCS cmd */
+ * is sending the data. Thus we have to wait until we can do a new
+ * transfer or turn the clocks off. We do that in a separate work
+ * func. */
-+ schedule_work(&dsi.framedone_work);
++ /* XXX When using auto update and delay value 0, the kernel seems to be
++ * very relaxed about when to call our callback. It may take a second.
++ * Thus we use a delay of 1 */
++ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
++ schedule_delayed_work(&dsi.framedone_work, 1);
++ else
++ schedule_delayed_work(&dsi.framedone_work, 0);
+}
+
+static void framedone_worker(struct work_struct *work)
+
+ end_measuring("DISPC");
+
-+ DSSDBG("FRAMEDONE\n");
++ if (dsi.user_update_mode != OMAP_DSS_UPDATE_AUTO)
++ DSSDBG("FRAMEDONE\n");
+
+#if 0
+ if (l)
+ DSSWARN("FRAMEDONE irq too early, %d bytes, %d loops\n", l, i);
+#endif
+
-+ spin_lock_irqsave(&dsi.update_lock, flags);
-+ dsi.update_ongoing = 0;
-+ while (dsi.update_syncers > 0) {
-+ complete(&dsi.update_completion);
-+ --dsi.update_syncers;
-+ }
-+ spin_unlock_irqrestore(&dsi.update_lock, flags);
-+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+ dispc_fake_vsync_irq();
+#endif
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ dsi.framedone_scheduled = 0;
+
++ spin_lock_irqsave(&dsi.update_lock, flags);
++
++ if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
++ dsi.update_ongoing = 0;
++
++ while (dsi.update_syncers > 0) {
++ complete(&dsi.update_completion);
++ --dsi.update_syncers;
++ }
++
+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
-+ spin_lock_irqsave(&dsi.update_lock, flags);
-+ dsi.update_ongoing = 1;
+ spin_unlock_irqrestore(&dsi.update_lock, flags);
+ dsi_update_screen_dispc(dsi.update_region.display);
++ } else {
++ spin_unlock_irqrestore(&dsi.update_lock, flags);
+ }
+}
+
+ dsi.update_region.bytespp = bytespp;
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
+ dispc_set_lcd_size(display->x_res, display->y_res);
+
+
+static void dsi_stop_auto_update(void)
+{
-+ dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
-+
+ DSSDBG("waiting for display to finish.\n");
-+ dsi_wait_for_framedone();
++ dsi_wait_for_framedone(1);
+ DSSDBG("done waiting\n");
-+ enable_clocks(0);
+
-+ dsi.update_mode = OMAP_DSS_UPDATE_MANUAL;
++ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+}
+
+static int dsi_set_update_mode(struct omap_display *display,
+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
+ dsi_stop_auto_update();
+ else if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
-+ dsi_wait_for_framedone();
++ dsi_wait_for_framedone(0);
+
+ dsi.update_mode = mode;
+
+ }
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
+ r = omap_dispc_register_isr(framedone_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+
+ display->state = OMAP_DSS_DISPLAY_ACTIVE;
+
-+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
-+ dsi_start_auto_update(display);
++ dsi_set_update_mode(display, dsi.user_update_mode);
+
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+ mutex_unlock(&dsi.lock);
+
+ return 0;
+ omap_dispc_unregister_isr(framedone_callback);
+err1:
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+err0:
+ mutex_unlock(&dsi.lock);
+ DSSDBG("dsi_display_enable FAILED\n");
+ goto end;
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
-+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
-+ dsi_stop_auto_update();
-+ else if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
-+ dsi_wait_for_framedone();
++ dsi_set_update_mode(display, OMAP_DSS_UPDATE_DISABLED);
+
+ display->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ dsi_pll_uninit();
+
+ enable_clocks(0);
-+
++ dsi_enable_pll_clock(0);
+end:
+ mutex_unlock(&dsi.lock);
+}
+ if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
+ goto end;
+
-+ r = dsi_wait_for_framedone();
++ r = dsi_wait_for_framedone(0);
+
+end:
+ mutex_unlock(&dsi.lock);
+ mutex_lock(&dsi.lock);
+
+ r = dsi_set_update_mode(display, mode);
++ dsi.user_update_mode = mode;
+
+ mutex_unlock(&dsi.lock);
+
+static enum omap_dss_update_mode dsi_display_get_update_mode(
+ struct omap_display *display)
+{
-+ return dsi.update_mode;
++ return dsi.user_update_mode;
+}
+
+static int dsi_display_enable_te(struct omap_display *display, int enable)
+{
-+ enum omap_dss_update_mode mode;
-+
+ DSSDBG("dsi_display_enable_te\n");
+
+ mutex_lock(&dsi.lock);
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
-+ mode = dsi.update_mode;
-+
-+ /* XXX perhaps suspend or something would be better here */
-+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
-+ dsi_stop_auto_update();
-+ else if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
-+ dsi_wait_for_framedone();
++ dsi_set_update_mode(display, OMAP_DSS_UPDATE_DISABLED);
+
+ dsi.use_te = enable;
+ display->ctrl->enable_te(display, enable);
+ }
+
+ /* restore the old update mode */
-+ dsi_set_update_mode(display, mode);
++ dsi_set_update_mode(display, dsi.user_update_mode);
+
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ mutex_unlock(&dsi.lock);
+
+
+static int dsi_display_run_test(struct omap_display *display, int test_num)
+{
-+ enum omap_dss_update_mode mode;
+ int r = 0;
+
+ DSSDBG("dsi_display_run_test %d\n", test_num);
+ mutex_lock(&dsi.lock);
+
+ enable_clocks(1);
++ dsi_enable_pll_clock(1);
+
-+ mode = dsi.update_mode;
-+
-+ /* XXX perhaps suspend or something would be better here */
-+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
-+ dsi_stop_auto_update();
-+ else if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
-+ dsi_wait_for_framedone();
++ dsi_set_update_mode(display, OMAP_DSS_UPDATE_DISABLED);
+
+ /* run test first in low speed mode */
+ dsi_vc_enable_hs(0, 0);
+ dsi_vc_enable_hs(0, 1);
+
+ /* restore the old update mode */
-+ dsi_set_update_mode(display, mode);
++ dsi_set_update_mode(display, dsi.user_update_mode);
+
+ enable_clocks(0);
++ dsi_enable_pll_clock(0);
+
+ mutex_unlock(&dsi.lock);
+
+ u32 rev;
+
+ init_completion(&dsi.bta_completion);
-+ INIT_WORK(&dsi.framedone_work, framedone_worker);
++ INIT_DELAYED_WORK(&dsi.framedone_work, framedone_worker);
+
+ init_completion(&dsi.update_completion);
+ spin_lock_init(&dsi.update_lock);
+
+ mutex_init(&dsi.lock);
+
-+ dsi.base = ioremap(DSI_BASE, SZ_1K);
++ dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+ if (!dsi.base) {
+ DSSERR("can't ioremap DSI\n");
+ return -ENOMEM;
+ }
+
-+ dsi.dss_ick = get_dss_ick();
-+ dsi.dss1_fck = get_dss1_fck();
-+ dsi.dss2_fck = get_dss2_fck();
-+
+ enable_clocks(1);
+
+ /* Autoidle */
+
diff --git a/arch/arm/plat-omap/dss/dss.c b/arch/arm/plat-omap/dss/dss.c
new file mode 100644
-index 0000000..8450ddd
+index 0000000..e585fcd
--- /dev/null
+++ b/arch/arm/plat-omap/dss/dss.c
-@@ -0,0 +1,554 @@
+@@ -0,0 +1,784 @@
+/*
+ * linux/arch/arm/plat-omap/dss/dss.c
+ *
+
+#define DSS_BASE 0x48050000
+
++#define DSS_SZ_REGS SZ_512
++
+struct dss_reg {
+ u16 idx;
+};
+#define DSS_PLL_CONTROL DSS_REG(0x0048)
+#define DSS_SDI_STATUS DSS_REG(0x005C)
+
++#define REG_GET(idx, start, end) \
++ FLD_GET(dss_read_reg(idx), start, end)
++
+#define REG_FLD_MOD(idx, val, start, end) \
+ dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+
+ struct clk *dss2_fck;
+ struct clk *dss_54m_fck;
+ struct clk *dss_96m_fck;
++
++ unsigned num_clks_enabled;
++ struct platform_device *pdev;
++ unsigned ctx_id;
++ u32 ctx[DSS_SZ_REGS / sizeof(u32)];
+} dss;
+
++/* PM TESTING */
++#if 1
++static unsigned last_tr_id;
++
++unsigned get_last_off_on_transaction_id(struct device *dev)
++{
++ return last_tr_id;
++}
++
++void inc_last_off_on_transaction_id(void)
++{
++ last_tr_id++;
++}
++#endif
++
++static void dss_clk_enable_all_no_ctx(void);
++static void dss_clk_disable_all_no_ctx(void);
++static void dss_clk_enable_no_ctx(enum dss_clock clks);
++static void dss_clk_disable_no_ctx(enum dss_clock clks);
++static int _omap_dss_wait_reset(void);
++
+static inline void dss_write_reg(const struct dss_reg idx, u32 val)
+{
+ __raw_writel(val, dss.base + idx.idx);
+ return __raw_readl(dss.base + idx.idx);
+}
+
++#define SR(reg) \
++ dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
++#define RR(reg) \
++ dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
++
++static void dss_save_context(void)
++{
++ SR(SYSCONFIG);
++ SR(CONTROL);
++ SR(SDI_CONTROL);
++ SR(PLL_CONTROL);
++}
++
++static void dss_restore_context(void)
++{
++ RR(SYSCONFIG);
++ RR(CONTROL);
++ RR(SDI_CONTROL);
++ RR(PLL_CONTROL);
++}
++
++#undef SR
++#undef RR
++
++unsigned get_last_off_on_transaction_id(struct device *dev);
++
++unsigned dss_get_id(void)
++{
++ return get_last_off_on_transaction_id(&dss.pdev->dev);
++}
++
++static void save_all_ctx(void)
++{
++ //printk("save context\n");
++
++ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
++
++ dss_save_context();
++ dispc_save_context();
++#ifdef CONFIG_OMAP2_DSS_DSI
++ dsi_save_context();
++#endif
++
++ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
++}
++
++static void restore_all_ctx(void)
++{
++ //printk("restore context\n");
++
++ dss_clk_enable_all_no_ctx();
++
++ if (_omap_dss_wait_reset())
++ DSSERR("DSS not coming out of reset after sleep\n");
++
++ dss_restore_context();
++ dispc_restore_context();
++#ifdef CONFIG_OMAP2_DSS_DSI
++ dsi_restore_context();
++#endif
++
++ dss_clk_disable_all_no_ctx();
++}
++
+void dss_sdi_init(int datapairs)
+{
+ u32 l;
+ ;
+}
+
++ssize_t dss_print_clocks(char *buf, ssize_t size)
++{
++ ssize_t l = 0;
++ int i;
++ struct clk *clocks[5] = {
++ dss.dss_ick,
++ dss.dss1_fck,
++ dss.dss2_fck,
++ dss.dss_54m_fck,
++ dss.dss_96m_fck
++ };
++
++ l += snprintf(buf + l, size - l, "- dss -\n");
++
++ l += snprintf(buf + l, size - l, "internal clk count\t%u\n",
++ dss.num_clks_enabled);
++
++ for (i = 0; i < 5; i++) {
++ if (!clocks[i])
++ continue;
++ l += snprintf(buf + l, size - l, "%-15s\t%lu\t%d\n",
++ clocks[i]->name,
++ clk_get_rate(clocks[i]),
++ clk_get_usecount(clocks[i]));
++ }
++
++ return l;
++}
++
+static int get_dss_clocks(void)
+{
+ const struct {
+ clk_put(dss.dss_ick);
+}
+
-+struct clk *get_dss_ick(void)
++unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
-+ return dss.dss_ick;
++ switch (clk) {
++ case DSS_CLK_ICK:
++ return clk_get_rate(dss.dss_ick);
++ case DSS_CLK_FCK1:
++ return clk_get_rate(dss.dss1_fck);
++ case DSS_CLK_FCK2:
++ return clk_get_rate(dss.dss2_fck);
++ case DSS_CLK_54M:
++ return clk_get_rate(dss.dss_54m_fck);
++ case DSS_CLK_96M:
++ return clk_get_rate(dss.dss_96m_fck);
++ }
++
++ BUG();
++ return 0;
+}
+
-+struct clk *get_dss1_fck(void)
++static unsigned count_clk_bits(enum dss_clock clks)
+{
-+ return dss.dss1_fck;
++ unsigned num_clks = 0;
++
++ if (clks & DSS_CLK_ICK)
++ ++num_clks;
++ if (clks & DSS_CLK_FCK1)
++ ++num_clks;
++ if (clks & DSS_CLK_FCK2)
++ ++num_clks;
++ if (clks & DSS_CLK_54M)
++ ++num_clks;
++ if (clks & DSS_CLK_96M)
++ ++num_clks;
++
++ return num_clks;
+}
+
-+struct clk *get_dss2_fck(void)
++static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
-+ return dss.dss2_fck;
++ unsigned num_clks = count_clk_bits(clks);
++
++ if (clks & DSS_CLK_ICK)
++ clk_enable(dss.dss_ick);
++ if (clks & DSS_CLK_FCK1)
++ clk_enable(dss.dss1_fck);
++ if (clks & DSS_CLK_FCK2)
++ clk_enable(dss.dss2_fck);
++ if (clks & DSS_CLK_54M)
++ clk_enable(dss.dss_54m_fck);
++ if (clks & DSS_CLK_96M)
++ clk_enable(dss.dss_96m_fck);
++
++ dss.num_clks_enabled += num_clks;
+}
+
-+struct clk *get_tv_fck(void)
++void dss_clk_enable(enum dss_clock clks)
+{
-+ return dss.dss_54m_fck;
++ int id;
++
++ dss_clk_enable_no_ctx(clks);
++
++ id = dss_get_id();
++
++ if (id != dss.ctx_id) {
++ printk("old id %u, new id %u\n",
++ dss.ctx_id, id);
++ restore_all_ctx();
++ dss.ctx_id = id;
++ }
+}
+
-+struct clk *get_96m_fck(void)
++static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
-+ return dss.dss_96m_fck;
++ unsigned num_clks = count_clk_bits(clks);
++
++ if (clks & DSS_CLK_ICK)
++ clk_disable(dss.dss_ick);
++ if (clks & DSS_CLK_FCK1)
++ clk_disable(dss.dss1_fck);
++ if (clks & DSS_CLK_FCK2)
++ clk_disable(dss.dss2_fck);
++ if (clks & DSS_CLK_54M)
++ clk_disable(dss.dss_54m_fck);
++ if (clks & DSS_CLK_96M)
++ clk_disable(dss.dss_96m_fck);
++
++ dss.num_clks_enabled -= num_clks;
+}
+
-+static void enable_dss_clocks(void)
++void dss_clk_disable(enum dss_clock clks)
+{
-+ clk_enable(dss.dss_ick);
-+ clk_enable(dss.dss1_fck);
-+ clk_enable(dss.dss2_fck);
-+ clk_enable(dss.dss_54m_fck);
-+ if (dss.dss_96m_fck)
-+ clk_enable(dss.dss_96m_fck);
++ unsigned num_clks = count_clk_bits(clks);
++
++ BUG_ON(dss.num_clks_enabled < num_clks);
++
++ if (dss.num_clks_enabled == num_clks)
++ save_all_ctx();
++
++ dss_clk_disable_no_ctx(clks);
+}
+
-+static void disable_dss_clocks(void)
++static void dss_clk_enable_all_no_ctx(void)
+{
-+ clk_disable(dss.dss_ick);
-+ clk_disable(dss.dss1_fck);
-+ clk_disable(dss.dss2_fck);
-+ clk_disable(dss.dss_54m_fck);
-+ if (dss.dss_96m_fck)
-+ clk_disable(dss.dss_96m_fck);
++ enum dss_clock clks;
++
++ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
++ if (cpu_is_omap34xx())
++ clks |= DSS_CLK_96M;
++ dss_clk_enable_no_ctx(clks);
++}
++
++static void dss_clk_disable_all_no_ctx(void)
++{
++ enum dss_clock clks;
++
++ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
++ if (cpu_is_omap34xx())
++ clks |= DSS_CLK_96M;
++ dss_clk_disable_no_ctx(clks);
++}
++
++static void dss_clk_disable_all(void)
++{
++ enum dss_clock clks;
++
++ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
++ if (cpu_is_omap34xx())
++ clks |= DSS_CLK_96M;
++ dss_clk_disable(clks);
+}
+
+void dss_select_clk_source(int dsi, int dispc)
+ return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0);
+}
+
-+static irqreturn_t dss_irq_handler(int irq, void *arg)
++static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
++{
++ //clk_enable(dss.dss_ick);
++ //clk_enable(dss.dss1_fck);
++
++ dispc_irq_handler();
++
++ //clk_disable(dss.dss1_fck);
++ //clk_disable(dss.dss_ick);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
+{
-+#ifdef CONFIG_ARCH_OMAP3
+ u32 irqstatus;
+
-+ clk_enable(dss.dss_ick);
-+ clk_enable(dss.dss1_fck);
++ //clk_enable(dss.dss_ick); // XXX are these needed...
++ //clk_enable(dss.dss1_fck);
+
+ irqstatus = dss_read_reg(DSS_IRQSTATUS);
+
+ if (irqstatus & (1<<1)) /* DSI_IRQ */
+ dsi_irq_handler();
+#endif
-+#else /* OMAP2 */
-+ dispc_irq_handler();
-+#endif
+
-+ clk_disable(dss.dss1_fck);
-+ clk_disable(dss.dss_ick);
++ //clk_disable(dss.dss1_fck);
++ //clk_disable(dss.dss_ick);
+
+ return IRQ_HANDLED;
+}
+
-+
-+static int _omap_dss_reset(void)
++static int _omap_dss_wait_reset(void)
+{
-+ int timeout = 10000;
-+ int r = 0;
-+
-+ /* Soft reset */
-+ REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
++ unsigned timeout = 1000;
+
-+ while (!(dss_read_reg(DSS_SYSSTATUS) & 1)) {
++ while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
++ udelay(1);
+ if (!--timeout) {
+ DSSERR("soft reset failed\n");
-+ r = -ENODEV;
-+ break;
++ return -ENODEV;
+ }
+ }
+
-+ return r;
++ return 0;
++}
++
++static int _omap_dss_reset(void)
++{
++ /* Soft reset */
++ REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
++ return _omap_dss_wait_reset();
+}
+
+void dss_set_venc_output(enum omap_dss_venc_type type)
+ int r;
+ u32 rev;
+
-+ dss.base = ioremap(DSS_BASE, SZ_512);
++ dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+ if (!dss.base) {
+ DSSERR("can't ioremap DSS\n");
+ r = -ENOMEM;
+ goto fail0;
+ }
+
-+ r = get_dss_clocks();
-+ if (r)
-+ goto fail1;
-+
-+ enable_dss_clocks();
-+
+ /* We need to wait here a bit, otherwise we sometimes start to get
+ * synclost errors. I believe we could wait for one framedone or
+ * perhaps vsync interrupt, but, because dispc is not initialized yet,
+ * we don't have access to the irq register.
+ */
-+ msleep(40);
++ msleep(400);
+
+ _omap_dss_reset();
+
+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+
-+ r = request_irq(INT_24XX_DSS_IRQ, dss_irq_handler,
++ r = request_irq(INT_24XX_DSS_IRQ,
++ cpu_is_omap24xx()
++ ? dss_irq_handler_omap2
++ : dss_irq_handler_omap3,
+ 0, "OMAP DSS", NULL);
+
+ if (r < 0) {
+ DSSERR("omap2 dss: request_irq failed\n");
-+ goto fail2;
++ goto fail1;
+ }
+
++ dss_save_context();
++
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
-+ disable_dss_clocks();
+ return 0;
+
-+fail2:
-+ disable_dss_clocks();
-+ put_dss_clocks();
+fail1:
+ iounmap(dss.base);
+fail0:
+
+ int r;
+
++ dss.pdev = pdev;
++
++ r = get_dss_clocks();
++ if (r)
++ goto fail0;
++
++ dss_clk_enable_all_no_ctx();
++
++ dss.ctx_id = dss_get_id();
++ printk("initial id %u\n", dss.ctx_id);
++
+ r = dss_init();
+ if (r) {
+ DSSERR("Failed to initialize DSS\n");
+
+ initialize_overlays();
+
++ dss_clk_disable_all();
++
+ return 0;
+
++ /* XXX fail correctly */
+fail0:
+ return r;
+}
+
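The clock framework above ties context handling to clock gating: dss_clk_disable() saves the DSS, DISPC and DSI contexts just before the last enabled clock goes off, and dss_clk_enable() restores them when the off/on transaction id reported for the device has changed, i.e. when the power domain has actually been off in between. A condensed, stand-alone sketch of that policy, with a plain counter standing in for the real clk_enable()/clk_disable() calls and for get_last_off_on_transaction_id(), could look like this:

static unsigned num_clks_enabled;	/* how many DSS clocks are currently on */
static unsigned ctx_id;			/* last seen off/on transaction id */

static unsigned query_off_on_id(void)
{
	/* stand-in: the driver asks the PM core via
	 * get_last_off_on_transaction_id(&pdev->dev) */
	static unsigned id;
	return id;
}

static void save_ctx(void)    { /* dss/dispc/dsi_save_context() in the driver */ }
static void restore_ctx(void) { /* ..._restore_context() in the driver */ }

static void dss_clks_on(unsigned nclks)
{
	unsigned id;

	num_clks_enabled += nclks;	/* clocks are physically enabled first */

	id = query_off_on_id();
	if (id != ctx_id) {		/* domain was off: registers were lost */
		restore_ctx();
		ctx_id = id;
	}
}

static void dss_clks_off(unsigned nclks)
{
	if (num_clks_enabled == nclks)	/* last user: save before gating off */
		save_ctx();
	num_clks_enabled -= nclks;
}

Note that in the driver the save path itself re-enables ICK/FCK1 through the *_no_ctx variants, so saving the context cannot recurse back into dss_clk_enable().
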
diff --git a/arch/arm/plat-omap/dss/dss.h b/arch/arm/plat-omap/dss/dss.h
new file mode 100644
-index 0000000..28929b9
+index 0000000..04abdc6
--- /dev/null
+++ b/arch/arm/plat-omap/dss/dss.h
-@@ -0,0 +1,254 @@
+@@ -0,0 +1,268 @@
+/*
+ * linux/arch/arm/plat-omap/dss/dss.h
+ *
+ OMAP_DSS_PARALLELMODE_DSI,
+};
+
++enum dss_clock {
++ DSS_CLK_ICK = 1 << 0,
++ DSS_CLK_FCK1 = 1 << 1,
++ DSS_CLK_FCK2 = 1 << 2,
++ DSS_CLK_54M = 1 << 3,
++ DSS_CLK_96M = 1 << 4,
++};
++
+struct dispc_clock_info {
+ /* rates that we get with dividers below */
+ unsigned long fck;
+int dss_init(void);
+void dss_exit(void);
+
++void dss_clk_enable(enum dss_clock clks);
++void dss_clk_disable(enum dss_clock clks);
++
+void dss_sdi_init(int datapairs);
+void dss_select_clk_source(int dsi, int dispc);
+int dss_get_dsi_clk_source(void);
+int dss_get_dispc_clk_source(void);
+void dss_set_venc_output(enum omap_dss_venc_type type);
+void dss_set_dac_pwrdn_bgz(int enable);
-+
-+struct clk *get_dss_ick(void);
-+struct clk *get_dss1_fck(void);
-+struct clk *get_dss2_fck(void);
-+struct clk *get_tv_fck(void);
-+struct clk *get_96m_fck(void);
++unsigned long dss_clk_get_rate(enum dss_clock clk);
++ssize_t dss_print_clocks(char *buf, ssize_t size);
+
+/* SDI */
+int sdi_init(void);
+/* DSI */
+int dsi_init(void);
+void dsi_exit(void);
++
++void dsi_save_context(void);
++void dsi_restore_context(void);
++
+void dsi_init_display(struct omap_display *display);
+void dsi_irq_handler(void);
+unsigned long dsi_get_dsi1_pll_rate(void);
+void dispc_irq_handler(void);
+void dispc_fake_vsync_irq(void);
+
++void dispc_save_context(void);
++void dispc_restore_context(void);
++
+void dispc_lcd_enable_signal_polarity(int act_high);
+void dispc_lcd_enable_signal(int enable);
+void dispc_pck_free_enable(int enable);
+#endif
diff --git a/arch/arm/plat-omap/dss/rfbi.c b/arch/arm/plat-omap/dss/rfbi.c
new file mode 100644
-index 0000000..31ddd24
+index 0000000..eaf6e2c
--- /dev/null
+++ b/arch/arm/plat-omap/dss/rfbi.c
-@@ -0,0 +1,1234 @@
+@@ -0,0 +1,1225 @@
+/*
+ * linux/arch/arm/plat-omap/dss/rfbi.c
+ *
+static struct {
+ void __iomem *base;
+
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
-+
+ unsigned long l4_khz;
+
+ enum omap_rfbi_datatype datatype;
+
+static void rfbi_enable_clocks(int enable)
+{
-+ if (enable) {
-+ clk_enable(rfbi.dss_ick);
-+ clk_enable(rfbi.dss1_fck);
-+ } else {
-+ clk_disable(rfbi.dss1_fck);
-+ clk_disable(rfbi.dss_ick);
-+ }
++ if (enable)
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
++ else
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+void omap_rfbi_write_command(const void *buf, u32 len)
+ };
+
+ l4_rate = rfbi.l4_khz / 1000;
-+ dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
++ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+
+ for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+ /* Use a window instead of an exact match, to account
+ return -ENOMEM;
+ }
+
-+ rfbi.dss_ick = get_dss_ick();
-+ rfbi.dss1_fck = get_dss1_fck();
-+
+ rfbi_enable_clocks(1);
+
+ msleep(10);
+
-+ rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
++ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+ /* Enable autoidle and smart-idle */
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+}
diff --git a/arch/arm/plat-omap/dss/sdi.c b/arch/arm/plat-omap/dss/sdi.c
new file mode 100644
-index 0000000..de19d47
+index 0000000..8d5b16d
--- /dev/null
+++ b/arch/arm/plat-omap/dss/sdi.c
-@@ -0,0 +1,157 @@
+@@ -0,0 +1,150 @@
+/*
+ * linux/arch/arm/plat-omap/dss/sdi.c
+ *
+
+
+static struct {
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
+ int update_enabled;
+} sdi;
+
+
+ panel->enable(display);
+
-+ clk_enable(sdi.dss_ick);
-+ clk_enable(sdi.dss1_fck);
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_lcd_size(display->x_res, display->y_res);
+ display->panel->disable(display);
+ dispc_enable_lcd_out(0);
+
-+ clk_disable(sdi.dss_ick);
-+ clk_disable(sdi.dss1_fck);
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+static void sdi_display_set_mode(struct omap_display *display,
+
+int sdi_init(void)
+{
-+ sdi.dss_ick = get_dss_ick();
-+ sdi.dss1_fck = get_dss1_fck();
-+
+ return 0;
+}
+
+}
diff --git a/arch/arm/plat-omap/dss/venc.c b/arch/arm/plat-omap/dss/venc.c
new file mode 100644
-index 0000000..2ed68b5
+index 0000000..7afb2c5
--- /dev/null
+++ b/arch/arm/plat-omap/dss/venc.c
-@@ -0,0 +1,515 @@
+@@ -0,0 +1,501 @@
+/*
+ * linux/arch/arm/plat-omap/dss/venc.c
+ *
+
+static struct {
+ void __iomem *base;
-+ struct clk *dss_54m_fck;
-+ struct clk *dss_96m_fck;
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
+ const struct venc_config *config;
+ struct mutex venc_lock;
+} venc;
+
+static void venc_enable_clocks(int enable)
+{
-+ if (enable) {
-+ clk_enable(venc.dss_ick);
-+ clk_enable(venc.dss1_fck);
-+ clk_enable(venc.dss_54m_fck);
-+ clk_enable(venc.dss_96m_fck);
-+ } else {
-+ clk_disable(venc.dss_96m_fck);
-+ clk_disable(venc.dss_54m_fck);
-+ clk_disable(venc.dss1_fck);
-+ clk_disable(venc.dss_ick);
-+ }
++ if (enable)
++ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
++ DSS_CLK_96M);
++ else
++ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
++ DSS_CLK_96M);
+}
+
+int venc_init(void)
+ return -ENOMEM;
+ }
+
-+ venc.dss_ick = get_dss_ick();
-+ venc.dss1_fck = get_dss1_fck();
-+ venc.dss_54m_fck = get_tv_fck();
-+ venc.dss_96m_fck = get_96m_fck();
-+
+ /* enable clocks */
+ venc_enable_clocks(1);
+
+}
diff --git a/arch/arm/plat-omap/include/mach/display.h b/arch/arm/plat-omap/include/mach/display.h
new file mode 100644
-index 0000000..2e55fae
+index 0000000..95f5b3a
--- /dev/null
+++ b/arch/arm/plat-omap/include/mach/display.h
-@@ -0,0 +1,458 @@
+@@ -0,0 +1,463 @@
+/*
+ * linux/include/asm-arm/arch-omap/display.h
+ *
+ void (*panel_disable)(struct omap_display *display);
+ int (*ctrl_enable)(struct omap_display *display);
+ void (*ctrl_disable)(struct omap_display *display);
++ int (*set_backlight)(struct omap_display *display,
++ int level);
+};
+
+/* Board specific data */
+
+ int (*enable_te)(struct omap_display *display, int enable);
+
++ int (*rotate)(struct omap_display *display, int rotate);
++ int (*mirror)(struct omap_display *display, int enable);
++
+ int (*run_test)(struct omap_display *display, int test);
+
+ int bpp;
-From 36ac3fa1184b392dc54024de6d98e4355f2baba8 Mon Sep 17 00:00:00 2001
+From ecdfbac90a49f517c7d5132e44522b089123b413 Mon Sep 17 00:00:00 2001
From: Tomi Valkeinen <tomi.valkeinen@nokia.com>
Date: Tue, 4 Nov 2008 15:12:21 +0200
Subject: [PATCH] DSS: OMAPFB: fb driver for new display subsystem
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
---
- arch/arm/plat-omap/fb.c | 9 +-
- arch/arm/plat-omap/include/mach/omapfb.h | 7 +
+ arch/arm/plat-omap/Makefile | 2 +-
+ arch/arm/plat-omap/fb-vram.c | 498 +++++++++++
+ arch/arm/plat-omap/fb.c | 33 +-
+ arch/arm/plat-omap/include/mach/omapfb.h | 14 +
drivers/video/Kconfig | 1 +
drivers/video/Makefile | 1 +
drivers/video/omap/Kconfig | 5 +-
- drivers/video/omap2/Kconfig | 29 +
+ drivers/video/omap2/Kconfig | 38 +
drivers/video/omap2/Makefile | 2 +
- drivers/video/omap2/omapfb-ioctl.c | 428 ++++++++++
- drivers/video/omap2/omapfb-main.c | 1276 ++++++++++++++++++++++++++++++
- drivers/video/omap2/omapfb-sysfs.c | 833 +++++++++++++++++++
- drivers/video/omap2/omapfb.h | 104 +++
- 11 files changed, 2692 insertions(+), 3 deletions(-)
+ drivers/video/omap2/omapfb-ioctl.c | 462 ++++++++++
+ drivers/video/omap2/omapfb-main.c | 1382 ++++++++++++++++++++++++++++++
+ drivers/video/omap2/omapfb-sysfs.c | 838 ++++++++++++++++++
+ drivers/video/omap2/omapfb.h | 109 +++
+ 13 files changed, 3377 insertions(+), 8 deletions(-)
+ create mode 100644 arch/arm/plat-omap/fb-vram.c
create mode 100644 drivers/video/omap2/Kconfig
create mode 100644 drivers/video/omap2/Makefile
create mode 100644 drivers/video/omap2/omapfb-ioctl.c
create mode 100644 drivers/video/omap2/omapfb-sysfs.c
create mode 100644 drivers/video/omap2/omapfb.h
+diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
+index 2740497..7d602a6 100644
+--- a/arch/arm/plat-omap/Makefile
++++ b/arch/arm/plat-omap/Makefile
+@@ -4,7 +4,7 @@
+
+ # Common support
+ obj-y := common.o sram.o clock.o devices.o dma.o mux.o gpio.o \
+- usb.o fb.o io.o
++ usb.o fb.o fb-vram.o io.o
+ obj-m :=
+ obj-n :=
+ obj- :=
+diff --git a/arch/arm/plat-omap/fb-vram.c b/arch/arm/plat-omap/fb-vram.c
+new file mode 100644
+index 0000000..de24503
+--- /dev/null
++++ b/arch/arm/plat-omap/fb-vram.c
+@@ -0,0 +1,498 @@
++/*
++ * linux/arch/arm/plat-omap/fb-vram.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
++ *
++ * Some code and ideas taken from drivers/video/omap/ driver
++ * by Imre Deak.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++//#define DEBUG
++
++#include <linux/vmalloc.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++#include <linux/dma-mapping.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include <mach/omapfb.h>
++
++#ifdef DEBUG
++#define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__)
++#else
++#define DBG(format, ...)
++#endif
++
++#define OMAP2_SRAM_START 0x40200000
++/* Maximum size, in reality this is smaller if SRAM is partially locked. */
++#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
++
++#define REG_MAP_SIZE(_page_cnt) \
++ ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
++#define REG_MAP_PTR(_rg, _page_nr) \
++ (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
++#define REG_MAP_MASK(_page_nr) \
++ (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
++
++#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) \
++ || defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
++
++/* postponed regions are used to temporarily store region information at boot
++ * time when we cannot yet allocate the region list */
++#define MAX_POSTPONED_REGIONS 10
++
++static int postponed_cnt __initdata;
++static struct {
++ unsigned long paddr;
++ size_t size;
++} postponed_regions[MAX_POSTPONED_REGIONS] __initdata;
++
++struct vram_alloc {
++ struct list_head list;
++ unsigned long paddr;
++ unsigned pages;
++};
++
++struct vram_region {
++ struct list_head list;
++ struct list_head alloc_list;
++ unsigned long paddr;
++ void *vaddr;
++ unsigned pages;
++ unsigned dma_alloced:1;
++};
++
++static DEFINE_MUTEX(region_mutex);
++static LIST_HEAD(region_list);
++
++static inline int region_mem_type(unsigned long paddr)
++{
++ if (paddr >= OMAP2_SRAM_START &&
++ paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
++ return OMAPFB_MEMTYPE_SRAM;
++ else
++ return OMAPFB_MEMTYPE_SDRAM;
++}
++
++static struct vram_region *omap_vram_create_region(unsigned long paddr,
++ void *vaddr, unsigned pages)
++{
++ struct vram_region *rm;
++
++ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
++
++ if (rm) {
++ INIT_LIST_HEAD(&rm->alloc_list);
++ rm->paddr = paddr;
++ rm->vaddr = vaddr;
++ rm->pages = pages;
++ }
++
++ return rm;
++}
++
++static void omap_vram_free_region(struct vram_region *vr)
++{
++ list_del(&vr->list);
++ kfree(vr);
++}
++
++static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
++ unsigned long paddr, unsigned pages)
++{
++ struct vram_alloc *va;
++ struct vram_alloc *new;
++
++ new = kzalloc(sizeof(*va), GFP_KERNEL);
++
++ if (!new)
++ return NULL;
++
++ new->paddr = paddr;
++ new->pages = pages;
++
++ list_for_each_entry(va, &vr->alloc_list, list) {
++ if (va->paddr > new->paddr)
++ break;
++ }
++
++ list_add_tail(&new->list, &va->list);
++
++ return new;
++}
++
++static void omap_vram_free_allocation(struct vram_alloc *va)
++{
++ list_del(&va->list);
++ kfree(va);
++}
++
++__init int omap_vram_add_region_postponed(unsigned long paddr, size_t size)
++{
++ if (postponed_cnt == MAX_POSTPONED_REGIONS)
++ return -ENOMEM;
++
++ postponed_regions[postponed_cnt].paddr = paddr;
++ postponed_regions[postponed_cnt].size = size;
++
++ ++postponed_cnt;
++
++ return 0;
++}
++
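++/*
++ * Usage sketch (for illustration; it mirrors the caller added to fb.c in
++ * this patch): boot code that has reserved or bootmem-allocated a
++ * framebuffer region hands it over before the VRAM pool exists, e.g.
++ *
++ *	vaddr = alloc_bootmem(size);
++ *	paddr = virt_to_phys(vaddr);
++ *	omap_vram_add_region_postponed(paddr, size);
++ *
++ * omap_vram_init() later turns each postponed entry into a real region
++ * at arch_initcall time.
++ */
++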
++/* add/remove_region can be exported if there's need to add/remove regions
++ * runtime */
++static int omap_vram_add_region(unsigned long paddr, size_t size)
++{
++ struct vram_region *rm;
++ void *vaddr;
++ unsigned pages;
++
++ DBG("adding region paddr %08lx size %d\n",
++ paddr, size);
++
++ size &= PAGE_MASK;
++ pages = size >> PAGE_SHIFT;
++
++ vaddr = ioremap_wc(paddr, size);
++ if (vaddr == NULL)
++ return -ENOMEM;
++
++ rm = omap_vram_create_region(paddr, vaddr, pages);
++ if (rm == NULL) {
++ iounmap(vaddr);
++ return -ENOMEM;
++ }
++
++ list_add(&rm->list, &region_list);
++
++ return 0;
++}
++
++#if 0
++int omap_vram_remove_region(unsigned long paddr)
++{
++ struct region *rm;
++ unsigned i;
++
++ DBG("remove region paddr %08lx\n", paddr);
++ list_for_each_entry(rm, &region_list, list)
++ if (rm->paddr != paddr)
++ continue;
++
++ if (rm->paddr != paddr)
++ return -EINVAL;
++
++ for (i = 0; i < rm->page_cnt; i++)
++ if (region_page_reserved(rm, i))
++ return -EBUSY;
++
++ iounmap(rm->vaddr);
++
++ list_del(&rm->list);
++
++ kfree(rm);
++
++ return 0;
++}
++#endif
++
++int omap_vram_free(unsigned long paddr, void *vaddr, size_t size)
++{
++ struct vram_region *rm;
++ struct vram_alloc *alloc;
++ unsigned start, end;
++
++ DBG("free mem paddr %08lx vaddr %p size %d\n",
++ paddr, vaddr, size);
++
++ size = PAGE_ALIGN(size);
++
++ mutex_lock(&region_mutex);
++
++ list_for_each_entry(rm, &region_list, list) {
++ list_for_each_entry(alloc, &rm->alloc_list, list) {
++ start = alloc->paddr;
++  end = alloc->paddr + (alloc->pages << PAGE_SHIFT);
++
++  if (start >= paddr && end <= paddr + size)
++ goto found;
++ }
++ }
++
++ mutex_unlock(&region_mutex);
++ return -EINVAL;
++
++found:
++ if (rm->dma_alloced) {
++ DBG("freeing dma-alloced\n");
++ dma_free_writecombine(NULL, size, vaddr, paddr);
++ omap_vram_free_allocation(alloc);
++ omap_vram_free_region(rm);
++ } else {
++ omap_vram_free_allocation(alloc);
++ }
++
++ mutex_unlock(&region_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(omap_vram_free);
++
++#if 0
++void *omap_vram_reserve(unsigned long paddr, size_t size)
++{
++
++ struct region *rm;
++ unsigned start_page;
++ unsigned end_page;
++ unsigned i;
++ void *vaddr;
++
++ size = PAGE_ALIGN(size);
++
++ rm = region_find_region(paddr, size);
++
++ DBG("reserve mem paddr %08lx size %d\n",
++ paddr, size);
++
++ BUG_ON(rm == NULL);
++
++ start_page = (paddr - rm->paddr) >> PAGE_SHIFT;
++ end_page = start_page + (size >> PAGE_SHIFT);
++ for (i = start_page; i < end_page; i++)
++ region_reserve_page(rm, i);
++
++ vaddr = rm->vaddr + (start_page << PAGE_SHIFT);
++
++ return vaddr;
++}
++EXPORT_SYMBOL(omap_vram_reserve);
++#endif
++
++static void *_omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
++{
++ struct vram_region *rm;
++ struct vram_alloc *alloc;
++ void *vaddr;
++
++ list_for_each_entry(rm, &region_list, list) {
++ unsigned long start, end;
++
++ DBG("checking region %lx %d\n", rm->paddr, rm->pages);
++
++ if (region_mem_type(rm->paddr) != mtype)
++ continue;
++
++ start = rm->paddr;
++
++ list_for_each_entry(alloc, &rm->alloc_list, list) {
++ end = alloc->paddr;
++
++ if (end - start >= pages << PAGE_SHIFT)
++ goto found;
++
++ start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
++ }
++
++ end = rm->paddr + (rm->pages << PAGE_SHIFT);
++found:
++ if (end - start < pages << PAGE_SHIFT)
++ continue;
++
++ DBG("FOUND %lx, end %lx\n", start, end);
++
++ if (omap_vram_create_allocation(rm, start, pages) == NULL)
++ return NULL;
++
++ *paddr = start;
++ vaddr = rm->vaddr + (start - rm->paddr);
++
++ return vaddr;
++ }
++
++ return NULL;
++}
++
++static void *_omap_vram_alloc_dma(unsigned pages, unsigned long *paddr)
++{
++ struct vram_region *rm;
++ void *vaddr;
++
++ vaddr = dma_alloc_writecombine(NULL, pages << PAGE_SHIFT,
++ (dma_addr_t *)paddr, GFP_KERNEL);
++
++ if (vaddr == NULL)
++ return NULL;
++
++ rm = omap_vram_create_region(*paddr, vaddr, pages);
++ if (rm == NULL) {
++ dma_free_writecombine(NULL, pages << PAGE_SHIFT, vaddr,
++ (dma_addr_t)*paddr);
++ return NULL;
++ }
++
++ rm->dma_alloced = 1;
++
++ if (omap_vram_create_allocation(rm, *paddr, pages) == NULL) {
++ dma_free_writecombine(NULL, pages << PAGE_SHIFT, vaddr,
++ (dma_addr_t)*paddr);
++ kfree(rm);
++ return NULL;
++ }
++
++ list_add(&rm->list, &region_list);
++
++ return vaddr;
++}
++
++void *omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
++{
++ void *vaddr;
++ unsigned pages;
++
++ BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size);
++
++ DBG("alloc mem type %d size %d\n", mtype, size);
++
++ size = PAGE_ALIGN(size);
++ pages = size >> PAGE_SHIFT;
++
++ mutex_lock(&region_mutex);
++
++ vaddr = _omap_vram_alloc(mtype, pages, paddr);
++
++ if (vaddr == NULL && mtype == OMAPFB_MEMTYPE_SDRAM) {
++ DBG("fallback to dma_alloc\n");
++
++ vaddr = _omap_vram_alloc_dma(pages, paddr);
++ }
++
++ mutex_unlock(&region_mutex);
++
++ return vaddr;
++}
++EXPORT_SYMBOL(omap_vram_alloc);
++
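++/*
++ * Example use of the allocator (a minimal sketch; omapfb-main.c in this
++ * patch uses the same calls when setting up framebuffer memory):
++ *
++ *	unsigned long paddr;
++ *	void *vaddr;
++ *
++ *	vaddr = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, size, &paddr);
++ *	if (vaddr == NULL)
++ *		return -ENOMEM;
++ *	...
++ *	omap_vram_free(paddr, vaddr, size);
++ */
++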
++#ifdef CONFIG_PROC_FS
++static void *r_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ struct list_head *l = v;
++
++ (*pos)++;
++
++ if (list_is_last(l, &region_list))
++  return NULL;
++
++ return l->next;
++}
++
++static void *r_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t p = *pos;
++ struct list_head *l = &region_list;
++
++ mutex_lock(&region_mutex);
++
++ do {
++ l = l->next;
++  if (l == &region_list)
++ return NULL;
++ } while (p--);
++
++ return l;
++}
++
++static void r_stop(struct seq_file *m, void *v)
++{
++ mutex_unlock(&region_mutex);
++}
++
++static int r_show(struct seq_file *m, void *v)
++{
++ struct vram_region *vr;
++ struct vram_alloc *va;
++ unsigned size;
++
++ vr = list_entry(v, struct vram_region, list);
++
++ size = vr->pages << PAGE_SHIFT;
++ seq_printf(m, "%08lx-%08lx v:%p-%p (%d bytes) %s\n",
++ vr->paddr, vr->paddr + size,
++ vr->vaddr, vr->vaddr + size,
++ size,
++ vr->dma_alloced ? "dma_alloc" : "");
++
++ list_for_each_entry(va, &vr->alloc_list, list) {
++ size = va->pages << PAGE_SHIFT;
++ seq_printf(m, " %08lx-%08lx (%d bytes)\n",
++ va->paddr, va->paddr + size,
++ size);
++ }
++
++ return 0;
++}
++
++static const struct seq_operations resource_op = {
++ .start = r_start,
++ .next = r_next,
++ .stop = r_stop,
++ .show = r_show,
++};
++
++static int vram_open(struct inode *inode, struct file *file)
++{
++ return seq_open(file, &resource_op);
++}
++
++static const struct file_operations proc_vram_operations = {
++ .open = vram_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static int __init omap_vram_create_proc(void)
++{
++ proc_create("omap-vram", 0, NULL, &proc_vram_operations);
++
++ return 0;
++}
++#endif
++
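++/*
++ * Reading /proc/omap-vram lists each region followed by its allocations,
++ * for example (addresses here are made up):
++ *
++ *	80000000-80200000 v:d0880000-d0a80000 (2097152 bytes)
++ *	  80000000-80177000 (1536000 bytes)
++ */
++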
++static __init int omap_vram_init(void)
++{
++ int i, r;
++
++ for (i = 0; i < postponed_cnt; i++)
++ omap_vram_add_region(postponed_regions[i].paddr,
++ postponed_regions[i].size);
++
++#ifdef CONFIG_PROC_FS
++ r = omap_vram_create_proc();
++ if (r)
++ return -ENOMEM;
++#endif
++
++ return 0;
++}
++
++arch_initcall(omap_vram_init);
++
++#endif
++
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
-index 3746222..0ba1603 100644
+index 3746222..da528d0 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
-@@ -36,7 +36,8 @@
+@@ -36,7 +36,11 @@
#include <mach/sram.h>
#include <mach/omapfb.h>
-#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) \
+ || defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
++
++static int omapfb_vram_count;
++static struct omap_fbmem_config *omapfb_vram_config;
static struct omapfb_platform_data omapfb_config;
static int config_invalid;
-@@ -298,14 +299,18 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+@@ -95,11 +99,11 @@ static int get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
+ const struct omap_fbmem_config *conf;
+ u32 paddr;
+
+- conf = omap_get_nr_config(OMAP_TAG_FBMEM,
+- struct omap_fbmem_config, region_idx);
+- if (conf == NULL)
++ if (region_idx >= omapfb_vram_count)
+ return -ENOENT;
+
++ conf = &omapfb_vram_config[region_idx];
++
+ paddr = conf->start;
+ /*
+ * Low bits encode the page allocation mode, if high bits
+@@ -209,6 +213,13 @@ void __init omapfb_reserve_sdram(void)
+ if (rg.paddr) {
+ reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
+ reserved += rg.size;
++ omap_vram_add_region_postponed(rg.paddr, rg.size);
++ } else {
++ void *vaddr;
++ vaddr = alloc_bootmem(rg.size);
++ rg.paddr = virt_to_phys(vaddr);
++ reserved += rg.size;
++ omap_vram_add_region_postponed(rg.paddr, rg.size);
+ }
+ omapfb_config.mem_desc.region[i] = rg;
+ configured_regions++;
+@@ -229,7 +240,7 @@ void __init omapfb_reserve_sdram(void)
+ * this point, since the driver built as a module would have problem with
+ * freeing / reallocating the regions.
+ */
+-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
++unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long pstart_avail,
+@@ -298,14 +309,24 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
return reserved;
}
++void __init omapfb_set_vram_config(struct omap_fbmem_config *config, int count)
++{
++ omapfb_vram_count = count;
++ omapfb_vram_config = config;
++}
++
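++/*
++ * A board file is expected to pass its framebuffer regions through this
++ * hook, roughly as follows (hypothetical board values, for illustration):
++ *
++ *	static struct omap_fbmem_config board_fbmem[] = {
++ *		{ .start = 0x80500000, .size = 2 * 1024 * 1024 },
++ *	};
++ *
++ *	omapfb_set_vram_config(board_fbmem, ARRAY_SIZE(board_fbmem));
++ */
++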
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
void omapfb_set_ctrl_platform_data(void *data)
{
if (config_invalid)
return 0;
-@@ -313,6 +318,7 @@ static inline int omap_init_fb(void)
+@@ -313,6 +334,7 @@ static inline int omap_init_fb(void)
printk(KERN_ERR "Invalid FB mem configuration entries\n");
return 0;
}
conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
if (conf == NULL) {
if (configured_regions)
-@@ -321,6 +327,7 @@ static inline int omap_init_fb(void)
+@@ -321,6 +343,7 @@ static inline int omap_init_fb(void)
return 0;
}
omapfb_config.lcd = *conf;
return platform_device_register(&omap_fb_device);
}
diff --git a/arch/arm/plat-omap/include/mach/omapfb.h b/arch/arm/plat-omap/include/mach/omapfb.h
-index 90d63c5..1e34304 100644
+index 90d63c5..277e3cf 100644
--- a/arch/arm/plat-omap/include/mach/omapfb.h
+++ b/arch/arm/plat-omap/include/mach/omapfb.h
@@ -90,6 +90,13 @@ enum omapfb_color_format {
};
struct omapfb_update_window {
+@@ -392,6 +399,13 @@ extern int omapfb_update_window_async(struct fb_info *fbi,
+
+ /* in arch/arm/plat-omap/fb.c */
+ extern void omapfb_set_ctrl_platform_data(void *pdata);
++extern void omapfb_set_vram_config(struct omap_fbmem_config *config, int count);
++
++/* in arch/arm/plat-omap/fb-vram.c */
++__init int omap_vram_add_region_postponed(unsigned long paddr, size_t size);
++int omap_vram_free(unsigned long paddr, void *vaddr, size_t size);
++void *omap_vram_reserve(unsigned long paddr, size_t size);
++void *omap_vram_alloc(int mtype, size_t size, unsigned long *paddr);
+
+ #endif /* __KERNEL__ */
+
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3f3ce13..689a3b1 100644
--- a/drivers/video/Kconfig
help
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
new file mode 100644
-index 0000000..4b72479
+index 0000000..bfa1617
--- /dev/null
+++ b/drivers/video/omap2/Kconfig
-@@ -0,0 +1,29 @@
+@@ -0,0 +1,38 @@
+config FB_OMAP2
+ tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
+ depends on FB && OMAP2_DSS
+ displays that support manual update are started in manual
+ update mode.
+
++config FB_OMAP2_NUM_FBS
++ int "Number of framebuffers"
++ range 1 10
++ default 3
++ depends on FB_OMAP2
++ help
++ Select the number of framebuffers created. OMAP2/3 has 3 overlays
++ so normally this would be 3.
++
+menu "OMAP2/3 Display Device Drivers"
+ depends on OMAP2_DSS
+
+omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
diff --git a/drivers/video/omap2/omapfb-ioctl.c b/drivers/video/omap2/omapfb-ioctl.c
new file mode 100644
-index 0000000..1ceb6b9
+index 0000000..6bf750f
--- /dev/null
+++ b/drivers/video/omap2/omapfb-ioctl.c
-@@ -0,0 +1,428 @@
+@@ -0,0 +1,462 @@
+/*
+ * linux/drivers/video/omap2/omapfb-ioctl.c
+ *
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
++#include <linux/mm.h>
+
+#include <mach/display.h>
+#include <mach/omapfb.h>
+
+ DBG("omapfb_setup_plane\n");
+
++ omapfb_lock(fbdev);
++
+ if (ofbi->num_overlays != 1) {
+ r = -EINVAL;
+ goto out;
+ }
+
++ /* XXX uses only the first overlay */
+ ovl = ofbi->overlays[0];
+
-+ omapfb_lock(fbdev);
-+
-+ if (display) {
-+ if (pi->pos_x + pi->out_width > display->x_res ||
-+ pi->pos_y + pi->out_height > display->y_res) {
-+ r = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
+ if (pi->enabled && !ofbi->region.size) {
+ /*
+ * This plane's memory was freed, can't enable it
+ goto out;
+ }
+
-+ if (!ovl) {
-+ r = -EINVAL;
-+ goto out;
++ if (pi->enabled) {
++ r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y,
++ pi->out_width, pi->out_height);
++ if (r)
++ goto out;
+ }
+
+ r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y,
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_mem_region *rg;
-+ int ret = -EINVAL;
++ struct omap_display *display = fb2display(fbi);
++ int r, i;
++ size_t size;
++
++ if (mi->type > OMAPFB_MEMTYPE_MAX)
++ return -EINVAL;
++
++ size = PAGE_ALIGN(mi->size);
+
+ rg = &ofbi->region;
+
+ omapfb_lock(fbdev);
-+ if (mi->size > rg->size) {
-+ ret = -ENOMEM;
-+ goto out;
++
++ for (i = 0; i < ofbi->num_overlays; i++) {
++ if (ofbi->overlays[i]->info.enabled) {
++ r = -EBUSY;
++ goto out;
++ }
+ }
+
-+ if (mi->type != rg->type)
-+ goto out;
++ if (rg->size != size || rg->type != mi->type) {
++ struct fb_var_screeninfo new_var;
++ unsigned long old_size = rg->size;
+
-+ ret = 0;
++ if (display->sync)
++ display->sync(display);
++
++ r = omapfb_realloc_fbmem(fbdev, ofbi->id, size);
++ if (r)
++ goto out;
++
++ if (old_size != size) {
++ if (size) {
++ memcpy(&new_var, &fbi->var, sizeof(new_var));
++ r = check_fb_var(fbi, &new_var);
++ if (r < 0)
++ goto out;
++ memcpy(&fbi->var, &new_var, sizeof(fbi->var));
++ set_fb_fix(fbi);
++ } else {
++ /*
++ * Set these explicitly to indicate that the
++ * plane memory is deallocated, the other
++ * screen parameters in var / fix are invalid.
++ */
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ }
++ }
++ }
++
++ r = 0;
+out:
+ omapfb_unlock(fbdev);
+
-+ return ret;
++ return r;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+
diff --git a/drivers/video/omap2/omapfb-main.c b/drivers/video/omap2/omapfb-main.c
new file mode 100644
-index 0000000..c0f1664
+index 0000000..89ad631
--- /dev/null
+++ b/drivers/video/omap2/omapfb-main.c
-@@ -0,0 +1,1276 @@
+@@ -0,0 +1,1382 @@
+/*
+ * linux/drivers/video/omap2/omapfb-main.c
+ *
+ if (var->bits_per_pixel == 16) {
+ u16 *pw = (u16 *)p;
+
-+ if (x == 20 || x == w - 20 ||
++ if (x < 20 && y < 20)
++ *pw = 0xffff;
++ else if (x == 20 || x == w - 20 ||
+ y == 20 || y == h - 20)
+ *pw = 0xffff;
+ else if (x == y || w - x == h - y)
+
+ int r = 0, g = 0, b = 0;
+
-+ if (x == 20 || x == w - 20 ||
++ if (x < 20 && y < 20)
++ r = g = b = 0xff;
++ else if (x == 20 || x == w - 20 ||
+ y == 20 || y == h - 20)
+ r = g = b = 0xff;
+ else if (x == y || w - x == h - y)
+ return -EINVAL;
+}
+
-+static void set_fb_fix(struct fb_info *fbi)
++void set_fb_fix(struct fb_info *fbi)
+{
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ struct fb_var_screeninfo *var = &fbi->var;
+}
+
+/* check new var and possibly modify it to be ok */
-+static int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
++int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omap_display *display = fb2display(fbi);
+
+ DBG("check_fb_var %d\n", ofbi->id);
+
++ if (ofbi->region.size == 0) {
++ memset(var, 0, sizeof(*var));
++ return 0;
++ }
++
+ if (ofbi->num_overlays == 0) {
+ dev_err(ofbi->fbdev->dev, "no overlays, aborting\n");
+ return -EINVAL;
+
+ if (display && display->check_timings) {
+ struct omap_video_timings timings;
++
++ if (var->pixclock == 0) {
++ DBG("Pixclock can't be zero.\n");
++ return -EINVAL;
++ }
++
+ timings.pixel_clock = PICOS2KHZ(var->pixclock);
+ timings.hfp = var->left_margin;
+ timings.hbp = var->right_margin;
+
+ DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id);
+
++ if (ofbi->region.size == 0) {
++ /* the fb is not available. disable the overlay */
++ ovl->enable(ovl, 0);
++ if (!init && ovl->manager)
++ ovl->manager->apply(ovl->manager);
++ continue;
++ }
++
+ if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ outw = var->xres;
+ outh = var->yres;
+ return r;
+}
+
++static void mmap_user_open(struct vm_area_struct *vma)
++{
++ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
++
++ atomic_inc(&ofbi->map_count);
++}
++
++static void mmap_user_close(struct vm_area_struct *vma)
++{
++ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
++
++ atomic_dec(&ofbi->map_count);
++}
++
++static struct vm_operations_struct mmap_user_ops = {
++ .open = mmap_user_open,
++ .close = mmap_user_close,
++};
++
+static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
-+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_mem_region *rg = &ofbi->region;
++ unsigned long off;
++ unsigned long start;
++ u32 len;
+
-+ return dma_mmap_writecombine(fbdev->dev, vma,
-+ rg->vaddr,
-+ rg->paddr,
-+ rg->size);
++ if (vma->vm_end - vma->vm_start == 0)
++ return 0;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++ off = vma->vm_pgoff << PAGE_SHIFT;
++
++ start = rg->paddr;
++ len = rg->size;
++ if (off >= len)
++ return -EINVAL;
++ if ((vma->vm_end - vma->vm_start + off) > len)
++ return -EINVAL;
++ off += start;
++ vma->vm_pgoff = off >> PAGE_SHIFT;
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_ops = &mmap_user_ops;
++ vma->vm_private_data = ofbi;
++ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot))
++ return -EAGAIN;
++ /* vm_ops.open won't be called for mmap itself. */
++ atomic_inc(&ofbi->map_count);
++ return 0;
+}
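++
++/*
++ * From user space the buffer is reached through the standard fbdev mmap
++ * path, roughly (illustrative only):
++ *
++ *	fd = open("/dev/fb0", O_RDWR);
++ *	ioctl(fd, FBIOGET_FSCREENINFO, &fix);
++ *	fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
++ *		  MAP_SHARED, fd, 0);
++ */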
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ omapfb_lock(fbdev);
+
+ switch (blank) {
-+ case VESA_NO_BLANKING:
++ case FB_BLANK_UNBLANK:
+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto exit;
+
+ break;
+
-+ case VESA_POWERDOWN:
++ case FB_BLANK_POWERDOWN:
+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto exit;
+ .fb_setcmap = omapfb_setcmap,
+};
+
-+static int omapfb_free_fbmem(struct omapfb2_device *fbdev)
++static void omapfb_free_fbmem(struct omapfb2_device *fbdev, int fbnum)
++{
++ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[fbnum]);
++ struct omapfb_mem_region *rg;
++
++ rg = &ofbi->region;
++
++ if (rg->paddr)
++ if (omap_vram_free(rg->paddr, rg->vaddr, rg->size))
++   printk(KERN_ERR "VRAM FREE failed\n");
++
++ rg->vaddr = NULL;
++ rg->paddr = 0;
++ rg->alloc = 0;
++ rg->size = 0;
++}
++
++static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
+{
+ int i;
+
-+ DBG("free fbmem\n");
++ DBG("free all fbmem\n");
+
-+ for (i = 0; i < fbdev->num_fbs; i++) {
-+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
-+ struct omapfb_mem_region *rg;
++ for (i = 0; i < fbdev->num_fbs; i++)
++ omapfb_free_fbmem(fbdev, i);
+
-+ rg = &ofbi->region;
++ return 0;
++}
+
-+ if (rg->alloc) {
-+ dma_free_writecombine(fbdev->dev, rg->size,
-+ rg->vaddr, rg->paddr);
-+ }
++static int omapfb_alloc_fbmem(struct omapfb2_device *fbdev, int fbnum,
++ unsigned long size)
++{
++ struct omapfb_info *ofbi;
++ struct omapfb_mem_region *rg;
++ unsigned long paddr;
++ void *vaddr;
+
-+ rg->vaddr = NULL;
-+ rg->paddr = 0;
-+ rg->alloc = 0;
++ ofbi = FB2OFB(fbdev->fbs[fbnum]);
++ rg = &ofbi->region;
++ memset(rg, 0, sizeof(*rg));
++
++ DBG("allocating %lu bytes for fb %d\n",
++ size, ofbi->id);
++
++ vaddr = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, size, &paddr);
++ DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
++
++ if (vaddr == NULL) {
++ dev_err(fbdev->dev,
++ "failed to allocate framebuffer\n");
++ return -ENOMEM;
+ }
+
-+ fbdev->num_fbs = 0;
++ rg->paddr = paddr;
++ rg->vaddr = vaddr;
++ rg->size = size;
++ rg->alloc = 1;
+
+ return 0;
+}
+
-+static int omapfb_allocate_fbmem(struct omapfb2_device *fbdev)
++int omapfb_realloc_fbmem(struct omapfb2_device *fbdev, int fbnum,
++ unsigned long size)
+{
-+ int i;
-+ struct omapfb_mem_desc *plat_mem_desc;
-+ struct omapfb_platform_data *pdata = fbdev->dev->platform_data;
++ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[fbnum]);
++ struct omapfb_mem_region *rg = &ofbi->region;
++ unsigned old_size = rg->size;
++ int r;
+
-+ plat_mem_desc = &pdata->mem_desc;
++ omapfb_free_fbmem(fbdev, fbnum);
+
-+ DBG("omapfb: setup mem regions, %d regions\n",
-+ plat_mem_desc->region_cnt);
++ if (size == 0)
++ return 0;
+
-+ for (i = 0; i < plat_mem_desc->region_cnt; i++) {
-+ struct omapfb_mem_region *plat_rg;
-+ struct omapfb_mem_region *rg;
-+ struct omapfb_info *ofb_info = FB2OFB(fbdev->fbs[i]);
++ r = omapfb_alloc_fbmem(fbdev, fbnum, size);
+
-+ plat_rg = &plat_mem_desc->region[i];
-+ rg = &ofb_info->region;
++ if (r)
++ omapfb_alloc_fbmem(fbdev, fbnum, old_size);
+
-+ memset(rg, 0, sizeof(*rg));
++ return r;
++}
+
-+ DBG("platform region%d phys %08x virt %p size=%lu\n",
-+ i,
-+ plat_rg->paddr,
-+ plat_rg->vaddr,
-+ plat_rg->size);
++/* allocate fbmem using display resolution as reference */
++static int omapfb_alloc_fbmem_display(struct omapfb2_device *fbdev, int fbnum)
++{
++ struct omapfb_info *ofbi;
++ struct omap_display *display;
++ int bytespp;
++ unsigned long size;
+
-+ if (plat_rg->paddr == 0) {
-+ u32 paddr;
-+ void *vaddr;
++ ofbi = FB2OFB(fbdev->fbs[fbnum]);
++ display = fb2display(fbdev->fbs[fbnum]);
+
-+ vaddr = dma_alloc_writecombine(fbdev->dev,
-+ plat_rg->size,
-+ &paddr, GFP_KERNEL);
++ if (!display)
++ return 0;
+
-+ if (vaddr == NULL) {
-+ dev_err(fbdev->dev,
-+ "failed to allocate framebuffer\n");
-+ return -ENOMEM;
-+ }
++ switch (display->bpp) {
++ case 16:
++ bytespp = 2;
++ break;
++ case 24:
++ case 32:
++ bytespp = 4;
++ break;
++ default:
++ bytespp = 4;
++ break;
++ }
+
-+ rg->paddr = paddr;
-+ rg->vaddr = vaddr;
-+ rg->size = plat_rg->size;
-+ rg->alloc = 1;
-+ } else {
-+ dev_err(fbdev->dev,
-+ "Using preallocated fb not supported\n");
-+ return -EINVAL;
-+ }
++ size = display->x_res * display->y_res * bytespp;
++
++ return omapfb_alloc_fbmem(fbdev, fbnum, size);
++}
++
++static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
++{
++ int i, r;
++
++ for (i = 0; i < fbdev->num_fbs; i++) {
++ r = omapfb_alloc_fbmem_display(fbdev, i);
++
++ if (r)
++ return r;
+ }
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
-+ struct omapfb_info *ofb_info = FB2OFB(fbdev->fbs[i]);
++ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+ struct omapfb_mem_region *rg;
-+ rg = &ofb_info->region;
++ rg = &ofbi->region;
+
+ DBG("region%d phys %08x virt %p size=%lu\n",
+ i,
+ struct omap_display *display = fb2display(fbi);
+ int r = 0;
+
-+ if (!display) {
-+ dev_err(fbdev->dev, "cannot fbinfo_init, no display\n");
-+ return -EINVAL;
-+ }
-+
+ fbi->fbops = &omapfb_ops;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->pseudo_palette = fbdev->pseudo_palette;
+
+ strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+
-+ var->xres = display->x_res;
-+ var->yres = display->y_res;
-+ var->xres_virtual = var->xres;
-+ var->yres_virtual = var->yres;
-+ /* var->rotate = def_rotate; */
-+
+ var->nonstd = 0;
+
-+ switch (display->bpp) {
-+ case 16:
-+ var->bits_per_pixel = 16;
-+ break;
-+ case 18:
-+ var->bits_per_pixel = 16;
-+ break;
-+ case 24:
-+ var->bits_per_pixel = 32;
-+ break;
-+ default:
-+ dev_err(fbdev->dev, "illegal display bpp\n");
-+ return -EINVAL;
-+ }
++ if (display) {
++ var->xres = display->x_res;
++ var->yres = display->y_res;
++ var->xres_virtual = var->xres;
++ var->yres_virtual = var->yres;
++ /* var->rotate = def_rotate; */
+
-+ if (display->get_timings) {
-+ struct omap_video_timings timings;
-+ display->get_timings(display, &timings);
++ switch (display->bpp) {
++ case 16:
++ var->bits_per_pixel = 16;
++ break;
++ case 18:
++ var->bits_per_pixel = 16;
++ break;
++ case 24:
++ var->bits_per_pixel = 32;
++ break;
++ default:
++ dev_err(fbdev->dev, "illegal display bpp\n");
++ return -EINVAL;
++ }
+
-+ /* pixclock in ps, the rest in pixclock */
-+ var->pixclock = KHZ2PICOS(timings.pixel_clock);
-+ var->left_margin = timings.hfp;
-+ var->right_margin = timings.hbp;
-+ var->upper_margin = timings.vfp;
-+ var->lower_margin = timings.vbp;
-+ var->hsync_len = timings.hsw;
-+ var->vsync_len = timings.vsw;
-+ } else {
-+ var->pixclock = 0;
-+ var->left_margin = 0;
-+ var->right_margin = 0;
-+ var->upper_margin = 0;
-+ var->lower_margin = 0;
-+ var->hsync_len = 0;
-+ var->vsync_len = 0;
++ if (display->get_timings) {
++ struct omap_video_timings timings;
++ display->get_timings(display, &timings);
++
++ /* pixclock in ps, the rest in pixclock */
++ var->pixclock = KHZ2PICOS(timings.pixel_clock);
++ var->left_margin = timings.hfp;
++ var->right_margin = timings.hbp;
++ var->upper_margin = timings.vfp;
++ var->lower_margin = timings.vbp;
++ var->hsync_len = timings.hsw;
++ var->vsync_len = timings.vsw;
++ } else {
++ var->pixclock = 0;
++ var->left_margin = 0;
++ var->right_margin = 0;
++ var->upper_margin = 0;
++ var->lower_margin = 0;
++ var->hsync_len = 0;
++ var->vsync_len = 0;
++ }
+ }
+
+ r = check_fb_var(fbi, var);
+ unregister_framebuffer(fbdev->fbs[i]);
+
+ /* free the reserved fbmem */
-+ omapfb_free_fbmem(fbdev);
++ omapfb_free_all_fbmem(fbdev);
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ fbinfo_cleanup(fbdev, fbdev->fbs[i]);
+ framebuffer_release(fbdev->fbs[i]);
+ }
+
-+
+ for (i = 0; i < fbdev->num_displays; i++) {
+ if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
+ fbdev->displays[i]->disable(fbdev->displays[i]);
+
+static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
+{
-+ int r;
-+ int i;
-+ struct omapfb_mem_desc *plat_mem_desc;
-+ struct omapfb_platform_data *pdata = fbdev->dev->platform_data;
-+
-+ plat_mem_desc = &pdata->mem_desc;
++ int r, i;
+
+ fbdev->num_fbs = 0;
+
-+ DBG("create %d framebuffers\n", plat_mem_desc->region_cnt);
++ DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS);
+
+ /* allocate fb_infos */
-+ for (i = 0; i < plat_mem_desc->region_cnt; i++) {
++ for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) {
+ struct fb_info *fbi;
+ struct omapfb_info *ofbi;
+
+
+ ofbi = FB2OFB(fbi);
+ ofbi->fbdev = fbdev;
-+ /* XXX here we presume we have enough overlays */
-+ ofbi->overlays[0] = fbdev->overlays[i];
-+ ofbi->num_overlays = 1;
+ ofbi->id = i;
+ fbdev->num_fbs++;
+ }
+
+ DBG("fb_infos allocated\n");
+
++ /* assign overlays for the fbs */
++ for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) {
++ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
++
++ ofbi->overlays[0] = fbdev->overlays[i];
++ ofbi->num_overlays = 1;
++ }
++
+ /* allocate fb memories */
-+ r = omapfb_allocate_fbmem(fbdev);
++ r = omapfb_allocate_all_fbs(fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "failed to allocate fbmem\n");
+ return r;
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/omap2/omapfb-sysfs.c b/drivers/video/omap2/omapfb-sysfs.c
new file mode 100644
-index 0000000..e01edd1
+index 0000000..59b48ac
--- /dev/null
+++ b/drivers/video/omap2/omapfb-sysfs.c
-@@ -0,0 +1,833 @@
+@@ -0,0 +1,838 @@
+/*
+ * linux/drivers/video/omap2/omapfb-sysfs.c
+ *
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
++ struct omapfb_mem_region *rg;
++
++ rg = &ofbi->region;
+
-+ l += snprintf(buf + l, size - l, "%d t:", ofbi->id);
++ l += snprintf(buf + l, size - l, "%d p:%08x v:%p size:%lu t:",
++ ofbi->id,
++ rg->paddr, rg->vaddr, rg->size);
+
+ if (ofbi->num_overlays == 0)
+ l += snprintf(buf + l, size - l, "none");
+
diff --git a/drivers/video/omap2/omapfb.h b/drivers/video/omap2/omapfb.h
new file mode 100644
-index 0000000..04ca444
+index 0000000..60352da
--- /dev/null
+++ b/drivers/video/omap2/omapfb.h
-@@ -0,0 +1,104 @@
+@@ -0,0 +1,109 @@
+/*
+ * linux/drivers/video/omap2/omapfb.h
+ *
+struct omapfb_info {
+ int id;
+ struct omapfb_mem_region region;
++ atomic_t map_count;
+ int num_overlays;
+ struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB];
+ struct omapfb2_device *fbdev;
+ struct omap_overlay_manager *managers[10];
+};
+
++void set_fb_fix(struct fb_info *fbi);
++int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var);
++int omapfb_realloc_fbmem(struct omapfb2_device *fbdev, int fbnum,
++ unsigned long size);
+int omapfb_apply_changes(struct fb_info *fbi, int init);
+int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
+ int posx, int posy, int outw, int outh);